2nd POC for How to Use ROCm for AI (#282) (#4299)

* New TOC for ROCm for AI developed

Co-authored-by: Peter Park <peter.park@amd.com>
Author: Pratik Basyal
Date: 2025-01-27 15:49:21 -05:00
Committed by: GitHub
Parent: 5fabb45bd1
Commit: 353d2fe1c1
38 changed files with 309 additions and 257 deletions

@@ -36,40 +36,62 @@ subtrees:
title: Use ROCm for AI
subtrees:
- entries:
- file: how-to/rocm-for-ai/install.rst
title: Installation
- file: how-to/rocm-for-ai/train-a-model.rst
title: Train a model
- file: how-to/rocm-for-ai/scale-model-training.rst
title: Scale model training
- file: how-to/rocm-for-ai/hugging-face-models.rst
title: Run models from Hugging Face
- file: how-to/rocm-for-ai/deploy-your-model.rst
title: Deploy your model
- file: how-to/rocm-for-hpc/index.rst
title: Use ROCm for HPC
- file: how-to/llm-fine-tuning-optimization/index.rst
title: Fine-tune LLMs and inference optimization
subtrees:
- entries:
- file: how-to/llm-fine-tuning-optimization/overview.rst
title: Conceptual overview
- file: how-to/llm-fine-tuning-optimization/fine-tuning-and-inference.rst
- file: how-to/rocm-for-ai/training/index.rst
title: Training
subtrees:
- entries:
- file: how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference.rst
title: Use a single accelerator
- file: how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference.rst
title: Use multiple accelerators
- file: how-to/llm-fine-tuning-optimization/model-quantization.rst
- file: how-to/llm-fine-tuning-optimization/model-acceleration-libraries.rst
- file: how-to/llm-fine-tuning-optimization/llm-inference-frameworks.rst
- file: how-to/llm-fine-tuning-optimization/optimizing-with-composable-kernel.md
title: Optimize with Composable Kernel
- file: how-to/llm-fine-tuning-optimization/optimizing-triton-kernel.rst
title: Optimize Triton kernels
- file: how-to/llm-fine-tuning-optimization/profiling-and-debugging.rst
title: Profile and debug
- file: how-to/rocm-for-ai/training/train-a-model.rst
title: Train a model
- file: how-to/rocm-for-ai/training/scale-model-training.rst
title: Scale model training
- file: how-to/rocm-for-ai/fine-tuning/index.rst
title: Fine-tuning LLMs
subtrees:
- entries:
- file: how-to/rocm-for-ai/fine-tuning/overview.rst
title: Conceptual overview
- file: how-to/rocm-for-ai/fine-tuning/fine-tuning-and-inference.rst
title: Fine-tuning
subtrees:
- entries:
- file: how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference.rst
title: Use a single accelerator
- file: how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference.rst
title: Use multiple accelerators
- file: how-to/rocm-for-ai/inference/index.rst
title: Inference
subtrees:
- entries:
- file: how-to/rocm-for-ai/inference/install.rst
title: Installation
- file: how-to/rocm-for-ai/inference/hugging-face-models.rst
title: Run models from Hugging Face
- file: how-to/rocm-for-ai/inference/llm-inference-frameworks.rst
title: LLM inference frameworks
- file: how-to/rocm-for-ai/inference/vllm-benchmark.rst
title: Performance validation
- file: how-to/rocm-for-ai/inference/deploy-your-model.rst
title: Deploy your model
- file: how-to/rocm-for-ai/inference-optimization/index.rst
title: Inference optimization
subtrees:
- entries:
- file: how-to/rocm-for-ai/inference-optimization/model-quantization.rst
- file: how-to/rocm-for-ai/inference-optimization/model-acceleration-libraries.rst
- file: how-to/rocm-for-ai/inference-optimization/optimizing-with-composable-kernel.md
title: Optimize with Composable Kernel
- file: how-to/rocm-for-ai/inference-optimization/optimizing-triton-kernel.rst
title: Optimize Triton kernels
- file: how-to/rocm-for-ai/inference-optimization/profiling-and-debugging.rst
title: Profile and debug
- file: how-to/rocm-for-ai/inference-optimization/workload.rst
title: Workload tuning
- file: how-to/rocm-for-hpc/index.rst
title: Use ROCm for HPC
- file: how-to/system-optimization/index.rst
title: System optimization
subtrees:
@@ -86,14 +108,6 @@ subtrees:
title: AMD RDNA 2
- file: how-to/tuning-guides/mi300x/index.rst
title: AMD MI300X performance validation and tuning
subtrees:
- entries:
- file: how-to/performance-validation/mi300x/vllm-benchmark.rst
title: Performance validation
- file: how-to/tuning-guides/mi300x/system.rst
title: System tuning
- file: how-to/tuning-guides/mi300x/workload.rst
title: Workload tuning
- file: how-to/system-debugging.md
- file: conceptual/compiler-topics.md
title: Use advanced compiler features
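
For reference, the modified file appears to be the documentation table of contents in sphinx-external-toc YAML format (nested "subtrees"/"entries" with "file" and "title" keys). The sketch below is an abbreviated reconstruction of the new "Use ROCm for AI" branch based only on the added entries in this diff: the exact indentation is not preserved by the diff view and is illustrative, the parent entry's own file path is not visible in this hunk and is omitted, and only the Training branch is expanded while the other branches are collapsed to their index entries.

title: Use ROCm for AI
subtrees:
  - entries:
      - file: how-to/rocm-for-ai/training/index.rst
        title: Training
        subtrees:
          - entries:
              - file: how-to/rocm-for-ai/training/train-a-model.rst
                title: Train a model
              - file: how-to/rocm-for-ai/training/scale-model-training.rst
                title: Scale model training
      - file: how-to/rocm-for-ai/fine-tuning/index.rst
        title: Fine-tuning LLMs
      - file: how-to/rocm-for-ai/inference/index.rst
        title: Inference
      - file: how-to/rocm-for-ai/inference-optimization/index.rst
        title: Inference optimization

In this layout, the previously flat "Use ROCm for AI" section and the separate "Fine-tune LLMs and inference optimization" section are folded into four sibling branches (Training, Fine-tuning LLMs, Inference, and Inference optimization), each rooted at its own index page under how-to/rocm-for-ai/.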