# Mirror of https://github.com/ROCm/ROCm.git (synced 2026-01-06 21:33:57 -05:00)
# Anywhere {branch} is used, the branch name will be substituted.
# These comments will also be removed.
defaults:
  numbered: False
  maxdepth: 6
root: index
subtrees:
- entries:
  - file: what-is-rocm.rst
  - file: about/release-notes.md
    title: Release notes
  - file: compatibility/compatibility-matrix.rst
    title: Compatibility matrix
    entries:
    - url: https://rocm.docs.amd.com/projects/install-on-linux-internal/en/latest/reference/system-requirements.html
      title: Linux system requirements
    - url: https://rocm.docs.amd.com/projects/install-on-windows/en/${branch}/reference/system-requirements.html
      title: Windows system requirements

- caption: Install
  entries:
  - url: https://rocm.docs.amd.com/projects/install-on-linux-internal/en/latest/
    title: ROCm on Linux
  - url: https://rocm.docs.amd.com/projects/install-on-windows/en/latest/
    title: HIP SDK on Windows
  - url: https://rocm.docs.amd.com/projects/radeon-ryzen/en/latest/index.html
    title: ROCm on Radeon and Ryzen
  - file: how-to/deep-learning-rocm.md
    title: Deep learning frameworks
    subtrees:
    - entries:
      - file: compatibility/ml-compatibility/pytorch-compatibility.rst
        title: PyTorch compatibility
      - file: compatibility/ml-compatibility/tensorflow-compatibility.rst
        title: TensorFlow compatibility
      - file: compatibility/ml-compatibility/jax-compatibility.rst
        title: JAX compatibility
      - file: compatibility/ml-compatibility/verl-compatibility.rst
        title: verl compatibility
      - file: compatibility/ml-compatibility/stanford-megatron-lm-compatibility.rst
        title: Stanford Megatron-LM compatibility
      - file: compatibility/ml-compatibility/dgl-compatibility.rst
        title: DGL compatibility
      - file: compatibility/ml-compatibility/megablocks-compatibility.rst
        title: Megablocks compatibility
      - file: compatibility/ml-compatibility/taichi-compatibility.rst
        title: Taichi compatibility
      - file: compatibility/ml-compatibility/ray-compatibility.rst
        title: Ray compatibility
      - file: compatibility/ml-compatibility/llama-cpp-compatibility.rst
        title: llama.cpp compatibility
      - file: compatibility/ml-compatibility/flashinfer-compatibility.rst
        title: FlashInfer compatibility
  - file: how-to/build-rocm.rst
    title: Build ROCm from source

- caption: How to
  entries:
  - file: how-to/rocm-for-ai/index.rst
    title: Use ROCm for AI
    subtrees:
    - entries:
      - file: how-to/rocm-for-ai/install.rst
        title: Installation
      - file: how-to/rocm-for-ai/system-setup/index.rst
        title: System setup
        entries:
        - file: how-to/rocm-for-ai/system-setup/prerequisite-system-validation.rst
          title: System validation
        - file: how-to/rocm-for-ai/system-setup/multi-node-setup.rst
          title: Multi-node setup
        - file: how-to/rocm-for-ai/system-setup/system-health-check.rst
          title: System health benchmarks
      - file: how-to/rocm-for-ai/training/index.rst
        title: Training
        subtrees:
        - entries:
          - file: how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst
            title: Train a model with Primus and Megatron-LM
          - file: how-to/rocm-for-ai/training/benchmark-docker/primus-pytorch.rst
            title: Train a model with Primus and PyTorch
          - file: how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst
            title: Train a model with JAX MaxText
          - file: how-to/rocm-for-ai/training/benchmark-docker/mpt-llm-foundry
            title: Train a model with LLM Foundry
          - file: how-to/rocm-for-ai/training/scale-model-training.rst
            title: Scale model training

      - file: how-to/rocm-for-ai/fine-tuning/index.rst
        title: Fine-tuning LLMs
        subtrees:
        - entries:
          - file: how-to/rocm-for-ai/fine-tuning/overview.rst
            title: Conceptual overview
          - file: how-to/rocm-for-ai/fine-tuning/fine-tuning-and-inference.rst
            title: Fine-tuning
            subtrees:
            - entries:
              - file: how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference.rst
                title: Use a single GPU
              - file: how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference.rst
                title: Use multiple GPUs

      - file: how-to/rocm-for-ai/inference/index.rst
        title: Inference
        subtrees:
        - entries:
          - file: how-to/rocm-for-ai/inference/hugging-face-models.rst
            title: Run models from Hugging Face
          - file: how-to/rocm-for-ai/inference/llm-inference-frameworks.rst
            title: LLM inference frameworks
          - file: how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst
            title: vLLM inference performance testing
          - file: how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst
            title: PyTorch inference performance testing
          - file: how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst
            title: SGLang inference performance testing
          - file: how-to/rocm-for-ai/inference/benchmark-docker/sglang-distributed.rst
            title: SGLang distributed inference with Mooncake
          - file: how-to/rocm-for-ai/inference/deploy-your-model.rst
            title: Deploy your model

      - file: how-to/rocm-for-ai/inference-optimization/index.rst
        title: Inference optimization
        subtrees:
        - entries:
          - file: how-to/rocm-for-ai/inference-optimization/model-quantization.rst
          - file: how-to/rocm-for-ai/inference-optimization/model-acceleration-libraries.rst
          - file: how-to/rocm-for-ai/inference-optimization/optimizing-with-composable-kernel.md
            title: Optimize with Composable Kernel
          - file: how-to/rocm-for-ai/inference-optimization/optimizing-triton-kernel.rst
            title: Optimize Triton kernels
          - file: how-to/rocm-for-ai/inference-optimization/profiling-and-debugging.rst
            title: Profile and debug
          - file: how-to/rocm-for-ai/inference-optimization/workload.rst
            title: Workload optimization
          - file: how-to/rocm-for-ai/inference-optimization/vllm-optimization.rst
            title: vLLM V1 performance optimization

      - url: https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/
        title: AI tutorials

  - file: how-to/rocm-for-hpc/index.rst
    title: Use ROCm for HPC
  - file: how-to/system-optimization/index.rst
    title: System optimization
  - file: how-to/gpu-performance/mi300x.rst
    title: AMD Instinct MI300X performance guides
  - file: how-to/system-debugging.md
  - file: conceptual/compiler-topics.md
    title: Use advanced compiler features
    subtrees:
    - entries:
      - url: https://rocm.docs.amd.com/projects/llvm-project/en/latest/index.html
        title: ROCm compiler infrastructure
      - url: https://rocm.docs.amd.com/projects/llvm-project/en/latest/conceptual/using-gpu-sanitizer.html
        title: Use AddressSanitizer
      - url: https://rocm.docs.amd.com/projects/llvm-project/en/latest/conceptual/openmp.html
        title: OpenMP support
  - file: how-to/setting-cus
    title: Set the number of CUs
  - file: how-to/Bar-Memory.rst
    title: Troubleshoot BAR access limitation
  - url: https://github.com/amd/rocm-examples
    title: ROCm examples

- caption: Conceptual
  entries:
  - file: conceptual/gpu-arch.md
    title: GPU architecture overview
    subtrees:
    - entries:
      - file: conceptual/gpu-arch/mi300.md
        title: MI300 microarchitecture
        subtrees:
        - entries:
          - url: https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/instruction-set-architectures/amd-instinct-mi300-cdna3-instruction-set-architecture.pdf
            title: AMD Instinct MI300/CDNA3 ISA
          - url: https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/white-papers/amd-cdna-3-white-paper.pdf
            title: White paper
          - file: conceptual/gpu-arch/mi300-mi200-performance-counters.rst
            title: MI300 and MI200 performance counters
          - file: conceptual/gpu-arch/mi350-performance-counters.rst
            title: MI350 Series performance counters
      - file: conceptual/gpu-arch/mi250.md
        title: MI250 microarchitecture
        subtrees:
        - entries:
          - url: https://www.amd.com/system/files/TechDocs/instinct-mi200-cdna2-instruction-set-architecture.pdf
            title: AMD Instinct MI200/CDNA2 ISA
          - url: https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna2-white-paper.pdf
            title: White paper
      - file: conceptual/gpu-arch/mi100.md
        title: MI100 microarchitecture
        subtrees:
        - entries:
          - url: https://www.amd.com/system/files/TechDocs/instinct-mi100-cdna1-shader-instruction-set-architecture%C2%A0.pdf
            title: AMD Instinct MI100/CDNA1 ISA
          - url: https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna-white-paper.pdf
            title: White paper
  - file: conceptual/file-reorg.md
    title: File structure (Linux FHS)
  - file: conceptual/gpu-isolation.md
    title: GPU isolation techniques
  - file: conceptual/cmake-packages.rst
    title: Using CMake
  - file: conceptual/ai-pytorch-inception.md
    title: Inception v3 with PyTorch

- caption: Reference
  entries:
  - file: reference/api-libraries.md
    title: ROCm libraries
  - file: reference/rocm-tools.md
    title: ROCm tools, compilers, and runtimes
  - file: reference/gpu-arch-specs.rst
  - file: reference/gpu-atomics-operation.rst
  - file: reference/env-variables.rst
    title: Environment variables
  - file: reference/precision-support.rst
    title: Data types and precision support
  - file: reference/graph-safe-support.rst
    title: Graph safe support

- caption: Contribute
  entries:
  - file: contribute/contributing.md
    title: Contributing to the ROCm documentation
    subtrees:
    - entries:
      - file: contribute/toolchain.md
        title: ROCm documentation toolchain
      - file: contribute/building.md
  - file: contribute/feedback.md
    title: Providing feedback about the ROCm documentation
  - file: about/license.md
    title: ROCm licenses