mirror of
https://github.com/ROCm/ROCm.git
synced 2026-01-11 15:47:59 -05:00
Compare commits
8 Commits
rocm-submo
...
pytorch_co
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8a13947e8f | ||
|
|
b82258bf51 | ||
|
|
2beb93c33c | ||
|
|
a66bc1d85e | ||
|
|
36b6ffaf7c | ||
|
|
40e4ba3ecc | ||
|
|
1f41ce26be | ||
|
|
9293723381 |
@@ -89,6 +89,8 @@ jobs:
|
||||
-GNinja
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
|
||||
parameters:
|
||||
gpuTarget: ${{ job.target }}
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml
|
||||
# - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
|
||||
# parameters:
|
||||
@@ -122,6 +124,8 @@ jobs:
|
||||
registerROCmPackages: true
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml
|
||||
parameters:
|
||||
gpuTarget: ${{ job.target }}
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
|
||||
parameters:
|
||||
@@ -147,4 +151,3 @@ jobs:
|
||||
environment: test
|
||||
gpuTarget: ${{ job.target }}
|
||||
registerROCmPackages: true
|
||||
optSymLink: true
|
||||
|
||||
251
.gitmodules
vendored
251
.gitmodules
vendored
@@ -1,251 +0,0 @@
|
||||
[submodule "submodule-srcs/ROCR-Runtime"]
|
||||
path = submodule-srcs/ROCR-Runtime
|
||||
url = https://github.com/rocm/ROCR-Runtime
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/amdsmi"]
|
||||
path = submodule-srcs/amdsmi
|
||||
url = https://github.com/rocm/amdsmi
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rdc"]
|
||||
path = submodule-srcs/rdc
|
||||
url = https://github.com/rocm/rdc
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocm_bandwidth_test"]
|
||||
path = submodule-srcs/rocm_bandwidth_test
|
||||
url = https://github.com/rocm/rocm_bandwidth_test
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocm_smi_lib"]
|
||||
path = submodule-srcs/rocm_smi_lib
|
||||
url = https://github.com/rocm/rocm_smi_lib
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocm-core"]
|
||||
path = submodule-srcs/rocm-core
|
||||
url = https://github.com/rocm/rocm-core
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocm-examples"]
|
||||
path = submodule-srcs/rocm-examples
|
||||
url = https://github.com/rocm/rocm-examples
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocminfo"]
|
||||
path = submodule-srcs/rocminfo
|
||||
url = https://github.com/rocm/rocminfo
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocprofiler"]
|
||||
path = submodule-srcs/rocprofiler
|
||||
url = https://github.com/rocm/rocprofiler
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocprofiler-register"]
|
||||
path = submodule-srcs/rocprofiler-register
|
||||
url = https://github.com/rocm/rocprofiler-register
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocprofiler-sdk"]
|
||||
path = submodule-srcs/rocprofiler-sdk
|
||||
url = https://github.com/rocm/rocprofiler-sdk
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocprofiler-compute"]
|
||||
path = submodule-srcs/rocprofiler-compute
|
||||
url = https://github.com/rocm/rocprofiler-compute
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocprofiler-systems"]
|
||||
path = submodule-srcs/rocprofiler-systems
|
||||
url = https://github.com/rocm/rocprofiler-systems
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/roctracer"]
|
||||
path = submodule-srcs/roctracer
|
||||
url = https://github.com/rocm/roctracer
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/HIP"]
|
||||
path = submodule-srcs/HIP
|
||||
url = https://github.com/rocm/HIP
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hip-tests"]
|
||||
path = submodule-srcs/hip-tests
|
||||
url = https://github.com/rocm/hip-tests
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/HIPIFY"]
|
||||
path = submodule-srcs/HIPIFY
|
||||
url = https://github.com/rocm/HIPIFY
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/clr"]
|
||||
path = submodule-srcs/clr
|
||||
url = https://github.com/rocm/clr
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipother"]
|
||||
path = submodule-srcs/hipother
|
||||
url = https://github.com/rocm/hipother
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/half"]
|
||||
path = submodule-srcs/half
|
||||
url = https://github.com/rocm/half
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/llvm-project"]
|
||||
path = submodule-srcs/llvm-project
|
||||
url = https://github.com/rocm/llvm-project
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/spirv-llvm-translator"]
|
||||
path = submodule-srcs/spirv-llvm-translator
|
||||
url = https://github.com/rocm/spirv-llvm-translator
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/ROCdbgapi"]
|
||||
path = submodule-srcs/ROCdbgapi
|
||||
url = https://github.com/rocm/ROCdbgapi
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/ROCgdb"]
|
||||
path = submodule-srcs/ROCgdb
|
||||
url = https://github.com/rocm/ROCgdb
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocr_debug_agent"]
|
||||
path = submodule-srcs/rocr_debug_agent
|
||||
url = https://github.com/rocm/rocr_debug_agent
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/AMDMIGraphX"]
|
||||
path = submodule-srcs/AMDMIGraphX
|
||||
url = https://github.com/rocm/AMDMIGraphX
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/MIOpen"]
|
||||
path = submodule-srcs/MIOpen
|
||||
url = https://github.com/rocm/MIOpen
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/MIVisionX"]
|
||||
path = submodule-srcs/MIVisionX
|
||||
url = https://github.com/rocm/MIVisionX
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/ROCmValidationSuite"]
|
||||
path = submodule-srcs/ROCmValidationSuite
|
||||
url = https://github.com/rocm/ROCmValidationSuite
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/Tensile"]
|
||||
path = submodule-srcs/Tensile
|
||||
url = https://github.com/rocm/Tensile
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/composable_kernel"]
|
||||
path = submodule-srcs/composable_kernel
|
||||
url = https://github.com/rocm/composable_kernel
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipBLAS-common"]
|
||||
path = submodule-srcs/hipBLAS-common
|
||||
url = https://github.com/rocm/hipBLAS-common
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipBLAS"]
|
||||
path = submodule-srcs/hipBLAS
|
||||
url = https://github.com/rocm/hipBLAS
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipBLASLt"]
|
||||
path = submodule-srcs/hipBLASLt
|
||||
url = https://github.com/rocm/hipBLASLt
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipCUB"]
|
||||
path = submodule-srcs/hipCUB
|
||||
url = https://github.com/rocm/hipCUB
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipFFT"]
|
||||
path = submodule-srcs/hipFFT
|
||||
url = https://github.com/rocm/hipFFT
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipRAND"]
|
||||
path = submodule-srcs/hipRAND
|
||||
url = https://github.com/rocm/hipRAND
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipSOLVER"]
|
||||
path = submodule-srcs/hipSOLVER
|
||||
url = https://github.com/rocm/hipSOLVER
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipSPARSE"]
|
||||
path = submodule-srcs/hipSPARSE
|
||||
url = https://github.com/rocm/hipSPARSE
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipSPARSELt"]
|
||||
path = submodule-srcs/hipSPARSELt
|
||||
url = https://github.com/rocm/hipSPARSELt
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipTensor"]
|
||||
path = submodule-srcs/hipTensor
|
||||
url = https://github.com/rocm/hipTensor
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/hipfort"]
|
||||
path = submodule-srcs/hipfort
|
||||
url = https://github.com/rocm/hipfort
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rccl"]
|
||||
path = submodule-srcs/rccl
|
||||
url = https://github.com/rocm/rccl
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocAL"]
|
||||
path = submodule-srcs/rocAL
|
||||
url = https://github.com/rocm/rocAL
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocALUTION"]
|
||||
path = submodule-srcs/rocALUTION
|
||||
url = https://github.com/rocm/rocALUTION
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocBLAS"]
|
||||
path = submodule-srcs/rocBLAS
|
||||
url = https://github.com/rocm/rocBLAS
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocDecode"]
|
||||
path = submodule-srcs/rocDecode
|
||||
url = https://github.com/rocm/rocDecode
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocJPEG"]
|
||||
path = submodule-srcs/rocJPEG
|
||||
url = https://github.com/rocm/rocJPEG
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocPyDecode"]
|
||||
path = submodule-srcs/rocPyDecode
|
||||
url = https://github.com/rocm/rocPyDecode
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocFFT"]
|
||||
path = submodule-srcs/rocFFT
|
||||
url = https://github.com/rocm/rocFFT
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocPRIM"]
|
||||
path = submodule-srcs/rocPRIM
|
||||
url = https://github.com/rocm/rocPRIM
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocRAND"]
|
||||
path = submodule-srcs/rocRAND
|
||||
url = https://github.com/rocm/rocRAND
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocSHMEM"]
|
||||
path = submodule-srcs/rocSHMEM
|
||||
url = https://github.com/rocm/rocSHMEM
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocSOLVER"]
|
||||
path = submodule-srcs/rocSOLVER
|
||||
url = https://github.com/rocm/rocSOLVER
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocSPARSE"]
|
||||
path = submodule-srcs/rocSPARSE
|
||||
url = https://github.com/rocm/rocSPARSE
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocThrust"]
|
||||
path = submodule-srcs/rocThrust
|
||||
url = https://github.com/rocm/rocThrust
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocWMMA"]
|
||||
path = submodule-srcs/rocWMMA
|
||||
url = https://github.com/rocm/rocWMMA
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rocm-cmake"]
|
||||
path = submodule-srcs/rocm-cmake
|
||||
url = https://github.com/rocm/rocm-cmake
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/rpp"]
|
||||
path = submodule-srcs/rpp
|
||||
url = https://github.com/rocm/rpp
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/TransferBench"]
|
||||
path = submodule-srcs/TransferBench
|
||||
url = https://github.com/rocm/TransferBench
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/openmp-extras/aomp"]
|
||||
path = submodule-srcs/openmp-extras/aomp
|
||||
url = https://github.com/rocm/aomp
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/openmp-extras/aomp-extras"]
|
||||
path = submodule-srcs/openmp-extras/aomp-extras
|
||||
url = https://github.com/rocm/aomp-extras
|
||||
branch = release/rocm-rel-6.4
|
||||
[submodule "submodule-srcs/ROCK-Kernel-Driver"]
|
||||
path = submodule-srcs/ROCK-Kernel-Driver
|
||||
url = https://github.com/rocm/ROCK-Kernel-Driver
|
||||
44
README.md
44
README.md
@@ -20,17 +20,12 @@ source software compilers, debuggers, and libraries. ROCm is fully integrated in
|
||||
(ML) frameworks, such as PyTorch and TensorFlow.
|
||||
|
||||
## Getting the ROCm Source Code
|
||||
AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. The source code for ROCm components can be cloned from each of the GitHub repositories using git.
|
||||
|
||||
There are two methods to clone/sync the ROCm sources. You can use either of these methods to sync the ROCm sources.
|
||||
|
||||
## [Method 1]
|
||||
|
||||
For easy access to download the correct versions of each of these tools, the ROCm repository contains a repo manifest file called [default.xml](./default.xml). You can use this manifest file to download the source code for ROCm software.
|
||||
AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. The source code for ROCm components can be cloned from each of the GitHub repositories using git. For easy access to download the correct versions of each of these tools, the ROCm repository contains a repo manifest file called [default.xml](./default.xml). You can use this manifest file to download the source code for ROCm software.
|
||||
|
||||
### Installing the repo tool
|
||||
|
||||
We need the repo tool to work with the manifest file. The repo tool from Google allows you to manage multiple git repositories simultaneously. Run the following commands to install the repo tool:
|
||||
The repo tool from Google allows you to manage multiple git repositories simultaneously. Run the following commands to install the repo tool:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/bin/
|
||||
@@ -48,12 +43,11 @@ Some ROCm projects use the Git Large File Storage (LFS) format that may require
|
||||
sudo apt-get install git-lfs
|
||||
```
|
||||
|
||||
### Downloading the ROCm source code
|
||||
|
||||
The following example shows how to use the repo tool to download the ROCm source code. If you choose a directory other than ~/bin/ to install the repo tool, you must use that chosen directory in the code as shown below:
|
||||
|
||||
```bash
|
||||
# --------------------------------------
|
||||
# Step1: clone source code
|
||||
# --------------------------------------
|
||||
mkdir -p ~/ROCm/
|
||||
cd ~/ROCm/
|
||||
export ROCM_VERSION=6.4.0
|
||||
@@ -63,35 +57,29 @@ export ROCM_VERSION=6.4.0
|
||||
|
||||
**Note:** Using this sample code will cause the repo tool to download the open source code associated with the specified ROCm release. Ensure that you have ssh-keys configured on your machine for your GitHub ID prior to the download as explained at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh).
|
||||
|
||||
|
||||
## [Method 2]
|
||||
|
||||
This repository contains the source code for ROCm. Below you will find instructions for cloning the repository using submodules as an alternative to using the `repo` tool.
|
||||
|
||||
## Cloning with Git Submodules
|
||||
|
||||
As an alternative method, you can clone this repository and its submodules using Git's submodule functionality. This approach may be preferred if you are familiar with Git and wish to avoid using the `repo` tool.
|
||||
|
||||
To clone the repository along with all its submodules, use the following command:
|
||||
|
||||
```bash
|
||||
# --------------------------------------
|
||||
# Step1: clone source code
|
||||
# --------------------------------------
|
||||
git clone --recurse-submodules --remote-submodules https://github.com/ROCm/ROCm.git
|
||||
cd ROCm/submodule-srcs
|
||||
```
|
||||
## Building the ROCm source code
|
||||
|
||||
Each ROCm component repository contains directions for building that component, such as the rocSPARSE documentation [Installation and Building for Linux](https://rocm.docs.amd.com/projects/rocSPARSE/en/latest/install/Linux_Install_Guide.html). Refer to the specific component documentation for instructions on building the repository.
|
||||
|
||||
Each release of the ROCm software supports specific hardware and software configurations. Refer to [System requirements (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/system-requirements.html) for the current supported hardware and OS.
|
||||
|
||||
## Build ROCm from source
|
||||
|
||||
The build will use as many processors as it can find to build in parallel. Some of the compiles can consume as much as 10 GB of RAM, so make sure you have plenty of swap space!
|
||||
|
||||
By default the ROCm build will compile for all supported GPU architectures and will take approximately 500 CPU hours.
|
||||
The build time will be reduced significantly if you limit the GPU architectures to build for by using the environment variable GPU_ARCHS, as shown below.
|
||||
|
||||
```bash
|
||||
# --------------------------------------
|
||||
# Step1: clone source code
|
||||
# --------------------------------------
|
||||
|
||||
mkdir -p ~/WORKSPACE/ # Or any folder name other than WORKSPACE
|
||||
cd ~/WORKSPACE/
|
||||
export ROCM_VERSION=6.4.0
|
||||
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.4.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
|
||||
~/bin/repo sync
|
||||
|
||||
# --------------------------------------
|
||||
# Step 2: Prepare build environment
|
||||
|
||||
@@ -21,31 +21,68 @@ release cycles for PyTorch on ROCm:
|
||||
|
||||
- ROCm PyTorch release:
|
||||
|
||||
- Provides the latest version of ROCm but doesn't immediately support the latest stable PyTorch
|
||||
version.
|
||||
- Provides the latest version of ROCm but might not necessarily support the
|
||||
latest stable PyTorch version.
|
||||
|
||||
- Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
|
||||
pre-installed.
|
||||
preinstalled.
|
||||
|
||||
- ROCm PyTorch repository: `<https://github.com/ROCm/pytorch>`_
|
||||
|
||||
- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.
|
||||
- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>`
|
||||
to get started.
|
||||
|
||||
- Official PyTorch release:
|
||||
|
||||
- Provides the latest stable version of PyTorch but doesn't immediately support the latest ROCm version.
|
||||
- Provides the latest stable version of PyTorch but might not necessarily
|
||||
support the latest ROCm version.
|
||||
|
||||
- Official PyTorch repository: `<https://github.com/pytorch/pytorch>`_
|
||||
|
||||
- See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
|
||||
or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.
|
||||
or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_
|
||||
to get started.
|
||||
|
||||
The upstream PyTorch includes an automatic HIPification solution that automatically generates HIP
|
||||
source code from the CUDA backend. This approach allows PyTorch to support ROCm without requiring
|
||||
manual code modifications.
|
||||
PyTorch includes tooling that generates HIP source code from the CUDA backend.
|
||||
This approach allows PyTorch to support ROCm without requiring manual code
|
||||
modifications. For more information, see :doc:`HIPIFY <hipify:index>`.
|
||||
|
||||
Development of ROCm is aligned with the stable release of PyTorch while upstream PyTorch testing uses
|
||||
the stable release of ROCm to maintain consistency.
|
||||
ROCm development is aligned with the stable release of PyTorch, while upstream
|
||||
PyTorch testing uses the stable release of ROCm to maintain consistency.
|
||||
|
||||
.. _pytorch-recommendations:
|
||||
|
||||
Use cases and recommendations
|
||||
================================================================================
|
||||
|
||||
* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/training/benchmark-docker/pytorch-training>`
|
||||
guides how to leverage the ROCm platform for training AI models. It covers the
|
||||
steps, tools, and best practices for optimizing training workflows on AMD GPUs
|
||||
using PyTorch features.
|
||||
|
||||
* :doc:`Single-GPU fine-tuning and inference </how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference>`
|
||||
describes and demonstrates how to use the ROCm platform for the fine-tuning
|
||||
and inference of machine learning models, particularly large language models
|
||||
(LLMs), on systems with a single GPU. This topic provides a detailed guide for
|
||||
setting up, optimizing, and executing fine-tuning and inference workflows in
|
||||
such environments.
|
||||
|
||||
* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference>`
|
||||
describes and demonstrates the fine-tuning and inference of machine learning
|
||||
models on systems with multiple GPUs.
|
||||
|
||||
* The :doc:`Instinct MI300X workload optimization guide </how-to/rocm-for-ai/inference-optimization/workload>`
|
||||
provides detailed guidance on optimizing workloads for the AMD Instinct MI300X
|
||||
accelerator using ROCm. This guide helps users achieve optimal performance for
|
||||
deep learning and other high-performance computing tasks on the MI300X
|
||||
accelerator.
|
||||
|
||||
* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
|
||||
describes how PyTorch integrates with ROCm for AI workloads. It outlines the
|
||||
use of PyTorch on the ROCm platform and focuses on efficiently leveraging AMD
|
||||
GPU hardware for training and inference tasks in AI applications.
|
||||
|
||||
For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.
|
||||
|
||||
.. _pytorch-docker-compat:
|
||||
|
||||
@@ -56,10 +93,10 @@ Docker image compatibility
|
||||
|
||||
<i class="fab fa-docker"></i>
|
||||
|
||||
AMD validates and publishes ready-made `PyTorch images <https://hub.docker.com/r/rocm/pytorch>`_
|
||||
with ROCm backends on Docker Hub. The following Docker image tags and
|
||||
associated inventories are validated for `ROCm 6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`_.
|
||||
Click the |docker-icon| icon to view the image on Docker Hub.
|
||||
AMD validates and publishes `PyTorch images <https://hub.docker.com/r/rocm/pytorch>`_
|
||||
with ROCm backends on Docker Hub. The following Docker image tags and associated
|
||||
inventories were tested on `ROCm 6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`_.
|
||||
Click |docker-icon| to view the image on Docker Hub.
|
||||
|
||||
.. list-table:: PyTorch Docker image components
|
||||
:header-rows: 1
|
||||
@@ -212,13 +249,12 @@ Click the |docker-icon| icon to view the image on Docker Hub.
|
||||
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
Critical ROCm libraries for PyTorch
|
||||
Key ROCm libraries for PyTorch
|
||||
================================================================================
|
||||
|
||||
The functionality of PyTorch with ROCm is determined by its underlying library
|
||||
dependencies. These critical ROCm components affect the capabilities,
|
||||
performance, and feature set available to developers. The versions described
|
||||
are available in ROCm :version:`rocm_version`.
|
||||
PyTorch functionality on ROCm is determined by its underlying library
|
||||
dependencies. These ROCm components affect the capabilities, performance, and
|
||||
feature set available to developers.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
@@ -238,24 +274,23 @@ are available in ROCm :version:`rocm_version`.
|
||||
- :version-ref:`hipBLAS rocm_version`
|
||||
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
|
||||
matrix and vector operations.
|
||||
- Supports operations like matrix multiplication, matrix-vector products,
|
||||
and tensor contractions. Utilized in both dense and batched linear
|
||||
algebra operations.
|
||||
- Supports operations such as matrix multiplication, matrix-vector
|
||||
products, and tensor contractions. Utilized in both dense and batched
|
||||
linear algebra operations.
|
||||
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
|
||||
- :version-ref:`hipBLASLt rocm_version`
|
||||
- hipBLASLt is an extension of the hipBLAS library, providing additional
|
||||
features like epilogues fused into the matrix multiplication kernel or
|
||||
use of integer tensor cores.
|
||||
- It accelerates operations like ``torch.matmul``, ``torch.mm``, and the
|
||||
- Accelerates operations such as ``torch.matmul``, ``torch.mm``, and the
|
||||
matrix multiplications used in convolutional and linear layers.
|
||||
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
|
||||
- :version-ref:`hipCUB rocm_version`
|
||||
- Provides a C++ template library for parallel algorithms for reduction,
|
||||
scan, sort and select.
|
||||
- Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
|
||||
and ``torch.topk``. Operations on sparse tensors or tensors with
|
||||
irregular shapes often involve scanning, sorting, and filtering, which
|
||||
hipCUB handles efficiently.
|
||||
- Supports operations such as ``torch.sum``, ``torch.cumsum``,
|
||||
``torch.sort`` and ``torch.topk``. Operations on sparse tensors or
|
||||
tensors with irregular shapes often involve scanning, sorting, and
filtering, which hipCUB handles efficiently.
|
||||
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
|
||||
- :version-ref:`hipFFT rocm_version`
|
||||
- Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
|
||||
@@ -263,8 +298,8 @@ are available in ROCm :version:`rocm_version`.
|
||||
* - `hipRAND <https://github.com/ROCm/hipRAND>`_
|
||||
- :version-ref:`hipRAND rocm_version`
|
||||
- Provides fast random number generation for GPUs.
|
||||
- The ``torch.rand``, ``torch.randn`` and stochastic layers like
|
||||
``torch.nn.Dropout``.
|
||||
- The ``torch.rand``, ``torch.randn``, and stochastic layers like
|
||||
``torch.nn.Dropout`` rely on hipRAND.
|
||||
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
|
||||
- :version-ref:`hipSOLVER rocm_version`
|
||||
- Provides GPU-accelerated solvers for linear systems, eigenvalues, and
|
||||
@@ -335,7 +370,7 @@ are available in ROCm :version:`rocm_version`.
|
||||
- :version-ref:`RPP rocm_version`
|
||||
- Speeds up data augmentation, transformation, and other preprocessing steps.
|
||||
- Easy to integrate into PyTorch's ``torch.utils.data`` and
|
||||
``torchvision`` data load workloads.
|
||||
``torchvision`` data load workloads to speed up data processing.
|
||||
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
|
||||
- :version-ref:`rocThrust rocm_version`
|
||||
- Provides a C++ template library for parallel algorithms like sorting,
|
||||
@@ -352,11 +387,11 @@ are available in ROCm :version:`rocm_version`.
|
||||
involve matrix products, such as ``torch.matmul``, ``torch.bmm``, and
|
||||
more.
|
||||
|
||||
Supported and unsupported features
|
||||
Supported features
|
||||
================================================================================
|
||||
|
||||
The following section maps GPU-accelerated PyTorch features to their supported
|
||||
ROCm and PyTorch versions.
|
||||
This section maps GPU-accelerated PyTorch features to their supported ROCm and
|
||||
PyTorch versions.
|
||||
|
||||
torch
|
||||
--------------------------------------------------------------------------------
|
||||
@@ -364,23 +399,24 @@ torch
|
||||
`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
|
||||
PyTorch, providing data structures for multi-dimensional tensors and
|
||||
implementing mathematical operations on them. It also includes utilities for
|
||||
efficient serialization of tensors and arbitrary data types, along with various
|
||||
other tools.
|
||||
efficient serialization of tensors and arbitrary data types and other tools.
|
||||
|
||||
Tensor data types
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The data type of a tensor is specified using the ``dtype`` attribute or argument, and PyTorch supports a wide range of data types for different use cases.
|
||||
The tensor data type is specified using the ``dtype`` attribute or argument.
|
||||
PyTorch supports many data types for different use cases.
|
||||
|
||||
The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_'s single data types:
|
||||
The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_
|
||||
single data types:
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - ``torch.float8_e4m3fn``
|
||||
- 8-bit floating point, e4m3
|
||||
- 2.3
|
||||
@@ -472,11 +508,11 @@ The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors
|
||||
|
||||
.. note::
|
||||
|
||||
Unsigned types aside from ``uint8`` are currently only have limited support in
|
||||
eager mode (they primarily exist to assist usage with ``torch.compile``).
|
||||
Unsigned types except ``uint8`` have limited support in eager mode. They
|
||||
primarily exist to assist usage with ``torch.compile``.
|
||||
|
||||
The :doc:`ROCm precision support page <rocm:reference/precision-support>`
|
||||
collected the native HW support of different data types.
|
||||
See :doc:`ROCm precision support <rocm:reference/precision-support>` for the
|
||||
native hardware support of data types.
|
||||
|
||||
torch.cuda
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -491,8 +527,8 @@ leveraging ROCm and CUDA as the underlying frameworks.
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - Device management
|
||||
- Utilities for managing and interacting with GPUs.
|
||||
- 0.4.0
|
||||
@@ -566,8 +602,8 @@ PyTorch interacts with the ROCm or CUDA environment.
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - ``cufft_plan_cache``
|
||||
- Manages caching of GPU FFT plans to optimize repeated FFT computations.
|
||||
- 1.7.0
|
||||
@@ -615,8 +651,8 @@ Supported ``torch`` options include:
|
||||
|
||||
* - Option
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - ``allow_tf32``
|
||||
- TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
|
||||
Ampere or newer GPUs.
|
||||
@@ -631,28 +667,28 @@ Supported ``torch`` options include:
|
||||
Automatic mixed precision: torch.amp
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
PyTorch that automates the process of using both 16-bit (half-precision,
|
||||
float16) and 32-bit (single-precision, float32) floating-point types in model
|
||||
training and inference.
|
||||
PyTorch automates the process of using both 16-bit (half-precision, float16) and
|
||||
32-bit (single-precision, float32) floating-point types in model training and
|
||||
inference.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - Autocasting
|
||||
- Instances of autocast serve as context managers or decorators that allow
|
||||
- Autocast instances serve as context managers or decorators that allow
|
||||
regions of your script to run in mixed precision.
|
||||
- 1.9
|
||||
- 2.5
|
||||
* - Gradient scaling
|
||||
- To prevent underflow, “gradient scaling” multiplies the network’s
|
||||
loss(es) by a scale factor and invokes a backward pass on the scaled
|
||||
loss(es). Gradients flowing backward through the network are then
|
||||
scaled by the same factor. In other words, gradient values have a
|
||||
larger magnitude, so they don’t flush to zero.
|
||||
loss by a scale factor and invokes a backward pass on the scaled
|
||||
loss. The same factor then scales gradients flowing backward through
|
||||
the network. In other words, gradient values have a larger magnitude so
|
||||
that they don’t flush to zero.
|
||||
- 1.9
|
||||
- 2.5
|
||||
* - CUDA op-specific behavior
|
||||
@@ -666,7 +702,7 @@ training and inference.
|
||||
Distributed library features
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The PyTorch distributed library includes a collective of parallelism modules, a
|
||||
PyTorch distributed library includes a collective of parallelism modules, a
|
||||
communications layer, and infrastructure for launching and debugging large
|
||||
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.
|
||||
|
||||
@@ -680,13 +716,13 @@ of computational resources and scalability for large-scale tasks.
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - TensorPipe
|
||||
- A point-to-point communication library integrated into
|
||||
PyTorch for distributed training. It is designed to handle tensor data
|
||||
transfers efficiently between different processes or devices, including
|
||||
those on separate machines.
|
||||
PyTorch for distributed training. It handles tensor data transfers
|
||||
efficiently between different processes or devices, including those on
|
||||
separate machines.
|
||||
- 1.8
|
||||
- 5.4
|
||||
* - Gloo
|
||||
@@ -705,8 +741,8 @@ torch.compiler
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
- As of PyTorch
|
||||
- As of ROCm
|
||||
* - ``torch.compiler`` (AOT Autograd)
|
||||
- Autograd captures not only the user-level code, but also backpropagation,
|
||||
which results in capturing the backwards pass “ahead-of-time”. This
|
||||
@@ -729,8 +765,8 @@ The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
|
||||
utilities for processing audio data in PyTorch, such as audio loading,
|
||||
transformations, and feature extraction.
|
||||
|
||||
To ensure GPU-acceleration with ``torchaudio.transforms``, you need to move audio
|
||||
data (waveform tensor) explicitly to GPU using ``.to('cuda')``.
|
||||
To ensure GPU-acceleration with ``torchaudio.transforms``, you need to
|
||||
explicitly move audio data (waveform tensor) to GPU using ``.to('cuda')``.
|
||||
|
||||
The following ``torchaudio`` features are GPU-accelerated.
|
||||
|
||||
@@ -739,10 +775,10 @@ The following ``torchaudio`` features are GPU-accelerated.
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since torchaudio version
|
||||
- Since ROCm
|
||||
- As of torchaudio version
|
||||
- As of ROCm
|
||||
* - ``torchaudio.transforms.Spectrogram``
|
||||
- Generates spectrogram of an input waveform using STFT.
|
||||
- Generate a spectrogram of an input waveform using STFT.
|
||||
- 0.6.0
|
||||
- 4.5
|
||||
* - ``torchaudio.transforms.MelSpectrogram``
|
||||
@@ -762,7 +798,7 @@ torchvision
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
|
||||
provide datasets, model architectures, and common image transformations for
|
||||
provides datasets, model architectures, and common image transformations for
|
||||
computer vision.
|
||||
|
||||
The following ``torchvision`` features are GPU-accelerated.
|
||||
@@ -772,8 +808,8 @@ The following ``torchvision`` features are GPU-accelerated.
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since torchvision version
|
||||
- Since ROCm
|
||||
- As of torchvision version
|
||||
- As of ROCm
|
||||
* - ``torchvision.transforms.functional``
|
||||
- Provides GPU-compatible transformations for image preprocessing like
|
||||
resize, normalize, rotate and crop.
|
||||
@@ -819,7 +855,7 @@ torchtune
|
||||
The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library for
|
||||
authoring, fine-tuning and experimenting with LLMs.
|
||||
|
||||
* Usage: It works out-of-the-box, enabling developers to fine-tune ROCm PyTorch solutions.
|
||||
* Usage: Enabling developers to fine-tune ROCm PyTorch solutions.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
@@ -830,7 +866,8 @@ The `torchserve <https://pytorch.org/serve/>`_ is a PyTorch domain library
|
||||
for common sparsity and parallelism primitives needed for large-scale recommender
|
||||
systems.
|
||||
|
||||
* torchtext does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
|
||||
* torchtext does not implement its own kernels. ROCm support is enabled by
|
||||
linking against ROCm libraries.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
@@ -841,14 +878,16 @@ The `torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
|
||||
common sparsity and parallelism primitives needed for large-scale recommender
|
||||
systems.
|
||||
|
||||
* torchrec does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
|
||||
* torchrec does not implement its own kernels. ROCm support is enabled by
|
||||
linking against ROCm libraries.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
Unsupported PyTorch features
|
||||
----------------------------
|
||||
================================================================================
|
||||
|
||||
The following are GPU-accelerated PyTorch features not currently supported by ROCm.
|
||||
The following GPU-accelerated PyTorch features are not supported by ROCm for
|
||||
the listed supported PyTorch versions.
|
||||
|
||||
.. list-table::
|
||||
:widths: 30, 60, 10
|
||||
@@ -856,7 +895,7 @@ The following are GPU-accelerated PyTorch features not currently supported by RO
|
||||
|
||||
* - Feature
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- As of PyTorch
|
||||
* - APEX batch norm
|
||||
- Use APEX batch norm instead of PyTorch batch norm.
|
||||
- 1.6.0
|
||||
@@ -912,31 +951,3 @@ The following are GPU-accelerated PyTorch features not currently supported by RO
|
||||
utilized effectively through custom CUDA extensions or advanced
|
||||
workflows.
|
||||
- Not a core feature
|
||||
|
||||
Use cases and recommendations
|
||||
================================================================================
|
||||
|
||||
* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/training/train-a-model>` provides
|
||||
guidance on how to leverage the ROCm platform for training AI models. It covers the steps, tools, and best practices
|
||||
for optimizing training workflows on AMD GPUs using PyTorch features.
|
||||
|
||||
* :doc:`Single-GPU fine-tuning and inference </how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference>`
|
||||
describes and demonstrates how to use the ROCm platform for the fine-tuning and inference of
|
||||
machine learning models, particularly large language models (LLMs), on systems with a single AMD
|
||||
Instinct MI300X accelerator. This page provides a detailed guide for setting up, optimizing, and
|
||||
executing fine-tuning and inference workflows in such environments.
|
||||
|
||||
* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference>`
|
||||
describes and demonstrates the fine-tuning and inference of machine learning models on systems
|
||||
with multi MI300X accelerators.
|
||||
|
||||
* The :doc:`Instinct MI300X workload optimization guide </how-to/rocm-for-ai/inference-optimization/workload>` provides detailed
|
||||
guidance on optimizing workloads for the AMD Instinct MI300X accelerator using ROCm. This guide is aimed at helping
|
||||
users achieve optimal performance for deep learning and other high-performance computing tasks on the MI300X
|
||||
accelerator.
|
||||
|
||||
* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
|
||||
describes how PyTorch integrates with ROCm for AI workloads It outlines the use of PyTorch on the ROCm platform and
|
||||
focuses on how to efficiently leverage AMD GPU hardware for training and inference tasks in AI applications.
|
||||
|
||||
For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
vllm_benchmark:
|
||||
unified_docker:
|
||||
latest:
|
||||
pull_tag: rocm/vllm:instinct_main
|
||||
docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_instinct_vllm0.7.3_20250311/images/sha256-de0a2649b735f45b7ecab8813eb7b19778ae1f40591ca1196b07bc29c42ed4a3
|
||||
pull_tag: rocm/vllm:rocm6.3.1_instinct_vllm0.8.3_20250410
|
||||
docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_instinct_vllm0.8.3_20250410/images/sha256-a0b55c6c0f3fa5d437fb54a66e32a108306c36d4776e570dfd0ae902719bd190
|
||||
rocm_version: 6.3.1
|
||||
vllm_version: 0.7.3
|
||||
vllm_version: 0.8.3
|
||||
pytorch_version: 2.7.0 (dev nightly)
|
||||
hipblaslt_version: 0.13
|
||||
model_groups:
|
||||
@@ -102,19 +102,12 @@ vllm_benchmark:
|
||||
model_repo: Qwen/Qwen2-72B-Instruct
|
||||
url: https://huggingface.co/Qwen/Qwen2-72B-Instruct
|
||||
precision: float16
|
||||
- group: JAIS
|
||||
tag: jais
|
||||
models:
|
||||
- model: JAIS 13B
|
||||
mad_tag: pyt_vllm_jais-13b
|
||||
model_repo: core42/jais-13b-chat
|
||||
url: https://huggingface.co/core42/jais-13b-chat
|
||||
precision: float16
|
||||
- model: JAIS 30B
|
||||
mad_tag: pyt_vllm_jais-30b
|
||||
model_repo: core42/jais-30b-chat-v3
|
||||
url: https://huggingface.co/core42/jais-30b-chat-v3
|
||||
- model: QwQ-32B
|
||||
mad_tag: pyt_vllm_qwq-32b
|
||||
model_repo: Qwen/QwQ-32B
|
||||
url: https://huggingface.co/Qwen/QwQ-32B
|
||||
precision: float16
|
||||
tunableop: true
|
||||
- group: DBRX
|
||||
tag: dbrx
|
||||
models:
|
||||
|
||||
@@ -92,6 +92,10 @@ PyTorch inference performance testing
|
||||
|
||||
docker pull rocm/pytorch:rocm6.2.3_ubuntu22.04_py3.10_pytorch_release_2.3.0_triton_llvm_reg_issue
|
||||
|
||||
.. note::
|
||||
|
||||
The Chai-1 benchmark uses a specifically selected Docker image using ROCm 6.2.3 and PyTorch 2.3.0 to address an accuracy issue.
|
||||
|
||||
.. container:: model-doc pyt_clip_inference
|
||||
|
||||
2. Use the following command to pull the `ROCm PyTorch Docker image <https://hub.docker.com/layers/rocm/pytorch/rocm6.2.3_ubuntu22.04_py3.10_pytorch_release_2.3.0_triton_llvm_reg_issue/images/sha256-b736a4239ab38a9d0e448af6d4adca83b117debed00bfbe33846f99c4540f79b>`_ from Docker Hub.
|
||||
|
||||
@@ -34,7 +34,7 @@ vLLM inference performance testing
|
||||
|
||||
.. _vllm-benchmark-available-models:
|
||||
|
||||
Available models
|
||||
Supported models
|
||||
================
|
||||
|
||||
.. raw:: html
|
||||
@@ -183,6 +183,25 @@ vLLM inference performance testing
|
||||
to collect latency and throughput performance data, you can also change the benchmarking
|
||||
parameters. See the standalone benchmarking tab for more information.
|
||||
|
||||
{% if model.tunableop %}
|
||||
|
||||
.. note::
|
||||
|
||||
For improved performance, consider enabling :ref:`PyTorch TunableOp <mi300x-tunableop>`.
|
||||
TunableOp automatically explores different implementations and configurations of certain PyTorch
|
||||
operators to find the fastest one for your hardware.
|
||||
|
||||
By default, ``{{model.mad_tag}}`` runs with TunableOp disabled
|
||||
(see
|
||||
`<https://github.com/ROCm/MAD/blob/develop/models.json>`__). To
|
||||
enable it, edit the default run behavior in the ``models.json``
|
||||
configuration before running inference -- update the model's run
|
||||
``args`` by changing ``--tunableop off`` to ``--tunableop on``.
|
||||
|
||||
Enabling TunableOp triggers a two-pass run -- a warm-up followed by the performance-collection run.
|
||||
|
||||
{% endif %}
|
||||
|
||||
.. tab-item:: Standalone benchmarking
|
||||
|
||||
Run the vLLM benchmark tool independently by starting the
|
||||
@@ -331,11 +350,18 @@ for benchmarking, see the version-specific documentation.
|
||||
- PyTorch version
|
||||
- Resources
|
||||
|
||||
* - 6.3.1
|
||||
- 0.7.3
|
||||
- 2.7.0
|
||||
-
|
||||
* `Documentation <https://rocm.docs.amd.com/en/docs-6.3.3/how-to/rocm-for-ai/inference/vllm-benchmark.html>`_
|
||||
* `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_instinct_vllm0.7.3_20250325/images/sha256-25245924f61750b19be6dcd8e787e46088a496c1fe17ee9b9e397f3d84d35640>`_
|
||||
|
||||
* - 6.3.1
|
||||
- 0.6.6
|
||||
- 2.7.0
|
||||
-
|
||||
* `Documentation <https://rocm.docs.amd.com/en/docs-6.3.2/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.html>`_
|
||||
* `Documentation <https://rocm.docs.amd.com/en/docs-6.3.2/how-to/rocm-for-ai/inference/vllm-benchmark.html>`_
|
||||
* `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6/images/sha256-9a12ef62bbbeb5a4c30a01f702c8e025061f575aa129f291a49fbd02d6b4d6c9>`_
|
||||
|
||||
* - 6.2.1
|
||||
|
||||
Submodule submodule-srcs/AMDMIGraphX deleted from e8b735f65e
Submodule submodule-srcs/HIP deleted from a0a2dd7fc8
Submodule submodule-srcs/HIPIFY deleted from b803a5270b
Submodule submodule-srcs/MIOpen deleted from a583315f3e
Submodule submodule-srcs/MIVisionX deleted from a2b69e5b30
Submodule submodule-srcs/ROCK-Kernel-Driver deleted from e796ccd5f9
Submodule submodule-srcs/ROCR-Runtime deleted from 4264d016ec
Submodule submodule-srcs/ROCdbgapi deleted from 59be7ff0aa
Submodule submodule-srcs/ROCgdb deleted from 401bb21f2f
Submodule submodule-srcs/ROCmValidationSuite deleted from fb251886ed
Submodule submodule-srcs/Tensile deleted from be49885fce
Submodule submodule-srcs/TransferBench deleted from 3ea2f226ec
Submodule submodule-srcs/amdsmi deleted from ede62f2534
Submodule submodule-srcs/clr deleted from 0f2d602424
Submodule submodule-srcs/composable_kernel deleted from a8c5bd9b9a
Submodule submodule-srcs/half deleted from 1ddada2251
Submodule submodule-srcs/hip-tests deleted from 3573bde0c2
Submodule submodule-srcs/hipBLAS deleted from 0a335435e9
Submodule submodule-srcs/hipBLAS-common deleted from 7c1566ba46
Submodule submodule-srcs/hipBLASLt deleted from a999b0721d
Submodule submodule-srcs/hipCUB deleted from a6005943c5
Submodule submodule-srcs/hipFFT deleted from 396169c84a
Submodule submodule-srcs/hipRAND deleted from d2516cc199
Submodule submodule-srcs/hipSOLVER deleted from ca0de3c9c9
Submodule submodule-srcs/hipSPARSE deleted from a6c62e48eb
Submodule submodule-srcs/hipSPARSELt deleted from f3f4f590a4
Submodule submodule-srcs/hipTensor deleted from e5529b9291
Submodule submodule-srcs/hipfort deleted from f3d6aa3e86
Submodule submodule-srcs/hipother deleted from 49b1588f83
Submodule submodule-srcs/llvm-project deleted from c7fe45cf4b
Submodule submodule-srcs/openmp-extras/aomp deleted from 1cd9ec1017
Submodule submodule-srcs/openmp-extras/aomp-extras deleted from 97567952ae
Submodule submodule-srcs/rccl deleted from 7b86f83d84
Submodule submodule-srcs/rdc deleted from be34d624f6
Submodule submodule-srcs/rocAL deleted from 373ef865ac
Submodule submodule-srcs/rocALUTION deleted from 9713084af8
Submodule submodule-srcs/rocBLAS deleted from 80e5394d6a
Submodule submodule-srcs/rocDecode deleted from a2a7b63cad
Submodule submodule-srcs/rocFFT deleted from 058ba87fdc
Submodule submodule-srcs/rocJPEG deleted from 73d36d35d9
Submodule submodule-srcs/rocPRIM deleted from d8771ec18a
Submodule submodule-srcs/rocPyDecode deleted from 848e49d29d
Submodule submodule-srcs/rocRAND deleted from 4d5d3a88d1
Submodule submodule-srcs/rocSHMEM deleted from 7702b3c0f3
Submodule submodule-srcs/rocSOLVER deleted from db754e3f55
Submodule submodule-srcs/rocSPARSE deleted from 4953add0ae
Submodule submodule-srcs/rocThrust deleted from 6bf2777019
Submodule submodule-srcs/rocWMMA deleted from 1a5b623166
Submodule submodule-srcs/rocm-cmake deleted from ecc716b97c
Submodule submodule-srcs/rocm-core deleted from 73dae9c82a
Submodule submodule-srcs/rocm-examples deleted from 3bbd2987a3
Submodule submodule-srcs/rocm_bandwidth_test deleted from 84b8ddd268
Submodule submodule-srcs/rocm_smi_lib deleted from 03a4530b68
Submodule submodule-srcs/rocminfo deleted from 6ea2ba38c8
Submodule submodule-srcs/rocprofiler deleted from 40da7312a0
Submodule submodule-srcs/rocprofiler-compute deleted from a11d700e10
Submodule submodule-srcs/rocprofiler-register deleted from 7c6cd44f63
Submodule submodule-srcs/rocprofiler-sdk deleted from e8e49fe769
Submodule submodule-srcs/rocprofiler-systems deleted from 9c07bf3ab0
Submodule submodule-srcs/rocr_debug_agent deleted from 5c49ec91fd
Submodule submodule-srcs/roctracer deleted from f55a694381
Submodule submodule-srcs/rpp deleted from 5fb204ca70
Submodule submodule-srcs/spirv-llvm-translator deleted from 8ed662a93b
Reference in New Issue
Block a user