From 3c71bb25e8bed1dbbacc01362839fd37dbf8904e Mon Sep 17 00:00:00 2001 From: Jan Stephan Date: Wed, 16 Jul 2025 14:10:06 +0200 Subject: [PATCH 01/81] Make initial directory and copy operations platform-independent --- docs/conf.py | 21 ++++++++++++++------- docs/contribute/building.md | 22 ++++++++++++++++++++-- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index dfd48d80c..d672ba3a0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,17 +9,21 @@ import shutil import sys from pathlib import Path -shutil.copy2("../RELEASE.md", "./about/release-notes.md") -shutil.copy2("../CHANGELOG.md", "./release/changelog.md") +gh_release_path = os.path.join("..", "RELEASE.md") +gh_changelog_path = os.path.join("..", "CHANGELOG.md") +sphinx_release_path = os.path.join("about", "release-notes.md") +sphinx_changelog_path = os.path.join("release", "changelog.md") +shutil.copy2(gh_release_path, sphinx_release_path) +shutil.copy2(gh_changelog_path, sphinx_changelog_path) # Mark the consolidated changelog as orphan to prevent Sphinx from warning about missing toctree entries -with open("./release/changelog.md", "r+") as file: +with open(sphinx_changelog_path, "r+", encoding="utf-8") as file: content = file.read() file.seek(0) file.write(":orphan:\n" + content) # Replace GitHub-style [!ADMONITION]s with Sphinx-compatible ```{admonition} blocks -with open("./release/changelog.md", "r") as file: +with open(sphinx_changelog_path, "r", encoding="utf-8") as file: lines = file.readlines() modified_lines = [] @@ -57,11 +61,14 @@ with open("./release/changelog.md", "r") as file: file.close() - with open("./release/changelog.md", 'w') as file: + with open(sphinx_changelog_path, "w", encoding="utf-8") as file: file.writelines(modified_lines) -os.system("mkdir -p ../_readthedocs/html/downloads") -os.system("cp compatibility/compatibility-matrix-historical-6.0.csv ../_readthedocs/html/downloads/compatibility-matrix-historical-6.0.csv") +matrix_path = os.path.join("compatibility", "compatibility-matrix-historical-6.0.csv") +rtd_path = os.path.join("..", "_readthedocs", "html", "downloads") +if not os.path.exists(rtd_path): + os.makedirs(rtd_path) +shutil.copy2(matrix_path, rtd_path) latex_engine = "xelatex" latex_elements = { diff --git a/docs/contribute/building.md b/docs/contribute/building.md index 97801832b..d4b88b071 100644 --- a/docs/contribute/building.md +++ b/docs/contribute/building.md @@ -28,13 +28,31 @@ See the [Python requirements file](https://github.com/ROCm/ROCm/blob/develop/doc Use the Python Virtual Environment (`venv`) and run the following commands from the project root: +::::{tab-set} +:::{tab-item} Linux and WSL +:sync: linux + ```sh python3 -mvenv .venv -.venv/bin/python -m pip install -r docs/sphinx/requirements.txt -.venv/bin/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html +.venv/bin/python -m pip install -r docs/sphinx/requirements.txt +.venv/bin/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html ``` +::: +:::{tab-item} Windows +:sync: windows + +```powershell +python -mvenv .venv + +.venv\Scripts\python.exe -m pip install -r docs/sphinx/requirements.txt +.venv\Scripts\python.exe -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html +``` + +::: +:::: + Navigate to `_build/html/index.html` and open this file in a web browser. 
## Visual Studio Code From 1cf3eef9dafc7c7003105dd127878a969eeccfb5 Mon Sep 17 00:00:00 2001 From: Matt Williams Date: Mon, 28 Jul 2025 14:39:39 -0400 Subject: [PATCH 02/81] AQLProfile component additions --- docs/about/license.md | 9 ++++----- docs/compatibility/compatibility-matrix.rst | 1 + docs/reference/rocm-tools.md | 1 + docs/what-is-rocm.rst | 1 + 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/about/license.md b/docs/about/license.md index 3ab8b3544..27fdc163a 100644 --- a/docs/about/license.md +++ b/docs/about/license.md @@ -29,6 +29,7 @@ additional licenses. Please review individual repositories for more information. | [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/amd-staging/LICENSE) | | [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) | | [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) | +| [AQLProfile]| [MIT] | | [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) | | [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) | | [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) | @@ -46,7 +47,6 @@ additional licenses. Please review individual repositories for more information. | [hipSPARSE](https://github.com/ROCm/hipSPARSE/) | [MIT](https://github.com/ROCm/hipSPARSE/blob/develop/LICENSE.md) | | [hipSPARSELt](https://github.com/ROCm/hipSPARSELt/) | [MIT](https://github.com/ROCm/hipSPARSELt/blob/develop/LICENSE.md) | | [hipTensor](https://github.com/ROCm/hipTensor) | [MIT](https://github.com/ROCm/hipTensor/blob/develop/LICENSE) | -| hsa-amd-aqlprofile | [AMD Software EULA](https://www.amd.com/en/legal/eula/amd-software-eula.html) | | [llvm-project](https://github.com/ROCm/llvm-project/) | [Apache](https://github.com/ROCm/llvm-project/blob/amd-staging/LICENSE.TXT) | | [llvm-project/flang](https://github.com/ROCm/llvm-project/tree/amd-staging/flang) | [Apache 2.0](https://github.com/ROCm/llvm-project/blob/amd-staging/flang/LICENSE.TXT) | | [MIGraphX](https://github.com/ROCm/AMDMIGraphX/) | [MIT](https://github.com/ROCm/AMDMIGraphX/blob/develop/LICENSE) | @@ -132,12 +132,11 @@ companies. ### Package licensing :::{attention} -AQL Profiler and AOCC CPU optimization are both provided in binary form, each -subject to the license agreement enclosed in the directory for the binary available +AOCC CPU optimization is provided in binary form, subject to the license agreement enclosed in the directory for the binary available in `/opt/rocm/share/doc/hsa-amd-aqlprofile/EULA`. By using, installing, -copying or distributing AQL Profiler and/or AOCC CPU Optimizations, you agree to +copying or distributing AOCC CPU Optimizations, you agree to the terms and conditions of this license agreement. If you do not agree to the -terms of this agreement, do not install, copy or use the AQL Profiler and/or the +terms of this agreement, do not install, copy or use the AOCC CPU Optimizations. 
::: diff --git a/docs/compatibility/compatibility-matrix.rst b/docs/compatibility/compatibility-matrix.rst index b61cfca48..6f05217ed 100644 --- a/docs/compatibility/compatibility-matrix.rst +++ b/docs/compatibility/compatibility-matrix.rst @@ -123,6 +123,7 @@ compatibility and system requirements. :doc:`ROCm Validation Suite `,1.1.0,1.1.0,1.1.0 ,,, PERFORMANCE TOOLS,,, + :doc:`AQLProfile `,0.6.0,0.6.0,0.5.0 :doc:`ROCm Bandwidth Test `,1.4.0,1.4.0,1.4.0 :doc:`ROCm Compute Profiler `,3.1.1,3.1.0,3.0.0 :doc:`ROCm Systems Profiler `,1.0.2,1.0.1,0.1.0 diff --git a/docs/reference/rocm-tools.md b/docs/reference/rocm-tools.md index 6f3c1fbfd..71d7a65b2 100644 --- a/docs/reference/rocm-tools.md +++ b/docs/reference/rocm-tools.md @@ -29,6 +29,7 @@ (performance-tools)= +* {doc}`AQLProfile ` * {doc}`ROCm Bandwidth Test ` * {doc}`ROCm Compute Profiler ` * {doc}`ROCm Systems Profiler ` diff --git a/docs/what-is-rocm.rst b/docs/what-is-rocm.rst index 4accd00ed..32722b498 100644 --- a/docs/what-is-rocm.rst +++ b/docs/what-is-rocm.rst @@ -110,6 +110,7 @@ Performance .. csv-table:: :header: "Component", "Description" + ":doc:`AQLProfile `", "The Architected Queuing Language Profiling Library (AQLProfile) is an open source library that enables advanced GPU profiling and tracing on AMD platforms" ":doc:`ROCm Bandwidth Test `", "Captures the performance characteristics of buffer copying and kernel read/write operations" ":doc:`ROCm Compute Profiler `", "Kernel-level profiling for machine learning and high performance computing (HPC) workloads" ":doc:`ROCm Systems Profiler `", "Comprehensive profiling and tracing of applications running on the CPU or the CPU and GPU" From 95543cae2ab06270a406cb55cff2474e22a63449 Mon Sep 17 00:00:00 2001 From: Matt Williams Date: Wed, 30 Jul 2025 14:43:52 -0400 Subject: [PATCH 03/81] Final edits --- docs/about/license.md | 9 ++++----- docs/compatibility/compatibility-matrix.rst | 1 - docs/reference/rocm-tools.md | 1 - docs/what-is-rocm.rst | 1 - 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/docs/about/license.md b/docs/about/license.md index 27fdc163a..c6c44e31d 100644 --- a/docs/about/license.md +++ b/docs/about/license.md @@ -29,7 +29,7 @@ additional licenses. Please review individual repositories for more information. | [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/amd-staging/LICENSE) | | [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) | | [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) | -| [AQLProfile]| [MIT] | +| [AQLprofile]| [MIT](https://github.com/ROCm/aqlprofile/blob/amd-staging/LICENSE) | | [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) | | [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) | | [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) | @@ -132,11 +132,10 @@ companies. ### Package licensing :::{attention} -AOCC CPU optimization is provided in binary form, subject to the license agreement enclosed in the directory for the binary available -in `/opt/rocm/share/doc/hsa-amd-aqlprofile/EULA`. 
By using, installing, -copying or distributing AOCC CPU Optimizations, you agree to +ROCprof Trace Decoder and AOCC CPU optimizations are provided in binary form, subject to the license agreement enclosed in the directory for the binary available in `/opt/rocm/share/doc/rocprof-trace-decoder/EULA`. By using, installing, +copying or distributing ROCprof Trace Decoder or AOCC CPU Optimizations, you agree to the terms and conditions of this license agreement. If you do not agree to the -terms of this agreement, do not install, copy or use the +terms of this agreement, do not install, copy or use ROCprof Trace Decoder or the AOCC CPU Optimizations. ::: diff --git a/docs/compatibility/compatibility-matrix.rst b/docs/compatibility/compatibility-matrix.rst index 6f05217ed..b61cfca48 100644 --- a/docs/compatibility/compatibility-matrix.rst +++ b/docs/compatibility/compatibility-matrix.rst @@ -123,7 +123,6 @@ compatibility and system requirements. :doc:`ROCm Validation Suite `,1.1.0,1.1.0,1.1.0 ,,, PERFORMANCE TOOLS,,, - :doc:`AQLProfile `,0.6.0,0.6.0,0.5.0 :doc:`ROCm Bandwidth Test `,1.4.0,1.4.0,1.4.0 :doc:`ROCm Compute Profiler `,3.1.1,3.1.0,3.0.0 :doc:`ROCm Systems Profiler `,1.0.2,1.0.1,0.1.0 diff --git a/docs/reference/rocm-tools.md b/docs/reference/rocm-tools.md index 71d7a65b2..6f3c1fbfd 100644 --- a/docs/reference/rocm-tools.md +++ b/docs/reference/rocm-tools.md @@ -29,7 +29,6 @@ (performance-tools)= -* {doc}`AQLProfile ` * {doc}`ROCm Bandwidth Test ` * {doc}`ROCm Compute Profiler ` * {doc}`ROCm Systems Profiler ` diff --git a/docs/what-is-rocm.rst b/docs/what-is-rocm.rst index 32722b498..4accd00ed 100644 --- a/docs/what-is-rocm.rst +++ b/docs/what-is-rocm.rst @@ -110,7 +110,6 @@ Performance .. csv-table:: :header: "Component", "Description" - ":doc:`AQLProfile `", "The Architected Queuing Language Profiling Library (AQLProfile) is an open source library that enables advanced GPU profiling and tracing on AMD platforms" ":doc:`ROCm Bandwidth Test `", "Captures the performance characteristics of buffer copying and kernel read/write operations" ":doc:`ROCm Compute Profiler `", "Kernel-level profiling for machine learning and high performance computing (HPC) workloads" ":doc:`ROCm Systems Profiler `", "Comprehensive profiling and tracing of applications running on the CPU or the CPU and GPU" From 9786a7539050f56884bca532398abfdea0e62b1a Mon Sep 17 00:00:00 2001 From: Matt Williams Date: Thu, 31 Jul 2025 10:33:36 -0400 Subject: [PATCH 04/81] Update license --- docs/about/license.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/about/license.md b/docs/about/license.md index c6c44e31d..91dbca114 100644 --- a/docs/about/license.md +++ b/docs/about/license.md @@ -29,7 +29,7 @@ additional licenses. Please review individual repositories for more information. 
| [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/amd-staging/LICENSE) | | [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) | | [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) | -| [AQLprofile]| [MIT](https://github.com/ROCm/aqlprofile/blob/amd-staging/LICENSE) | +| [AQLprofile] | [MIT](https://github.com/ROCm/aqlprofile/blob/amd-staging/LICENSE) | | [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) | | [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) | | [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) | @@ -132,7 +132,7 @@ companies. ### Package licensing :::{attention} -ROCprof Trace Decoder and AOCC CPU optimizations are provided in binary form, subject to the license agreement enclosed in the directory for the binary available in `/opt/rocm/share/doc/rocprof-trace-decoder/EULA`. By using, installing, +ROCprof Trace Decoder and AOCC CPU optimizations are provided in binary form, subject to the license agreement enclosed on [GitHub](https://github.com/ROCm/rocprof-trace-decoder/blob/amd-mainline/LICENSE) for ROCprof Trace Decoder, and [Developer Central](https://www.amd.com/en/developer/aocc.html) for AOCC. By using, installing, copying or distributing ROCprof Trace Decoder or AOCC CPU Optimizations, you agree to the terms and conditions of this license agreement. If you do not agree to the terms of this agreement, do not install, copy or use ROCprof Trace Decoder or the From dd56fd4d3abf5696e8b258e7d60849882724872a Mon Sep 17 00:00:00 2001 From: anisha-amd Date: Tue, 12 Aug 2025 14:25:37 -0400 Subject: [PATCH 05/81] develop: compatibility matrix frameworks support update (#5185) --- docs/compatibility/compatibility-matrix-historical-6.0.csv | 4 ++-- docs/compatibility/compatibility-matrix.rst | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/compatibility/compatibility-matrix-historical-6.0.csv b/docs/compatibility/compatibility-matrix-historical-6.0.csv index 38a9ed893..b8f7b6ba2 100644 --- a/docs/compatibility/compatibility-matrix-historical-6.0.csv +++ b/docs/compatibility/compatibility-matrix-historical-6.0.csv @@ -31,9 +31,9 @@ ROCm Version,6.4.3,6.4.2,6.4.1,6.4.0,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6 :doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.18.1, 2.17.1, 2.16.2","2.18.1, 2.17.1, 2.16.2","2.18.1, 2.17.1, 2.16.2","2.18.1, 2.17.1, 2.16.2","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1" :doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.4.35,0.4.35,0.4.35,0.4.35,0.4.31,0.4.31,0.4.31,0.4.31,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26 :doc:`verl <../compatibility/ml-compatibility/verl-compatibility>` [#verl_compat]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.3.0.post0,N/A,N/A,N/A,N/A,N/A - :doc:`Stanford Megatron-LM 
<../compatibility/ml-compatibility/stanford-megatron-lm-compatibility>`,N/A,N/A,N/A,N/A,85f95ae,85f95ae,85f95ae,85f95ae,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A + :doc:`Stanford Megatron-LM <../compatibility/ml-compatibility/stanford-megatron-lm-compatibility>` [#stanford-megatron-lm_compat]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,85f95ae,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A :doc:`DGL <../compatibility/ml-compatibility/dgl-compatibility>` [#dgl_compat]_,N/A,N/A,N/A,2.4.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A, - :doc:`Megablocks <../compatibility/ml-compatibility/megablocks-compatibility>`,N/A,N/A,N/A,N/A,0.7.0,0.7.0,0.7.0,0.7.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A + :doc:`Megablocks <../compatibility/ml-compatibility/megablocks-compatibility>` [#megablocks_compat]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.7.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A :doc:`Taichi <../compatibility/ml-compatibility/taichi-compatibility>` [#taichi_compat]_,N/A,N/A,N/A,N/A,N/A,1.8.0b1,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A `ONNX Runtime `_,1.2,1.2,1.2,1.2,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1 ,,,,,,,,,,,,,,,,,, diff --git a/docs/compatibility/compatibility-matrix.rst b/docs/compatibility/compatibility-matrix.rst index 3cae61198..797e2894e 100644 --- a/docs/compatibility/compatibility-matrix.rst +++ b/docs/compatibility/compatibility-matrix.rst @@ -242,7 +242,9 @@ Expand for full historical view of: .. [#mi300_602-past-60] **For ROCm 6.0.2** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3. .. [#mi300_600-past-60] **For ROCm 6.0.0** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3. .. [#verl_compat] verl is only supported on ROCm 6.2.0. + .. [#stanford-megatron-lm_compat] Stanford Megatron-LM is only supported on ROCm 6.3.0. .. [#dgl_compat] DGL is only supported on ROCm 6.4.0. + .. [#megablocks_compat] Megablocks is only supported on ROCm 6.3.0. .. [#taichi_compat] Taichi is only supported on ROCm 6.3.2. .. [#kfd_support-past-60] As of ROCm 6.4.0, forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software is provided up to a year apart. For earlier ROCm releases, the compatibility is provided for +/- 2 releases. The tested user space versions on this page were accurate as of the time of initial ROCm release. For the most up-to-date information, see the latest version of this information at `User and kernel-space support matrix `_. .. [#ROCT-rocr-past-60] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package. From c4135ab541925c7cb0c99aec861ebf6abddac0ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Aug 2025 09:22:31 -0600 Subject: [PATCH 06/81] Bump sphinx-sitemap from 2.7.2 to 2.8.0 in /docs/sphinx (#5192) Bumps [sphinx-sitemap](https://github.com/jdillard/sphinx-sitemap) from 2.7.2 to 2.8.0. - [Release notes](https://github.com/jdillard/sphinx-sitemap/releases) - [Changelog](https://github.com/jdillard/sphinx-sitemap/blob/master/CHANGELOG.rst) - [Commits](https://github.com/jdillard/sphinx-sitemap/compare/v2.7.2...v2.8.0) --- updated-dependencies: - dependency-name: sphinx-sitemap dependency-version: 2.8.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/sphinx/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sphinx/requirements.txt b/docs/sphinx/requirements.txt index 2b678c219..bd30d7406 100644 --- a/docs/sphinx/requirements.txt +++ b/docs/sphinx/requirements.txt @@ -234,7 +234,7 @@ sphinx-notfound-page==1.1.0 # via rocm-docs-core sphinx-reredirects==0.1.6 # via -r requirements.in -sphinx-sitemap==2.7.2 +sphinx-sitemap==2.8.0 # via -r requirements.in sphinxcontrib-applehelp==2.0.0 # via sphinx From 39e7ccd3c54502cbcad85c97379b2615a5c51de9 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Tue, 12 Aug 2025 17:44:46 -0400 Subject: [PATCH 07/81] Update variables-global.yml --- .azuredevops/variables-global.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.azuredevops/variables-global.yml b/.azuredevops/variables-global.yml index de2ff9411..831e2cb47 100644 --- a/.azuredevops/variables-global.yml +++ b/.azuredevops/variables-global.yml @@ -28,13 +28,13 @@ variables: - name: GFX90A_TEST_POOL value: gfx90a_test_pool - name: LATEST_RELEASE_VERSION - value: 6.4.2 + value: 6.4.3 - name: REPO_RADEON_VERSION - value: 6.4.2 + value: 6.4.3 - name: NEXT_RELEASE_VERSION value: 7.0.0 - name: LATEST_RELEASE_TAG - value: rocm-6.4.2 + value: rocm-6.4.3 - name: DOCKER_SKIP_GFX value: gfx90a - name: COMPOSABLE_KERNEL_PIPELINE_ID From ec05312de78f7f77dc75a8266992e5aff8bee48d Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Thu, 14 Aug 2025 14:31:34 -0400 Subject: [PATCH 08/81] [Ex CI] enable rocprofiler monorepo (#5197) * [Ex CI] enable rocprofiler monorepo * set ROCM_PATH --- .azuredevops/components/rocprofiler.yml | 151 ++++++++++++++---------- 1 file changed, 91 insertions(+), 60 deletions(-) diff --git a/.azuredevops/components/rocprofiler.yml b/.azuredevops/components/rocprofiler.yml index ddad69e3c..cb9195bb3 100644 --- a/.azuredevops/components/rocprofiler.yml +++ b/.azuredevops/components/rocprofiler.yml @@ -8,6 +8,22 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -70,6 +86,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -94,6 +114,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-vendor.yml parameters: dependencyList: @@ -108,6 +129,8 @@ jobs: gpuTarget: ${{ job.target }} os: ${{ job.os }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ 
variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} @@ -115,6 +138,7 @@ jobs: extraBuildFlags: >- -DCMAKE_MODULE_PATH=$(Build.SourcesDirectory)/cmake_modules;$(Agent.BuildDirectory)/rocm/lib/cmake;$(Agent.BuildDirectory)/rocm/lib/cmake/hip;$(Agent.BuildDirectory)/rocm/lib64/cmake;$(Agent.BuildDirectory)/rocm/lib64/cmake/hip -DCMAKE_PREFIX_PATH="$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/vendor" + -DROCM_PATH=$(Agent.BuildDirectory)/rocm -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DENABLE_LDCONFIG=OFF -DUSE_PROF_API=1 @@ -122,10 +146,13 @@ jobs: multithreadFlag: -- -j32 - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} os: ${{ job.os }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml @@ -139,63 +166,67 @@ jobs: - HIP_ROCCLR_HOME:::/home/user/workspace/rocm - ROCM_PATH:::/home/user/workspace/rocm -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} - dependsOn: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - - name: ROCM_PATH - value: $(Agent.BuildDirectory)/rocm - - name: LD_LIBRARY_PATH - value: $(Agent.BuildDirectory)/rocm/lib/rocprofiler:$(Agent.BuildDirectory)/rocm/share/rocprofiler/tests-v1/test:$(Agent.BuildDirectory)/rocm/share/rocprofiler/tests - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocprofilerV1 - testDir: $(Agent.BuildDirectory)/rocm/share/rocprofiler/tests-v1 - testExecutable: ./run.sh - testParameters: '' - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocprofilerV2 - testDir: $(Agent.BuildDirectory)/rocm - testExecutable: share/rocprofiler/tests/runUnitTests - testParameters: '--gtest_output=xml:./test_output.xml --gtest_color=yes' - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH 
}}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} + dependsOn: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: ROCM_PATH + value: $(Agent.BuildDirectory)/rocm + - name: LD_LIBRARY_PATH + value: $(Agent.BuildDirectory)/rocm/lib/rocprofiler:$(Agent.BuildDirectory)/rocm/share/rocprofiler/tests-v1/test:$(Agent.BuildDirectory)/rocm/share/rocprofiler/tests + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + preTargetFilter: ${{ parameters.componentName }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: rocprofilerV1 + testDir: $(Agent.BuildDirectory)/rocm/share/rocprofiler/tests-v1 + testExecutable: ./run.sh + testParameters: '' + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: rocprofilerV2 + testDir: $(Agent.BuildDirectory)/rocm + testExecutable: share/rocprofiler/tests/runUnitTests + testParameters: '--gtest_output=xml:./test_output.xml --gtest_color=yes' + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} From 7ee22790ce2889c3e310a40988e0e0fe721a6fe9 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Thu, 14 Aug 2025 15:43:36 -0400 Subject: [PATCH 09/81] docs: Update vLLM benchmark doc for 20250812 Docker release (#5196) --- .wordlist.txt | 1 + docs/conf.py | 2 + .../vllm_0.9.1_20250715-benchmark_models.yaml | 163 +++++++ .../inference/vllm-benchmark-models.yaml | 83 +--- .../previous-versions/vllm-0.9.1-20250702.rst | 6 +- .../previous-versions/vllm-0.9.1-20250715.rst | 450 ++++++++++++++++++ .../previous-versions/vllm-history.rst | 13 +- .../inference/benchmark-docker/vllm.rst | 149 +++--- 8 files changed, 706 insertions(+), 161 
deletions(-) create mode 100644 docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml create mode 100644 docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst diff --git a/.wordlist.txt b/.wordlist.txt index c32752d7c..7b592fc91 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -5,6 +5,7 @@ ACEs ACS AccVGPR AccVGPRs +AITER ALU AllReduce AMD diff --git a/docs/conf.py b/docs/conf.py index c4753c4b7..17ed6810c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -147,6 +147,8 @@ article_pages = [ {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.8.5-20250521", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.0.1-20250605", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.0.1-20250702", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250702", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/inference/deploy-your-model", "os": ["linux"]}, diff --git a/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml new file mode 100644 index 000000000..5682828ce --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml @@ -0,0 +1,163 @@ +vllm_benchmark: + unified_docker: + latest: + # TODO: update me + pull_tag: rocm/vllm:rocm6.4.1_vllm_0.9.1_20250715 + docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.9.1_20250715/images/sha256-4a429705fa95a58f6d20aceab43b1b76fa769d57f32d5d28bd3f4e030e2a78ea + rocm_version: 6.4.1 + vllm_version: 0.9.1 (0.9.2.dev364+gb432b7a28.rocm641) + pytorch_version: 2.7.0+gitf717b2a + hipblaslt_version: 0.15 + model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.1 8B + mad_tag: pyt_vllm_llama-3.1-8b + model_repo: meta-llama/Llama-3.1-8B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: float16 + - model: Llama 3.1 70B + mad_tag: pyt_vllm_llama-3.1-70b + model_repo: meta-llama/Llama-3.1-70B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: float16 + - model: Llama 3.1 405B + mad_tag: pyt_vllm_llama-3.1-405b + model_repo: meta-llama/Llama-3.1-405B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct + precision: float16 + - model: Llama 2 7B + mad_tag: pyt_vllm_llama-2-7b + model_repo: meta-llama/Llama-2-7b-chat-hf + url: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf + precision: float16 + - model: Llama 2 70B + mad_tag: pyt_vllm_llama-2-70b + model_repo: meta-llama/Llama-2-70b-chat-hf + url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf + precision: float16 + - model: Llama 3.1 8B FP8 + mad_tag: pyt_vllm_llama-3.1-8b_fp8 + model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV + precision: float8 + - model: Llama 3.1 70B FP8 + mad_tag: pyt_vllm_llama-3.1-70b_fp8 + model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV + precision: float8 + - model: Llama 3.1 
405B FP8 + mad_tag: pyt_vllm_llama-3.1-405b_fp8 + model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV + precision: float8 + - group: Mistral AI + tag: mistral + models: + - model: Mixtral MoE 8x7B + mad_tag: pyt_vllm_mixtral-8x7b + model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + precision: float16 + - model: Mixtral MoE 8x22B + mad_tag: pyt_vllm_mixtral-8x22b + model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 + precision: float16 + - model: Mistral 7B + mad_tag: pyt_vllm_mistral-7b + model_repo: mistralai/Mistral-7B-Instruct-v0.3 + url: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3 + precision: float16 + - model: Mixtral MoE 8x7B FP8 + mad_tag: pyt_vllm_mixtral-8x7b_fp8 + model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + precision: float8 + - model: Mixtral MoE 8x22B FP8 + mad_tag: pyt_vllm_mixtral-8x22b_fp8 + model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + precision: float8 + - model: Mistral 7B FP8 + mad_tag: pyt_vllm_mistral-7b_fp8 + model_repo: amd/Mistral-7B-v0.1-FP8-KV + url: https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV + precision: float8 + - group: Qwen + tag: qwen + models: + - model: Qwen2 7B + mad_tag: pyt_vllm_qwen2-7b + model_repo: Qwen/Qwen2-7B-Instruct + url: https://huggingface.co/Qwen/Qwen2-7B-Instruct + precision: float16 + - model: Qwen2 72B + mad_tag: pyt_vllm_qwen2-72b + model_repo: Qwen/Qwen2-72B-Instruct + url: https://huggingface.co/Qwen/Qwen2-72B-Instruct + precision: float16 + - model: QwQ-32B + mad_tag: pyt_vllm_qwq-32b + model_repo: Qwen/QwQ-32B + url: https://huggingface.co/Qwen/QwQ-32B + precision: float16 + tunableop: true + - group: Databricks DBRX + tag: dbrx + models: + - model: DBRX Instruct + mad_tag: pyt_vllm_dbrx-instruct + model_repo: databricks/dbrx-instruct + url: https://huggingface.co/databricks/dbrx-instruct + precision: float16 + - model: DBRX Instruct FP8 + mad_tag: pyt_vllm_dbrx_fp8 + model_repo: amd/dbrx-instruct-FP8-KV + url: https://huggingface.co/amd/dbrx-instruct-FP8-KV + precision: float8 + - group: Google Gemma + tag: gemma + models: + - model: Gemma 2 27B + mad_tag: pyt_vllm_gemma-2-27b + model_repo: google/gemma-2-27b + url: https://huggingface.co/google/gemma-2-27b + precision: float16 + - group: Cohere + tag: cohere + models: + - model: C4AI Command R+ 08-2024 + mad_tag: pyt_vllm_c4ai-command-r-plus-08-2024 + model_repo: CohereForAI/c4ai-command-r-plus-08-2024 + url: https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024 + precision: float16 + - model: C4AI Command R+ 08-2024 FP8 + mad_tag: pyt_vllm_command-r-plus_fp8 + model_repo: amd/c4ai-command-r-plus-FP8-KV + url: https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV + precision: float8 + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek MoE 16B + mad_tag: pyt_vllm_deepseek-moe-16b-chat + model_repo: deepseek-ai/deepseek-moe-16b-chat + url: https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat + precision: float16 + - group: Microsoft Phi + tag: phi + models: + - model: Phi-4 + mad_tag: pyt_vllm_phi-4 + model_repo: microsoft/phi-4 + url: https://huggingface.co/microsoft/phi-4 + - group: TII Falcon + tag: falcon + models: + - model: Falcon 180B + mad_tag: pyt_vllm_falcon-180b + model_repo: 
tiiuae/falcon-180B + url: https://huggingface.co/tiiuae/falcon-180B + precision: float16 diff --git a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml index 5682828ce..5c3b1b51e 100644 --- a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml @@ -2,11 +2,11 @@ vllm_benchmark: unified_docker: latest: # TODO: update me - pull_tag: rocm/vllm:rocm6.4.1_vllm_0.9.1_20250715 - docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.9.1_20250715/images/sha256-4a429705fa95a58f6d20aceab43b1b76fa769d57f32d5d28bd3f4e030e2a78ea + pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812 + docker_hub_url: rocm_version: 6.4.1 - vllm_version: 0.9.1 (0.9.2.dev364+gb432b7a28.rocm641) - pytorch_version: 2.7.0+gitf717b2a + vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641) + pytorch_version: 2.7.0+gitf717b2a (2.7.0+gitf717b2a) hipblaslt_version: 0.15 model_groups: - group: Meta Llama @@ -27,11 +27,6 @@ vllm_benchmark: model_repo: meta-llama/Llama-3.1-405B-Instruct url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct precision: float16 - - model: Llama 2 7B - mad_tag: pyt_vllm_llama-2-7b - model_repo: meta-llama/Llama-2-7b-chat-hf - url: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf - precision: float16 - model: Llama 2 70B mad_tag: pyt_vllm_llama-2-70b model_repo: meta-llama/Llama-2-70b-chat-hf @@ -65,11 +60,6 @@ vllm_benchmark: model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 precision: float16 - - model: Mistral 7B - mad_tag: pyt_vllm_mistral-7b - model_repo: mistralai/Mistral-7B-Instruct-v0.3 - url: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3 - precision: float16 - model: Mixtral MoE 8x7B FP8 mad_tag: pyt_vllm_mixtral-8x7b_fp8 model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV @@ -80,72 +70,15 @@ vllm_benchmark: model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV precision: float8 - - model: Mistral 7B FP8 - mad_tag: pyt_vllm_mistral-7b_fp8 - model_repo: amd/Mistral-7B-v0.1-FP8-KV - url: https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV - precision: float8 - group: Qwen tag: qwen models: - - model: Qwen2 7B - mad_tag: pyt_vllm_qwen2-7b - model_repo: Qwen/Qwen2-7B-Instruct - url: https://huggingface.co/Qwen/Qwen2-7B-Instruct - precision: float16 - - model: Qwen2 72B - mad_tag: pyt_vllm_qwen2-72b - model_repo: Qwen/Qwen2-72B-Instruct - url: https://huggingface.co/Qwen/Qwen2-72B-Instruct - precision: float16 - model: QwQ-32B mad_tag: pyt_vllm_qwq-32b model_repo: Qwen/QwQ-32B url: https://huggingface.co/Qwen/QwQ-32B precision: float16 tunableop: true - - group: Databricks DBRX - tag: dbrx - models: - - model: DBRX Instruct - mad_tag: pyt_vllm_dbrx-instruct - model_repo: databricks/dbrx-instruct - url: https://huggingface.co/databricks/dbrx-instruct - precision: float16 - - model: DBRX Instruct FP8 - mad_tag: pyt_vllm_dbrx_fp8 - model_repo: amd/dbrx-instruct-FP8-KV - url: https://huggingface.co/amd/dbrx-instruct-FP8-KV - precision: float8 - - group: Google Gemma - tag: gemma - models: - - model: Gemma 2 27B - mad_tag: pyt_vllm_gemma-2-27b - model_repo: google/gemma-2-27b - url: https://huggingface.co/google/gemma-2-27b - precision: float16 - - group: Cohere - tag: cohere - models: - - model: C4AI Command R+ 08-2024 - mad_tag: pyt_vllm_c4ai-command-r-plus-08-2024 - 
model_repo: CohereForAI/c4ai-command-r-plus-08-2024 - url: https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024 - precision: float16 - - model: C4AI Command R+ 08-2024 FP8 - mad_tag: pyt_vllm_command-r-plus_fp8 - model_repo: amd/c4ai-command-r-plus-FP8-KV - url: https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV - precision: float8 - - group: DeepSeek - tag: deepseek - models: - - model: DeepSeek MoE 16B - mad_tag: pyt_vllm_deepseek-moe-16b-chat - model_repo: deepseek-ai/deepseek-moe-16b-chat - url: https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat - precision: float16 - group: Microsoft Phi tag: phi models: @@ -153,11 +86,3 @@ vllm_benchmark: mad_tag: pyt_vllm_phi-4 model_repo: microsoft/phi-4 url: https://huggingface.co/microsoft/phi-4 - - group: TII Falcon - tag: falcon - models: - - model: Falcon 180B - mad_tag: pyt_vllm_falcon-180b - model_repo: tiiuae/falcon-180B - url: https://huggingface.co/tiiuae/falcon-180B - precision: float16 diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250702.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250702.rst index 80cd9b9c2..a482c27c7 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250702.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250702.rst @@ -14,7 +14,7 @@ vLLM inference performance testing This documentation does not reflect the latest version of ROCm vLLM inference performance documentation. See :doc:`../vllm` for the latest version. -.. _vllm-benchmark-unified-docker: +.. _vllm-benchmark-unified-docker-702: .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250702-benchmark-models.yaml @@ -77,7 +77,7 @@ vLLM inference performance testing - .. _vllm-benchmark-vllm: + .. _vllm-benchmark-vllm-702: {% for model_group in model_groups %} {% for model in model_group.models %} @@ -159,7 +159,7 @@ vLLM inference performance testing Once the setup is complete, choose between two options to reproduce the benchmark results: - .. _vllm-benchmark-mad: + .. _vllm-benchmark-mad-702: {% for model_group in model_groups %} {% for model in model_group.models %} diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst new file mode 100644 index 000000000..f2850b09c --- /dev/null +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst @@ -0,0 +1,450 @@ +:orphan: + +.. meta:: + :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and the + ROCm vLLM Docker image. + :keywords: model, MAD, automation, dashboarding, validate + +********************************** +vLLM inference performance testing +********************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm vLLM + inference performance documentation. See :doc:`../vllm` for the latest version. + +.. _vllm-benchmark-unified-docker-715: + +.. 
datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + The `ROCm vLLM Docker <{{ unified_docker.docker_hub_url }}>`_ image offers + a prebuilt, optimized environment for validating large language model (LLM) + inference performance on AMD Instinctâ„¢ MI300X series accelerators. This ROCm vLLM + Docker image integrates vLLM and PyTorch tailored specifically for MI300X series + accelerators and includes the following components: + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + * - `ROCm `__ + - {{ unified_docker.rocm_version }} + + * - `vLLM `__ + - {{ unified_docker.vllm_version }} + + * - `PyTorch `__ + - {{ unified_docker.pytorch_version }} + + * - `hipBLASLt `__ + - {{ unified_docker.hipblaslt_version }} + +With this Docker image, you can quickly test the :ref:`expected +inference performance numbers ` for +MI300X series accelerators. + +What's new +========== + +The following is summary of notable changes since the :doc:`previous ROCm/vLLM Docker release `. + +* The ``--compilation-config-parameter`` is no longer required as its options are now enabled by default. + This parameter has been removed from the benchmarking script. + +* Resolved Llama 3.1 405 B custom all-reduce issue, eliminating the need for ``--disable-custom-all-reduce``. + This parameter has been removed from the benchmarking script. + +* Fixed a ``+rms_norm`` custom kernel issue. + +* Added quick reduce functionality. Set ``VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=FP`` to enable; supported modes are ``FP``, ``INT8``, ``INT6``, ``INT4``. + +* Implemented a workaround to potentially mitigate GPU crashes experienced with the Command R+ model, pending a driver fix. + +Supported models +================ + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + .. _vllm-benchmark-available-models-715: + + The following models are supported for inference performance benchmarking + with vLLM and ROCm. Some instructions, commands, and recommendations in this + documentation might vary by model -- select one to get started. + + .. raw:: html + +
+      [Dropdown selectors (raw HTML) for "Model group" and "Model", populated from the model groups and models defined in the YAML data file]
+ + .. _vllm-benchmark-vllm-715: + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{model.mad_tag}} + + .. note:: + + See the `{{ model.model }} model card on Hugging Face <{{ model.url }}>`_ to learn more about your selected model. + Some models require access authorization prior to use via an external license agreement through a third party. + + {% endfor %} + {% endfor %} + +.. note:: + + vLLM is a toolkit and library for LLM inference and serving. AMD implements + high-performance custom kernels and modules in vLLM to enhance performance. + See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for + more information. + +.. _vllm-benchmark-performance-measurements-715: + +Performance measurements +======================== + +To evaluate performance, the +`Performance results with AMD ROCm software `_ +page provides reference throughput and latency measurements for inferencing popular AI models. + +.. important:: + + The performance data presented in + `Performance results with AMD ROCm software `_ + only reflects the latest version of this inference benchmarking environment. + The listed measurements should not be interpreted as the peak performance achievable by AMD Instinct MI325X and MI300X accelerators or ROCm software. + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. + +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + Pull the Docker image + ===================== + + Download the `ROCm vLLM Docker image <{{ unified_docker.docker_hub_url }}>`_. + Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + + Benchmarking + ============ + + Once the setup is complete, choose between two options to reproduce the + benchmark results: + + .. _vllm-benchmark-mad-715: + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{model.mad_tag}} + + .. tab-set:: + + .. tab-item:: MAD-integrated benchmarking + + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + 2. Use this command to run the performance benchmark test on the `{{model.model}} <{{ model.url }}>`_ model + using one GPU with the :literal:`{{model.precision}}` data type on the host machine. + + .. 
code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{model.mad_tag}} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{model.mad_tag}}``. The latency and throughput reports of the + model are collected in the following path: ``~/MAD/reports_{{model.precision}}/``. + + Although the :ref:`available models ` are preconfigured + to collect latency and throughput performance data, you can also change the benchmarking + parameters. See the standalone benchmarking tab for more information. + + {% if model.tunableop %} + + .. note:: + + For improved performance, consider enabling :ref:`PyTorch TunableOp `. + TunableOp automatically explores different implementations and configurations of certain PyTorch + operators to find the fastest one for your hardware. + + By default, ``{{model.mad_tag}}`` runs with TunableOp disabled + (see + ``__). + To enable it, include the ``--tunableop on`` argument in your + run. + + Enabling TunableOp triggers a two-pass run -- a warm-up followed + by the performance-collection run. + + {% endif %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required scripts + + 1. Run the vLLM benchmark tool independently by starting the + `Docker container <{{ unified_docker.docker_hub_url }}>`_ + as shown in the following snippet. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + docker run -it \ + --device=/dev/kfd \ + --device=/dev/dri \ + --group-add video \ + --shm-size 16G \ + --security-opt seccomp=unconfined \ + --security-opt apparmor=unconfined \ + --cap-add=SYS_PTRACE \ + -v $(pwd):/workspace \ + --env HUGGINGFACE_HUB_CACHE=/workspace \ + --name test \ + {{ unified_docker.pull_tag }} + + 2. In the Docker container, clone the ROCm MAD repository and navigate to the + benchmark scripts directory at ``~/MAD/scripts/vllm``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/vllm + + 3. To start the benchmark, use the following command with the appropriate options. + + .. dropdown:: Benchmark options + :open: + + .. list-table:: + :header-rows: 1 + :align: center + + * - Name + - Options + - Description + + * - ``$test_option`` + - latency + - Measure decoding token latency + + * - + - throughput + - Measure token generation throughput + + * - + - all + - Measure both throughput and latency + + * - ``$num_gpu`` + - 1 or 8 + - Number of GPUs + + * - ``$datatype`` + - ``float16`` or ``float8`` + - Data type + + The input sequence length, output sequence length, and tensor parallel (TP) are + already configured. You don't need to specify them with this script. + + Command: + + .. code-block:: + + ./vllm_benchmark_report.sh \ + -s $test_option \ + -m {{model.model_repo}} \ + -g $num_gpu \ + -d {{model.precision}} + + .. note:: + + For best performance, it's recommend to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. + + If you encounter the following error, pass your access-authorized Hugging + Face token to the gated models. + + .. code-block:: + + OSError: You are trying to access a gated repo. + + # pass your HF_TOKEN + export HF_TOKEN=$your_personal_hf_token + + .. 
rubric:: Benchmarking examples + + Here are some examples of running the benchmark with various options: + + * Latency benchmark + + Use this command to benchmark the latency of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: + + ./vllm_benchmark_report.sh \ + -s latency \ + -m {{model.model_repo}} \ + -g 8 \ + -d {{model.precision}} + + Find the latency report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_latency_report.csv``. + + * Throughput benchmark + + Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: shell + + ./vllm_benchmark_report.sh \ + -s throughput \ + -m {{model.model_repo}} \ + -g 8 \ + -d {{model.precision}} + + Find the throughput report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_throughput_report.csv``. + + .. raw:: html + + + + .. note:: + + Throughput is calculated as: + + - .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time + + - .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time + {% endfor %} + {% endfor %} + +Advanced usage +============== + +For information on experimental features and known issues related to ROCm optimization efforts on vLLM, +see the developer's guide at ``__. + +Reproducing the Docker image +---------------------------- + +To reproduce this ROCm/vLLM Docker image release, follow these steps: + +1. Clone the `vLLM repository `__. + + .. code-block:: shell + + git clone https://github.com/ROCm/vllm.git + +2. Checkout the specific release commit. + + .. code-block:: shell + + cd vllm + git checkout b432b7a285aa0dcb9677380936ffa74931bb6d6f + +3. Build the Docker image. Replace ``vllm-rocm`` with your desired image tag. + + .. code-block:: shell + + docker build -f docker/Dockerfile.rocm -t vllm-rocm . + +Known issues and workarounds +============================ + +AITER does not support FP8 KV cache yet. + +Further reading +=============== + +- To learn more about the options for latency and throughput benchmark scripts, + see ``_. + +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. + +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. + +- For application performance optimization strategies for HPC and AI workloads, + including inference with vLLM, see :doc:`/how-to/rocm-for-ai/inference-optimization/workload`. + +- To learn how to run community models from Hugging Face on AMD GPUs, see + :doc:`Running models from Hugging Face `. + +- To learn how to fine-tune LLMs and optimize inference, see + :doc:`Fine-tuning LLMs and inference optimization `. + +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. + +Previous versions +================= + +See :doc:`vllm-history` to find documentation for previous releases +of the ``ROCm/vllm`` Docker image. 
diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst index b26cc522a..6f87670ec 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst @@ -16,14 +16,23 @@ previous releases of the ``ROCm/vllm`` Docker image on `Docker Hub ` + * `Docker Hub `__ + + * - ``rocm/vllm:rocm6.4.1_vllm_0.9.1_20250715`` - * ROCm 6.4.1 * vLLM 0.9.1 * PyTorch 2.7.0 - - * :doc:`Documentation <../vllm>` + * :doc:`Documentation ` * `Docker Hub `__ * - ``rocm/vllm:rocm6.4.1_vllm_0.9.1_20250702`` diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst index 58c5dc6bd..02c992620 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst @@ -7,7 +7,7 @@ vLLM inference performance testing ********************************** -.. _vllm-benchmark-unified-docker: +.. _vllm-benchmark-unified-docker-812: .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml @@ -47,17 +47,11 @@ What's new The following is summary of notable changes since the :doc:`previous ROCm/vLLM Docker release `. -* The ``--compilation-config-parameter`` is no longer required as its options are now enabled by default. - This parameter has been removed from the benchmarking script. +* Upgraded to vLLM v0.10. -* Resolved Llama 3.1 405 B custom all-reduce issue, eliminating the need for ``--disable-custom-all-reduce``. - This parameter has been removed from the benchmarking script. +* FP8 KV cache support via AITER. -* Fixed a ``+rms_norm`` custom kernel issue. - -* Added quick reduce functionality. Set ``VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=FP`` to enable; supported modes are ``FP``, ``INT8``, ``INT6``, ``INT4``. - -* Implemented a workaround to potentially mitigate GPU crashes experienced with the Command R+ model, pending a driver fix. +* Full graph capture support via AITER. Supported models ================ @@ -67,7 +61,7 @@ Supported models {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} - .. _vllm-benchmark-available-models: + .. _vllm-benchmark-available-models-812: The following models are supported for inference performance benchmarking with vLLM and ROCm. Some instructions, commands, and recommendations in this @@ -102,7 +96,7 @@ Supported models - .. _vllm-benchmark-vllm: + .. _vllm-benchmark-vllm-812: {% for model_group in model_groups %} {% for model in model_group.models %} @@ -124,14 +118,14 @@ Supported models See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for more information. -.. _vllm-benchmark-performance-measurements: +.. _vllm-benchmark-performance-measurements-812: Performance measurements ======================== To evaluate performance, the `Performance results with AMD ROCm software `_ -page provides reference throughput and latency measurements for inferencing popular AI models. +page provides reference throughput and serving measurements for inferencing popular AI models. .. important:: @@ -176,7 +170,7 @@ system's configuration. Once the setup is complete, choose between two options to reproduce the benchmark results: - .. _vllm-benchmark-mad: + .. 
_vllm-benchmark-mad-812: {% for model_group in model_groups %} {% for model in model_group.models %} @@ -209,12 +203,15 @@ system's configuration. --timeout 28800 MAD launches a Docker container with the name - ``container_ci-{{model.mad_tag}}``. The latency and throughput reports of the - model are collected in the following path: ``~/MAD/reports_{{model.precision}}/``. + ``container_ci-{{model.mad_tag}}``. The throughput and serving reports of the + model are collected in the following paths: ``{{ model.mad_tag }}_throughput.csv`` + and ``{{ model.mad_tag }}_serving.csv``. - Although the :ref:`available models ` are preconfigured - to collect latency and throughput performance data, you can also change the benchmarking - parameters. See the standalone benchmarking tab for more information. + Although the :ref:`available models + ` are preconfigured to collect + offline throughput and online serving performance data, you can + also change the benchmarking parameters. See the standalone + benchmarking tab for more information. {% if model.tunableop %} @@ -224,14 +221,12 @@ system's configuration. TunableOp automatically explores different implementations and configurations of certain PyTorch operators to find the fastest one for your hardware. - By default, ``{{model.mad_tag}}`` runs with TunableOp disabled - (see - ``__). - To enable it, include the ``--tunableop on`` argument in your - run. + By default, ``{{model.mad_tag}}`` runs with TunableOp disabled (see + ``__). To enable it, include + the ``--tunableop on`` argument in your run. - Enabling TunableOp triggers a two-pass run -- a warm-up followed - by the performance-collection run. + Enabling TunableOp triggers a two-pass run -- a warm-up followed by the + performance-collection run. {% endif %} @@ -269,6 +264,13 @@ system's configuration. 3. To start the benchmark, use the following command with the appropriate options. + .. code-block:: + + ./run.sh \ + --config $CONFIG_CSV \ + --model_repo {{ model.model_repo }} \ + + .. dropdown:: Benchmark options :open: @@ -280,42 +282,40 @@ system's configuration. - Options - Description - * - ``$test_option`` - - latency - - Measure decoding token latency + * - ``--config`` + - ``configs/default.csv`` + - Run configs from the CSV for the chosen model repo and benchmark. * - - - throughput - - Measure token generation throughput + - ``configs/extended.csv`` + - * - - - all - - Measure both throughput and latency + - ``configs/performance.csv`` + - - * - ``$num_gpu`` - - 1 or 8 - - Number of GPUs + * - ``--benchmark`` + - ``throughput`` + - Measure offline end-to-end throughput. - * - ``$datatype`` - - ``float16`` or ``float8`` - - Data type + * - + - ``serving`` + - Measure online serving performance. + + * - + - ``all`` + - Measure both throughput and serving. + + * - `` + - See `run.sh `__ for more info. + - Additional overrides to the config CSV. The input sequence length, output sequence length, and tensor parallel (TP) are already configured. You don't need to specify them with this script. - Command: - - .. code-block:: - - ./vllm_benchmark_report.sh \ - -s $test_option \ - -m {{model.model_repo}} \ - -g $num_gpu \ - -d {{model.precision}} - .. note:: - For best performance, it's recommend to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. + For best performance, it's recommended to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. If you encounter the following error, pass your access-authorized Hugging Face token to the gated models. 
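+
+            For example, a minimal run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1`` enabled
+            might look like the following. This is an illustrative sketch only -- adjust the
+            config CSV and benchmark mode for your setup.
+
+            .. code-block:: shell
+
+               export VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1
+               ./run.sh \
+                 --config configs/default.csv \
+                 --model_repo {{model.model_repo}} \
+                 --benchmark all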
@@ -331,33 +331,33 @@ system's configuration. Here are some examples of running the benchmark with various options: - * Latency benchmark - - Use this command to benchmark the latency of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. - - .. code-block:: - - ./vllm_benchmark_report.sh \ - -s latency \ - -m {{model.model_repo}} \ - -g 8 \ - -d {{model.precision}} - - Find the latency report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_latency_report.csv``. - * Throughput benchmark Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. .. code-block:: shell - ./vllm_benchmark_report.sh \ - -s throughput \ - -m {{model.model_repo}} \ - -g 8 \ - -d {{model.precision}} + export MAD_MODEL_NAME={{ model.mad_tag }} + ./run.sh \ + --config configs/default.csv \ + --model_repo {{model.model_repo}} \ + --benchmark throughput - Find the throughput report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_throughput_report.csv``. + Find the throughput benchmark report at ``./{{ model.mad_tag }}_throughput.csv``. + + * Serving benchmark + + Use this command to benchmark the serving performance of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: + + export MAD_MODEL_NAME={{ model.mad_tag }} + ./run.sh \ + --config configs/default.csv \ + --model_repo {{model.model_repo}} \ + --benchmark serving + + Find the serving benchmark report at ``./{{ model.mad_tag }}_serving.csv``. .. raw:: html @@ -400,7 +400,7 @@ To reproduce this ROCm/vLLM Docker image release, follow these steps: .. code-block:: shell cd vllm - git checkout b432b7a285aa0dcb9677380936ffa74931bb6d6f + git checkout 340ea86dfe5955d6f9a9e767d6abab5aacf2c978 3. Build the Docker image. Replace ``vllm-rocm`` with your desired image tag. @@ -408,11 +408,6 @@ To reproduce this ROCm/vLLM Docker image release, follow these steps: docker build -f docker/Dockerfile.rocm -t vllm-rocm . -Known issues and workarounds -============================ - -AITER does not support FP8 KV cache yet. 
- Further reading =============== From 55d0a88ec5200462d1fb18e60cc3782047cd2a15 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Fri, 15 Aug 2025 13:20:39 -0400 Subject: [PATCH 10/81] vLLM inference benchmark doc: add missing data field (#5199) --- .../how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml | 2 +- .../previous-versions/vllm-0.9.1-20250715.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml index 5c3b1b51e..714534ef1 100644 --- a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml @@ -3,7 +3,7 @@ vllm_benchmark: latest: # TODO: update me pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812 - docker_hub_url: + docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.0_20250812/images/sha256-4c277ad39af3a8c9feac9b30bf78d439c74d9b4728e788a419d3f1d0c30cacaa rocm_version: 6.4.1 vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641) pytorch_version: 2.7.0+gitf717b2a (2.7.0+gitf717b2a) diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst index f2850b09c..9e0f4443a 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst @@ -69,7 +69,7 @@ The following is summary of notable changes since the :doc:`previous ROCm/vLLM D Supported models ================ -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} @@ -162,7 +162,7 @@ To test for optimal performance, consult the recommended :ref:`System health ben `. This suite of tests will help you verify and fine-tune your system's configuration. -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +.. 
datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} From 9f5cd4500c48b176defed0772a04dd22216f10ab Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Mon, 18 Aug 2025 06:19:27 -0600 Subject: [PATCH 11/81] Don't use local tensilelite (#5201) --- .azuredevops/components/hipSPARSELt.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azuredevops/components/hipSPARSELt.yml b/.azuredevops/components/hipSPARSELt.yml index 0ec046ed3..104e0ee6c 100644 --- a/.azuredevops/components/hipSPARSELt.yml +++ b/.azuredevops/components/hipSPARSELt.yml @@ -158,6 +158,7 @@ jobs: -DCMAKE_PREFIX_PATH="$(Agent.BuildDirectory)/rocm" -DROCM_PATH=$(Agent.BuildDirectory)/rocm -DBUILD_CLIENTS_TESTS=ON + -DBUILD_USE_LOCAL_TENSILE=OFF -GNinja ${{ if ne(parameters.sparseCheckoutDir, '') }}: cmakeSourceDir: $(Build.SourcesDirectory)/projects/hipsparselt From c154b7e0a3f28e522a72e2f347d6e31bb40779ab Mon Sep 17 00:00:00 2001 From: Peter Park Date: Mon, 18 Aug 2025 10:00:10 -0400 Subject: [PATCH 12/81] Fix documented VRAM for Radeon AI Pro R9700 (#5203) --- docs/reference/gpu-arch-specs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/gpu-arch-specs.rst b/docs/reference/gpu-arch-specs.rst index 14eedd9b8..ea70ef70a 100644 --- a/docs/reference/gpu-arch-specs.rst +++ b/docs/reference/gpu-arch-specs.rst @@ -285,7 +285,7 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil - Radeon AI PRO R9700 - RDNA4 - gfx1201 - - 16 + - 32 - 64 - 32 or 64 - 128 From 14acec60003b062faa4160fc81851d09950161c6 Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Tue, 19 Aug 2025 15:22:02 -0400 Subject: [PATCH 13/81] [Ex CI] switch rocprofiler pipeline ID (#5207) --- .azuredevops/components/rocprofiler.yml | 1 + .azuredevops/templates/steps/dependencies-rocm.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.azuredevops/components/rocprofiler.yml b/.azuredevops/components/rocprofiler.yml index cb9195bb3..b6fc4856f 100644 --- a/.azuredevops/components/rocprofiler.yml +++ b/.azuredevops/components/rocprofiler.yml @@ -187,6 +187,7 @@ jobs: workspace: clean: all steps: + - checkout: none - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml parameters: aptPackages: ${{ parameters.aptPackages }} diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index f0f4dfe8c..1e2f2b5d0 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -203,7 +203,7 @@ parameters: developBranch: develop hasGpuTarget: true rocprofiler: - pipelineId: 143 + pipelineId: 329 developBranch: amd-staging hasGpuTarget: true rocprofiler-compute: From 00b0d9430e620b803c7728bb22c6b5c5e9255243 Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Tue, 19 Aug 2025 15:44:07 -0400 Subject: [PATCH 14/81] [Ex CI] change rocprofiler's branch to develop (#5208) --- .azuredevops/templates/steps/dependencies-rocm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 1e2f2b5d0..124ddd68b 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -204,7 
+204,7 @@ parameters: hasGpuTarget: true rocprofiler: pipelineId: 329 - developBranch: amd-staging + developBranch: develop hasGpuTarget: true rocprofiler-compute: pipelineId: 257 From 3dfc0cdbf16623d9c6e24040fb16cf57acc1a3f0 Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Wed, 20 Aug 2025 11:37:15 -0400 Subject: [PATCH 15/81] [External CI] Update CMake on MIOpen build pipeline (#5210) --- .azuredevops/components/MIOpen.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.azuredevops/components/MIOpen.yml b/.azuredevops/components/MIOpen.yml index 2eba5020e..b606005c7 100644 --- a/.azuredevops/components/MIOpen.yml +++ b/.azuredevops/components/MIOpen.yml @@ -131,6 +131,7 @@ jobs: parameters: aptPackages: ${{ parameters.aptPackages }} pipModules: ${{ parameters.pipModules }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: @@ -210,6 +211,7 @@ jobs: parameters: aptPackages: ${{ parameters.aptPackages }} pipModules: ${{ parameters.pipModules }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: From 98029db4eeb729d7ab709d79c03f867966b275a1 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Thu, 21 Aug 2025 23:50:55 -0400 Subject: [PATCH 16/81] docs: Add Primus (Megatron) training Docker documentation (#5218) --- .wordlist.txt | 6 + .../megatron-lm-benchmark-models.yaml | 27 +- .../megatron-lm-v25.6-benchmark-models.yaml | 60 + .../primus-megatron-benchmark-models.yaml | 58 + .../training/benchmark-docker/megatron-lm.rst | 53 +- .../previous-versions/megatron-lm-history.rst | 12 +- .../megatron-lm-primus-migration-guide.rst | 175 +++ .../previous-versions/megatron-lm-v25.6.rst | 1041 +++++++++++++++++ .../benchmark-docker/primus-megatron.rst | 602 ++++++++++ docs/how-to/rocm-for-ai/training/index.rst | 2 + docs/sphinx/_toc.yml.in | 4 +- 11 files changed, 1994 insertions(+), 46 deletions(-) create mode 100644 docs/data/how-to/rocm-for-ai/training/previous-versions/megatron-lm-v25.6-benchmark-models.yaml create mode 100644 docs/data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml create mode 100644 docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide.rst create mode 100644 docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6.rst create mode 100644 docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst diff --git a/.wordlist.txt b/.wordlist.txt index 7b592fc91..9d0f4d6cc 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -116,6 +116,7 @@ Deprecations DevCap DirectX Dockerfile +Dockerized Doxygen dropless ELMo @@ -361,6 +362,7 @@ PowerEdge PowerShell Pretrained Pretraining +Primus Profiler's PyPi Pytest @@ -525,6 +527,7 @@ Xilinx Xnack Xteam YAML +YAMLs YML YModel ZeRO @@ -585,6 +588,7 @@ completers composable concretization config +configs conformant constructible convolutional @@ -795,7 +799,9 @@ preprocessing preprocessor prequantized prerequisites +pretrain pretraining +primus profiler profilers protobuf diff --git a/docs/data/how-to/rocm-for-ai/training/megatron-lm-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/megatron-lm-benchmark-models.yaml index 
77eaa5ba0..c743e00b6 100644 --- a/docs/data/how-to/rocm-for-ai/training/megatron-lm-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/training/megatron-lm-benchmark-models.yaml @@ -1,26 +1,15 @@ dockers: - - pull_tag: rocm/megatron-lm:v25.6_py312 - docker_hub_url: https://hub.docker.com/layers/rocm/megatron-lm/v25.6_py312/images/sha256-482ff906532285bceabdf2bda629bd32cb6174d2d07f4243a736378001b28df0 + - pull_tag: rocm/megatron-lm:v25.7_py310 + docker_hub_url: https://hub.docker.com/layers/rocm/megatron-lm/v25.7_py310/images/sha256-6189df849feeeee3ae31bb1e97aef5006d69d2b90c134e97708c19632e20ab5a components: - ROCm: 6.4.1 - PyTorch: 2.8.0a0+git7d205b2 - Python: 3.12 - Transformer Engine: 2.1.0.dev0+8c4a512 - hipBLASLt: 393e413 - Triton: 3.3.0 - RCCL: 2.23.4.7a84c5d - doc_name: Ubuntu 24.04 + Python 3.12 - - pull_tag: rocm/megatron-lm:v25.6_py310 - docker_hub_url: https://hub.docker.com/layers/rocm/megatron-lm/v25.6_py310/images/sha256-9627bd9378684fe26cb1a10c7dd817868f553b33402e49b058355b0f095568d6 - components: - ROCm: 6.4.1 - PyTorch: 2.8.0a0+git7d205b2 + ROCm: 6.4.2 + Primus: v0.1.0-rc1 + PyTorch: 2.8.0a0+gitd06a406 Python: "3.10" - Transformer Engine: 2.1.0.dev0+8c4a512 - hipBLASLt: 393e413 + Transformer Engine: 2.1.0.dev0+ba586519 + hipBLASLt: 37ba1d36 Triton: 3.3.0 - RCCL: 2.23.4.7a84c5d - doc_name: Ubuntu 22.04 + Python 3.10 + RCCL: 2.22.3 model_groups: - group: Meta Llama tag: llama diff --git a/docs/data/how-to/rocm-for-ai/training/previous-versions/megatron-lm-v25.6-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/previous-versions/megatron-lm-v25.6-benchmark-models.yaml new file mode 100644 index 000000000..77eaa5ba0 --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/training/previous-versions/megatron-lm-v25.6-benchmark-models.yaml @@ -0,0 +1,60 @@ +dockers: + - pull_tag: rocm/megatron-lm:v25.6_py312 + docker_hub_url: https://hub.docker.com/layers/rocm/megatron-lm/v25.6_py312/images/sha256-482ff906532285bceabdf2bda629bd32cb6174d2d07f4243a736378001b28df0 + components: + ROCm: 6.4.1 + PyTorch: 2.8.0a0+git7d205b2 + Python: 3.12 + Transformer Engine: 2.1.0.dev0+8c4a512 + hipBLASLt: 393e413 + Triton: 3.3.0 + RCCL: 2.23.4.7a84c5d + doc_name: Ubuntu 24.04 + Python 3.12 + - pull_tag: rocm/megatron-lm:v25.6_py310 + docker_hub_url: https://hub.docker.com/layers/rocm/megatron-lm/v25.6_py310/images/sha256-9627bd9378684fe26cb1a10c7dd817868f553b33402e49b058355b0f095568d6 + components: + ROCm: 6.4.1 + PyTorch: 2.8.0a0+git7d205b2 + Python: "3.10" + Transformer Engine: 2.1.0.dev0+8c4a512 + hipBLASLt: 393e413 + Triton: 3.3.0 + RCCL: 2.23.4.7a84c5d + doc_name: Ubuntu 22.04 + Python 3.10 +model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.3 70B + mad_tag: pyt_megatron_lm_train_llama-3.3-70b + - model: Llama 3.1 8B + mad_tag: pyt_megatron_lm_train_llama-3.1-8b + - model: Llama 3.1 70B + mad_tag: pyt_megatron_lm_train_llama-3.1-70b + - model: Llama 3.1 70B (proxy) + mad_tag: pyt_megatron_lm_train_llama-3.1-70b-proxy + - model: Llama 2 7B + mad_tag: pyt_megatron_lm_train_llama-2-7b + - model: Llama 2 70B + mad_tag: pyt_megatron_lm_train_llama-2-70b + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek-V3 (proxy) + mad_tag: pyt_megatron_lm_train_deepseek-v3-proxy + - model: DeepSeek-V2-Lite + mad_tag: pyt_megatron_lm_train_deepseek-v2-lite-16b + - group: Mistral AI + tag: mistral + models: + - model: Mixtral 8x7B + mad_tag: pyt_megatron_lm_train_mixtral-8x7b + - model: Mixtral 8x22B (proxy) + mad_tag: pyt_megatron_lm_train_mixtral-8x22b-proxy + - 
group: Qwen + tag: qwen + models: + - model: Qwen 2.5 7B + mad_tag: pyt_megatron_lm_train_qwen2.5-7b + - model: Qwen 2.5 72B + mad_tag: pyt_megatron_lm_train_qwen2.5-72b diff --git a/docs/data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml new file mode 100644 index 000000000..fec474f59 --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml @@ -0,0 +1,58 @@ +dockers: + - pull_tag: rocm/megatron-lm:v25.7_py310 + docker_hub_url: https://hub.docker.com/layers/rocm/megatron-lm/v25.7_py310/images/sha256-6189df849feeeee3ae31bb1e97aef5006d69d2b90c134e97708c19632e20ab5a + components: + ROCm: 6.4.2 + Primus: v0.1.0-rc1 + PyTorch: 2.8.0a0+gitd06a406 + Python: "3.10" + Transformer Engine: 2.1.0.dev0+ba586519 + hipBLASLt: 37ba1d36 + Triton: 3.3.0 + RCCL: 2.22.3 +model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.3 70B + mad_tag: primus_pyt_megatron_lm_train_llama-3.3-70b + config_name: llama3.3_70B-pretrain.yaml + - model: Llama 3.1 70B + mad_tag: primus_pyt_megatron_lm_train_llama-3.1-70b + config_name: llama3.1_70B-pretrain.yaml + - model: Llama 3.1 8B + mad_tag: primus_pyt_megatron_lm_train_llama-3.1-8b + config_name: llama3.1_8B-pretrain.yaml + - model: Llama 2 7B + mad_tag: primus_pyt_megatron_lm_train_llama-2-7b + config_name: llama2_7B-pretrain.yaml + - model: Llama 2 70B + mad_tag: primus_pyt_megatron_lm_train_llama-2-70b + config_name: llama2_70B-pretrain.yaml + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek-V3 (proxy) + mad_tag: primus_pyt_megatron_lm_train_deepseek-v3-proxy + config_name: deepseek_v3-pretrain.yaml + - model: DeepSeek-V2-Lite + mad_tag: primus_pyt_megatron_lm_train_deepseek-v2-lite-16b + config_name: deepseek_v2_lite-pretrain.yaml + - group: Mistral AI + tag: mistral + models: + - model: Mixtral 8x7B + mad_tag: primus_pyt_megatron_lm_train_mixtral-8x7b + config_name: mixtral_8x7B_v0.1-pretrain.yaml + - model: Mixtral 8x22B (proxy) + mad_tag: primus_pyt_megatron_lm_train_mixtral-8x22b-proxy + config_name: mixtral_8x22B_v0.1-pretrain.yaml + - group: Qwen + tag: qwen + models: + - model: Qwen 2.5 7B + mad_tag: primus_pyt_megatron_lm_train_qwen2.5-7b + config_name: primus_qwen2.5_7B-pretrain.yaml + - model: Qwen 2.5 72B + mad_tag: primus_pyt_megatron_lm_train_qwen2.5-72b + config_name: qwen2.5_72B-pretrain.yaml diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst index f9759c762..687cc514f 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst @@ -1,3 +1,5 @@ +:orphan: + .. meta:: :description: How to train a model using Megatron-LM for ROCm. :keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch @@ -6,6 +8,14 @@ Training a model with Megatron-LM for ROCm ****************************************** +.. caution:: + + The ROCm Megatron-LM framework now has limited support with this Docker + environment; it now focuses on Primus with Megatron-Core. See :doc:`primus-megatron`. + + To learn how to migrate your existing workloads to Primus with Megatron-Core, + see :doc:`previous-versions/megatron-lm-primus-migration-guide`. 
+ The `Megatron-LM framework for ROCm `_ is a specialized fork of the robust Megatron-LM, designed to enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD @@ -20,13 +30,17 @@ essential components, including PyTorch, ROCm libraries, and Megatron-LM utilities. It contains the following software components to accelerate training workloads: +.. note:: + + This Docker environment is based on Python 3.10 and Ubuntu 22.04. For an alternative environment with + Python 3.12 and Ubuntu 24.04, see the :doc:`previous ROCm Megatron-LM v25.6 Docker release `. + .. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/megatron-lm-benchmark-models.yaml {% set dockers = data.dockers %} - {% if dockers|length > 1 %} .. tab-set:: - {% for docker in data.dockers %} + {% for docker in dockers %} .. tab-item:: ``{{ docker.pull_tag }}`` :sync: {{ docker.pull_tag }} @@ -42,28 +56,14 @@ workloads: {% endfor %} {% endfor %} - {% elif dockers|length == 1 %} - .. list-table:: - :header-rows: 1 - - * - Software component - - Version - - {% for component_name, component_version in docker.components %} - * - {{ component_name }} - - {{ component_version }} - - {% endfor %} - {% endif %} .. _amd-megatron-lm-model-support: - The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. - Supported models ================ - The following models are supported for training performance benchmarking with Megatron-LM and ROCm. + The following models are supported for training performance benchmarking with Megatron-LM and ROCm + on AMD Instinct MI300X series accelerators. Some instructions, commands, and training recommendations in this documentation might vary by model -- select one to get started. @@ -177,7 +177,7 @@ Download the Docker image {% if dockers|length > 1 %} .. tab-set:: - {% for docker in data.dockers %} + {% for docker in dockers %} .. tab-item:: {{ docker.doc_name }} :sync: {{ docker.pull_tag }} @@ -227,10 +227,17 @@ Download the Docker image docker start megatron_training_env docker exec -it megatron_training_env bash -The Docker container includes a pre-installed, verified version of the ROCm -Megatron-LM development branch -``__, including necessary -training scripts. +4. **Megatron-LM backward compatibility setup** -- this Docker is primarily intended for use with Primus, but it maintains Megatron-LM compatibility with limited support. + To roll back to using Megatron-LM, follow these steps: + + .. code-block:: shell + + cd /workspace/Megatron-LM/ + pip uninstall megatron-core + pip install -e . + +The Docker container hosts +``__ at verified commit ``e8e9edc``. .. 
_amd-megatron-lm-environment-setup: diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-history.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-history.rst index 9dd1c8f2c..f4ed199ef 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-history.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-history.rst @@ -16,12 +16,20 @@ previous releases of the ``ROCm/megatron-lm`` Docker image on `Docker Hub ` + * `Docker Hub (py310) `__ + + * - v25.6 - * ROCm 6.4.1 * PyTorch 2.8.0a0+git7d205b2 - - * :doc:`Documentation <../megatron-lm>` + * :doc:`Documentation ` * `Docker Hub (py312) `__ * `Docker Hub (py310) `__ diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide.rst new file mode 100644 index 000000000..9275c1f39 --- /dev/null +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide.rst @@ -0,0 +1,175 @@ +:orphan: + +********************************************************************** +Migrating workloads to Primus (Megatron-Core backend) from Megatron-LM +********************************************************************** + +Primus supports Megatron-Core as backend optimization library, +replacing ROCm Megatron-LM. This document outlines the steps to migrate +workload from ROCm Megatron-LM to Primus with the Megatron-Core backend. + +Model architecture +================== + +ROCm Megatron-LM defines model architecture parameters in the training scripts; +for example, the Llama 3 8B model parameters are defined in +`examples/llama/train_llama3.sh `__ +as shown below: + +.. code-block:: bash + + HIDDEN_SIZE=4096 + FFN_HIDDEN_SIZE=14336 + NUM_LAYERS=32 + NUM_HEADS=32 + NUM_KV_HEADS=8 + +Primus defines the model architecture through model YAML configuration files +inside the ``primus/configs/models/megatron/`` repository. For example, Llama 3 8B +model architecture parameters are defined in +`primus/configs/models/megatron/llama3_8B.yaml `__ +as shown below: + +.. code-block:: yaml + + bases: + - llama3_base.yaml + + tokenizer_type: Llama3Tokenizer + tokenizer_model: meta-llama/Llama-3.1-8B + + ffn_hidden_size: 14336 + hidden_size: 4096 + num_attention_heads: 32 + num_layers: 32 + num_query_groups: 8 + +Primus' model config files follow a hierarchical design, meaning that new model +config YAMLs can inherit existing model config files by importing them as +bases. For example, +`llama3.1_8B.yaml `__ +uses ``llama3_8B.yaml`` as a base config and overrides few parameters, as shown below. +In this example, ``llama3.1_8B`` overrides the ``max_position_embeddings`` value: + +.. code-block:: yaml + + bases: + - llama3_8B.yaml + + tokenizer_type: Llama3Tokenizer + tokenizer_model: meta-llama/Llama-3.1-8B + + max_position_embeddings: 131072 + +.. tip:: + + Primus provides ``llama_base.yaml`` as the base configuration, which can be + used as bases for additional model architectures. For example, + `mixtral_base.yaml `__ + and + `deepseek_v3_base.yaml `__ + define ``llama_base.yaml`` as its base. + + .. 
code-block:: yaml + + # Example mixtral_base.yaml: + + bases: + - llama_base.yaml + + init_method_std: 0.01 + rotary_base: 1000000 + qk_layernorm: false + + group_query_attention: true + num_query_groups: 8 + + # moe parameters + num_experts: 8 + moe_router_topk: 2 + moe_router_load_balancing_type: aux_loss + moe_aux_loss_coeff: 1e-2 + moe_grouped_gemm: true + moe_token_dispatcher_type: alltoall + +It is recommended to add a new ``${MODEL_NAME}_base.yaml`` to add a new +category of model and define new models on top of it. For example, to add +Qwen2.5 models in Primus, we define +`qwen2.5_base.yaml `__ +and build +`qwen2.5_7B.yaml `__ +and +`qwen2.5_72B.yaml `__ +using ``qwen2.5_base.yaml`` as the base config. + +Training parameters +=================== + +ROCm Megatron-LM also defines the training parameters, like batch size, +tensor-parallelism, precision, as so on, in the training scripts. For example, +Llama3 8B model parameters are defined in +`examples/llama/train_llama3.sh `__ +as shown below: + +.. code-block:: bash + + TP="${TP:-8}" + PP="${PP:-1}" + CP="${CP:-1}" + MBS="${MBS:-1}" + BS="${BS:-8}" + +Primus defines the training parameters in top-level YAML files -- see +`examples/megatron/configs/ +`__. +For example, the `llama3.1_8B-pretrain.yaml +`__ +configuration imports the ``llama3.1_8B.yaml`` model architecture file. Users can then override +the default training parameters in ``llama3.1_8B-pretrain.yaml``. + +.. code-block:: yaml + + # model to run + model: llama3.1_8B.yaml # Model architecture yaml + overrides: + # log + # disable_wandb: false + # disable_tensorboard: false + stderr_sink_level: DEBUG + + log_avg_skip_iterations: 2 + log_avg_reset_interval: 50 + + train_iters: 50 + micro_batch_size: 2 + global_batch_size: 128 + + seq_length: 8192 + max_position_embeddings: 8192 + + lr: 1.0e-5 + min_lr: 0.0 + lr_warmup_iters: 2 + lr_decay_iters: null + lr_decay_style: cosine + weight_decay: 0.1 + adam_beta1: 0.9 + adam_beta2: 0.95 + eod_mask_loss: true + init_method_std: 0.008 + norm_epsilon: 1.0e-6 + +Backward compatibility with Megatron-LM +======================================= + +The Dockerized environment used for Primus maintains compatibility with Megatron-LM with +limited support. To roll back to using Megatron-LM, follow these steps. + +.. code-block:: shell + + cd /workspace/Megatron-LM/ + pip uninstall megatron-core + pip install -e . + +Once Megatron-LM is installed, follow :doc:`the documentation <../megatron-lm>` to run workloads as +usual. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6.rst new file mode 100644 index 000000000..32d72311b --- /dev/null +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6.rst @@ -0,0 +1,1041 @@ +:orphan: + +.. meta:: + :description: How to train a model using Megatron-LM for ROCm. + :keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch + +****************************************** +Training a model with Megatron-LM for ROCm +****************************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm Megatron-LM + training performance documentation. See :doc:`../megatron-lm` for the latest version. 
+ +The `Megatron-LM framework for ROCm `__ is +a specialized fork of the robust Megatron-LM, designed to enable efficient +training of large-scale language models on AMD GPUs. By leveraging AMD +Instinctâ„¢ MI300X series accelerators, Megatron-LM delivers enhanced +scalability, performance, and resource utilization for AI workloads. It is +purpose-built to support models like Llama, DeepSeek, and Mixtral, +enabling developers to train next-generation AI models more +efficiently. + +AMD provides ready-to-use Docker images for MI300X series accelerators containing +essential components, including PyTorch, ROCm libraries, and Megatron-LM +utilities. It contains the following software components to accelerate training +workloads: + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/previous-versions/megatron-lm-v25.6-benchmark-models.yaml + + {% set dockers = data.dockers %} + {% if dockers|length > 1 %} + .. tab-set:: + + {% for docker in data.dockers %} + .. tab-item:: ``{{ docker.pull_tag }}`` + :sync: {{ docker.pull_tag }} + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + + {% endfor %} + {% endfor %} + {% elif dockers|length == 1 %} + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components %} + * - {{ component_name }} + - {{ component_version }} + + {% endfor %} + {% endif %} + + .. _amd-megatron-lm-model-support-v256: + + The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. + + Supported models + ================ + + The following models are supported for training performance benchmarking with Megatron-LM and ROCm. + Some instructions, commands, and training recommendations in this documentation might + vary by model -- select one to get started. + + {% set model_groups = data.model_groups %} + .. raw:: html + +
+
+
+      <!-- Model group and model variant selection buttons (one per entry in model_groups) -->
+
+
+ +.. note:: + + Some models, such as Llama, require an external license agreement through + a third party (for example, Meta). + +.. _amd-megatron-lm-performance-measurements-v256: + +Performance measurements +======================== + +To evaluate performance, the +`Performance results with AMD ROCm software `__ +page provides reference throughput and latency measurements for training +popular AI models. + +.. important:: + + The performance data presented in + `Performance results with AMD ROCm software `__ + only reflects the latest version of this training benchmarking environment. + The listed measurements should not be interpreted as the peak performance achievable by AMD Instinct MI325X and MI300X accelerators or ROCm software. + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. + +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +.. _mi300x-amd-megatron-lm-training-v256: + +Environment setup +================= + +Use the following instructions to set up the environment, configure the script to train models, and +reproduce the benchmark results on MI300X series accelerators with the AMD Megatron-LM Docker +image. + +.. _amd-megatron-lm-requirements-v256: + +Download the Docker image +------------------------- + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/previous-versions/megatron-lm-v25.6-benchmark-models.yaml + + {% set dockers = data.dockers %} + 1. Use the following command to pull the Docker image from Docker Hub. + + {% if dockers|length > 1 %} + .. tab-set:: + + {% for docker in data.dockers %} + .. tab-item:: {{ docker.doc_name }} + :sync: {{ docker.pull_tag }} + + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + + {% endfor %} + {% elif dockers|length == 1 %} + {% set docker = dockers[0] %} + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + + {% endif %} + 2. Launch the Docker container. + + {% if dockers|length > 1 %} + .. tab-set:: + + {% for docker in data.dockers %} + .. tab-item:: {{ docker.doc_name }} + :sync: {{ docker.pull_tag }} + + .. code-block:: shell + + docker run -it \ + --device /dev/dri \ + --device /dev/kfd \ + --device /dev/infiniband \ + --network host --ipc host \ + --group-add video \ + --cap-add SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + -v $HOME/.ssh:/root/.ssh \ + --shm-size 128G \ + --name megatron_training_env \ + {{ docker.pull_tag }} + + {% endfor %} + {% elif dockers|length == 1 %} + {% set docker = dockers[0] %} + .. code-block:: shell + + docker run -it \ + --device /dev/dri \ + --device /dev/kfd \ + --device /dev/infiniband \ + --network host --ipc host \ + --group-add video \ + --cap-add SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + -v $HOME/.ssh:/root/.ssh \ + --shm-size 128G \ + --name megatron_training_env \ + {{ docker.pull_tag }} + + {% endif %} + +3. Use these commands if you exit the ``megatron_training_env`` container and need to return to it. + + .. 
code-block:: shell + + docker start megatron_training_env + docker exec -it megatron_training_env bash + +The Docker container includes a pre-installed, verified version of the ROCm +Megatron-LM development branch +``__, including necessary +training scripts. + +.. _amd-megatron-lm-environment-setup-v256: + +Configuration +============= + +.. container:: model-doc pyt_megatron_lm_train_llama-3.3-70b pyt_megatron_lm_train_llama-3.1-8b pyt_megatron_lm_train_llama-3.1-70b + + Update the ``train_llama3.sh`` configuration script in the ``examples/llama`` + directory of + ``__ to configure your training run. + Options can also be passed as command line arguments as described in :ref:`Run training `. + +.. container:: model-doc pyt_megatron_lm_train_llama-2-7b pyt_megatron_lm_train_llama-2-70b + + Update the ``train_llama2.sh`` configuration script in the ``examples/llama`` + directory of + ``__ to configure your training run. + Options can also be passed as command line arguments as described in :ref:`Run training `. + +.. container:: model-doc pyt_megatron_lm_train_deepseek-v3-proxy + + Update the ``train_deepseekv3.sh`` configuration script in the ``examples/deepseek_v3`` + directory of + ``__ to configure your training run. + Options can also be passed as command line arguments as described in :ref:`Run training `. + +.. container:: model-doc pyt_megatron_lm_train_deepseek-v2-lite-16b + + Update the ``train_deepseekv2.sh`` configuration script in the ``examples/deepseek_v2`` + directory of + ``__ to configure your training run. + Options can also be passed as command line arguments as described in :ref:`Run training `. + +.. container:: model-doc pyt_megatron_lm_train_mixtral-8x7b pyt_megatron_lm_train_mixtral-8x22b-proxy + + Update the ``train_mixtral_moe.sh`` configuration script in the ``examples/mixtral`` + directory of + ``__ to configure your training run. + Options can also be passed as command line arguments as described in :ref:`Run training `. + +.. note:: + + See :ref:`Key options ` for more information on configuration options. + +Network interface +----------------- + +Update the network interface in the script to match your system's network interface. To +find your network interface, run the following (outside of any Docker container): + +.. code-block:: bash + + ip a + +Look for an active interface that has an IP address in the same subnet as +your other nodes. Then, update the following variables in the script, for +example: + +.. code-block:: bash + + export NCCL_SOCKET_IFNAME=ens50f0np0 + + export GLOO_SOCKET_IFNAME=ens50f0np0 + +.. _amd-megatron-lm-tokenizer-v256: + +Tokenizer +--------- + +You can assign the path of an existing tokenizer to the ``TOKENIZER_MODEL`` as shown in the following examples. +If the tokenizer is not found, it'll be downloaded if publicly available. + +.. container:: model-doc pyt_megatron_lm_train_llama-3.3-70b + + If you do not have Llama 3.3 tokenizer locally, you need to use your + personal Hugging Face access token ``HF_TOKEN`` to download the tokenizer. + See `Llama-3.3-70B-Instruct + `_. After you are + authorized, use your ``HF_TOKEN`` to download the tokenizer and set the + variable ``TOKENIZER_MODEL`` to the tokenizer path. + + .. code-block:: shell + + export HF_TOKEN= + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="meta-llama/Llama-3.3-70B-Instruct" + +.. 
container:: model-doc pyt_megatron_lm_train_llama-3.1-8b + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="meta-llama/Llama-3.1-8B" + +.. container:: model-doc pyt_megatron_lm_train_llama-3.1-70b + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="meta-llama/Llama-3.1-70B" + +.. container:: model-doc pyt_megatron_lm_train_llama-2-7b pyt_megatron_lm_train_llama-2-70b + + The training script uses either the ``Llama2Tokenizer`` or ``HuggingFaceTokenizer`` by default. + +.. container:: model-doc pyt_megatron_lm_train_deepseek-v3-proxy + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="deepseek-ai/DeepSeek-V3" + +.. container:: model-doc pyt_megatron_lm_train_deepseek-v2-lite-16b + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="deepseek-ai/DeepSeek-V2-Lite" + +.. container:: model-doc pyt_megatron_lm_train_mixtral-8x7b pyt_megatron_lm_train_mixtral-8x22b-proxy + + Download the Mixtral tokenizer. + + .. code-block:: shell + + mkdir tokenizer + cd tokenizer + export HF_TOKEN= + wget --header="Authorization: Bearer $HF_TOKEN" -O ./tokenizer.model https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/resolve/main/tokenizer.model + + Use the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL=tokenizer/tokenizer.model + +.. container:: model-doc pyt_megatron_lm_train_qwen2.5-7b + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="Qwen/Qwen2.5-7B" + +.. container:: model-doc pyt_megatron_lm_train_qwen2.5-72b + + The training script uses the ``HuggingFaceTokenizer``. Set ``TOKENIZER_MODEL`` to the appropriate Hugging Face model path. + + .. code-block:: shell + + TOKENIZER_MODEL="Qwen/Qwen2.5-72B" + +Dataset options +--------------- + +You can use either mock data or real data for training. + +* Mock data can be useful for testing and validation. Use the ``MOCK_DATA`` variable to toggle between mock and real data. The default + value is ``1`` for enabled. + + .. code-block:: bash + + MOCK_DATA=1 + +* If you're using a real dataset, update the ``DATA_PATH`` variable to point to the location of your dataset. + + .. code-block:: bash + + MOCK_DATA=0 + + DATA_PATH="/data/bookcorpus_text_sentence" # Change to where your dataset is stored + + Ensure that the files are accessible inside the Docker container. + +Download the dataset +^^^^^^^^^^^^^^^^^^^^ + +.. container:: model-doc pyt_megatron_lm_train_llama-3.3-70b pyt_megatron_lm_train_llama-3.1-8b pyt_megatron_lm_train_llama-3.1-70b pyt_megatron_lm_train_llama-2-7b pyt_megatron_lm_train_llama-2-70b pyt_megatron_lm_train_llama-3.1-70b-proxy + + For Llama models, use the `prepare_dataset.sh + `_ script + to prepare your dataset. + To download the dataset, set the ``DATASET`` variable to the dataset you'd + like to use. Three datasets are supported: ``DATASET=wiki``, ``DATASET=fineweb``, and + ``DATASET=bookcorpus``. + + .. 
code-block:: shell + + DATASET=wiki TOKENIZER_MODEL=NousResearch/Llama-2-7b-chat-hf bash examples/llama/prepare_dataset.sh #for wiki-en dataset + DATASET=bookcorpus TOKENIZER_MODEL=NousResearch/Llama-2-7b-chat-hf bash examples/llama/prepare_dataset.sh #for bookcorpus dataset + + ``TOKENIZER_MODEL`` can be any accessible Hugging Face tokenizer. + Remember to either pre-download the tokenizer or setup Hugging Face access + otherwise when needed -- see the :ref:`Tokenizer ` section. + + .. note:: + + When training set ``DATA_PATH`` to the specific file name prefix pointing to the ``.bin`` or ``.idx`` + as in the following example: + + .. code-block:: shell + + DATA_PATH="data/bookcorpus_text_sentence" # Change to where your dataset is stored. + +.. container:: model-doc pyt_megatron_lm_train_deepseek-v3-proxy + + If you don't already have the dataset, download the DeepSeek dataset using the following + commands: + + .. code-block:: shell + + mkdir deepseek-datasets + cd deepseek-datasets + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/SlimPajama.json + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/alpaca_zh-train.json + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/alpaca_zh-valid.json + cd .. + bash tools/run_make_pretraining_dataset_megatron.sh deepseek-datasets/SlimPajama.json DeepSeekV3Tokenizer text deepseek-datasets deepseek-ai/DeepSeek-V3 + + To train on this data, update the ``DATA_DIR`` variable to point to the location of your dataset. + + .. code-block:: bash + + MOCK_DATA=0 # Train on real data + + DATA_DIR="/deepseek-datasets" # Change to where your dataset is stored + + Ensure that the files are accessible inside the Docker container. + +.. container:: model-doc pyt_megatron_lm_train_deepseek-v2-lite-16b + + If you don't already have the dataset, download the DeepSeek dataset using the following + commands: + + .. code-block:: shell + + mkdir deepseek-datasets + cd deepseek-datasets + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/SlimPajama.json + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/alpaca_zh-train.json + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/alpaca_zh-valid.json + cd .. + bash tools/run_make_pretraining_dataset_megatron.sh deepseek-datasets/SlimPajama.json DeepSeekV3Tokenizer text deepseek-datasets deepseek-ai/DeepSeek-V3 + + To train on this data, update the ``DATA_DIR`` variable to point to the location of your dataset. + + .. code-block:: bash + + MOCK_DATA=0 # Train on real data + + DATA_DIR="/deepseek-datasets" # Change to where your dataset is stored + +.. container:: model-doc pyt_megatron_lm_train_mixtral-8x7b pyt_megatron_lm_train_mixtral-8x22b-proxy + + If you don't already have the dataset, download the Mixtral dataset using the following + commands: + + .. 
code-block:: shell + + mkdir mixtral-datasets + cd mixtral-datasets + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/mistral-datasets/wudao_mistralbpe_content_document.bin + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/mistral-datasets/wudao_mistralbpe_content_document.idx + + To train on this data, update the ``DATA_DIR`` variable to point to the location of your dataset. + + .. code-block:: bash + + MOCK_DATA=0 # Train on real data + + DATA_DIR="/mixtral-datasets" # Change to where your dataset is stored + + Ensure that the files are accessible inside the Docker container. + +.. container:: model-doc pyt_megatron_lm_train_qwen2.5-7b pyt_megatron_lm_train_qwen2.5-72b + + If you don't already have the dataset, download the Mixtral dataset using the following + commands: + + .. code-block:: shell + + mkdir -p temp/qwen-datasets + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/qwen-datasets/wudao_qwenbpe_text_document.bin + wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/qwen-datasets/wudao_qwenbpe_text_document.idx + + To train on this data, update the ``DATA_DIR`` variable to point to the location of your dataset. + + .. code-block:: bash + + MOCK_DATA=0 # Train on real data + + DATA_DIR="/qwen-datasets" # Change to where your dataset is stored + + Ensure that the files are accessible inside the Docker container. + +Multi-node configuration +------------------------ + +If you're running multi-node training, update the following environment variables. They can +also be passed as command line arguments. Refer to the following example configurations. + +* Change ``localhost`` to the master node's hostname: + + .. code-block:: shell + + MASTER_ADDR="${MASTER_ADDR:-localhost}" + +* Set the number of nodes you want to train on (for instance, ``2``, ``4``, ``8``): + + .. code-block:: shell + + NNODES="${NNODES:-1}" + +* Set the rank of each node (0 for master, 1 for the first worker node, and so on): + + .. code-block:: shell + + NODE_RANK="${NODE_RANK:-0}" + +* Set ``DATA_CACHE_PATH`` to a common directory accessible by all the nodes (for example, an + NFS directory) for multi-node runs: + + .. code-block:: shell + + DATA_CACHE_PATH=/root/cache # Set to a common directory for multi-node runs + +* For multi-node runs, make sure the correct network drivers are installed on the nodes. If + inside a Docker container, either install the drivers inside the Docker container or pass the network + drivers from the host while creating the Docker container. + + .. code-block:: shell + + # Specify which RDMA interfaces to use for communication + export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7 + +.. _amd-megatron-lm-run-training-v256: + +Run training +============ + +Use the following example commands to set up the environment, configure +:ref:`key options `, and run training on +MI300X series accelerators with the AMD Megatron-LM environment. + +Single node training +-------------------- + +.. container:: model-doc pyt_megatron_lm_train_llama-3.3-70b + + To run the training on a single node for Llama 3.3 70B BF16 with FSDP-v2 enabled, add the ``FSDP=1`` argument. + For example, use the following command: + + .. 
code-block:: shell + + TOKENIZER_MODEL=meta-llama/Llama-3.3-70B-Instruct \ + CKPT_FORMAT=torch_dist \ + TEE_OUTPUT=1 \ + RECOMPUTE=1 \ + SEQ_LENGTH=8192 \ + MBS=2 \ + BS=16 \ + TE_FP8=0 \ + TP=1 \ + PP=1 \ + FSDP=1 \ + MODEL_SIZE=70 \ + TOTAL_ITERS=50 \ + bash examples/llama/train_llama3.sh + + .. note:: + + It is suggested to use ``TP=1`` when FSDP is enabled for higher + throughput. FSDP-v2 is not supported with pipeline parallelism, expert + parallelism, MCore's distributed optimizer, gradient accumulation fusion, + or FP16. + +.. container:: model-doc pyt_megatron_lm_train_llama-3.1-8b + + To run training on a single node for Llama 3.1 8B FP8, navigate to the Megatron-LM folder and use the + following command. + + .. code-block:: shell + + TEE_OUTPUT=1 \ + MBS=2 \ + BS=128 \ + TP=1 \ + TE_FP8=1 \ + SEQ_LENGTH=8192 \ + MODEL_SIZE=8 \ + TOTAL_ITERS=50 \ + bash examples/llama/train_llama3.sh + + For Llama 3.1 8B BF16, use the following command: + + .. code-block:: shell + + TEE_OUTPUT=1 \ + MBS=2 \ + BS=128 \ + TP=1 \ + TE_FP8=0 \ + SEQ_LENGTH=8192 \ + MODEL_SIZE=8 \ + TOTAL_ITERS=50 \ + bash examples/llama/train_llama3.sh + +.. container:: model-doc pyt_megatron_lm_train_llama-3.1-70b + + To run the training on a single node for Llama 3.1 70B BF16 with FSDP-v2 enabled, add the ``FSDP=1`` argument. + For example, use the following command: + + .. code-block:: shell + + CKPT_FORMAT=torch_dist \ + TEE_OUTPUT=1 \ + MBS=3 \ + BS=24 \ + TP=1 \ + TE_FP8=0 \ + FSDP=1 \ + RECOMPUTE=1 \ + SEQ_LENGTH=8192 \ + MODEL_SIZE=70 \ + TOTAL_ITERS=50 \ + bash examples/llama/train_llama3.sh + + .. note:: + + It is suggested to use ``TP=1`` when FSDP is enabled for higher + throughput. FSDP-v2 is not supported with pipeline parallelism, expert + parallelism, MCore's distributed optimizer, gradient accumulation fusion, + or FP16. + +.. container:: model-doc pyt_megatron_lm_train_llama-3.1-70b-proxy + + To run the training on a single node for Llama 3.1 70B with proxy, use the following command. + + .. code-block:: shell + + CKPT_FORMAT=torch_dist \ + TEE_OUTPUT=1 \ + RECOMPUTE=1 \ + MBS=3 \ + BS=24 \ + TP=1 \ + TE_FP8=1 \ + SEQ_LENGTH=8192 \ + MODEL_SIZE=70 \ + FSDP=1 \ + TOTAL_ITERS=10 \ + NUM_LAYERS=40 \ + bash examples/llama/train_llama3.sh + + .. note:: + + Use two or more nodes to run the *full* Llama 70B model with FP8 precision. + + .. note:: + + It is suggested to use ``TP=1`` when FSDP is enabled for higher + throughput. FSDP-v2 is not supported with pipeline parallelism, expert + parallelism, MCore's distributed optimizer, gradient accumulation fusion, + or FP16. + +.. container:: model-doc pyt_megatron_lm_train_llama-2-7b + + To run training on a single node for Llama 2 7B FP8, navigate to the Megatron-LM folder and use the + following command. + + .. code-block:: shell + + TEE_OUTPUT=1 \ + MBS=4 \ + BS=256 \ + TP=1 \ + TE_FP8=1 \ + SEQ_LENGTH=4096 \ + MODEL_SIZE=7 \ + TOTAL_ITERS=50 \ + bash examples/llama/train_llama2.sh + + For Llama 2 7B BF16, use the following command: + + .. code-block:: shell + + TEE_OUTPUT=1 \ + MBS=4 \ + BS=256 \ + TP=1 \ + TE_FP8=0 \ + SEQ_LENGTH=4096 \ + MODEL_SIZE=7 \ + TOTAL_ITERS=50 \ + bash examples/llama/train_llama2.sh + +.. container:: model-doc pyt_megatron_lm_train_llama-2-70b + + To run the training on a single node for Llama 2 70B BF16 with FSDP-v2 enabled, add the ``FSDP=1`` argument. + For example, use the following command: + + .. 
code-block:: shell
+
+      CKPT_FORMAT=torch_dist \
+      TEE_OUTPUT=1 \
+      MBS=7 \
+      BS=56 \
+      TP=1 \
+      TE_FP8=0 \
+      FSDP=1 \
+      RECOMPUTE=1 \
+      SEQ_LENGTH=4096 \
+      MODEL_SIZE=70 \
+      TOTAL_ITERS=50 \
+      bash examples/llama/train_llama2.sh
+
+   .. note::
+
+      It is suggested to use ``TP=1`` when FSDP is enabled for higher
+      throughput. FSDP-v2 is not supported with pipeline parallelism, expert
+      parallelism, MCore's distributed optimizer, gradient accumulation fusion,
+      or FP16.
+
+.. container:: model-doc pyt_megatron_lm_train_deepseek-v3-proxy
+
+   To run training on a single node for DeepSeek-V3 (MoE with expert parallel) with 3-layer proxy,
+   navigate to the Megatron-LM folder and use the following command.
+
+   .. code-block:: shell
+
+      export NVTE_FUSED_ATTN_CK=0
+      FORCE_BALANCE=true \
+      RUN_ENV=cluster \
+      MODEL_SIZE=671B \
+      TRAIN_ITERS=50 \
+      SEQ_LEN=4096 \
+      NUM_LAYERS=3 \
+      MICRO_BATCH_SIZE=1 GLOBAL_BATCH_SIZE=32 \
+      PR=bf16 \
+      TP=1 PP=1 ETP=1 EP=8 \
+      GEMM_TUNING=1 \
+      NVTE_CK_USES_BWD_V3=1 \
+      USE_GROUPED_GEMM=true MOE_USE_LEGACY_GROUPED_GEMM=true \
+      GPT_LAYER_IN_TE=true \
+      bash examples/deepseek_v3/train_deepseekv3.sh
+
+.. container:: model-doc pyt_megatron_lm_train_deepseek-v2-lite-16b
+
+   To run training on a single node for DeepSeek-V2-Lite (MoE with expert parallel),
+   navigate to the Megatron-LM folder and use the following command.
+
+   .. code-block:: shell
+
+      export NVTE_FUSED_ATTN_CK=0
+      GEMM_TUNING=1 \
+      PR=bf16 \
+      MBS=4 \
+      AC=none \
+      SEQ_LEN=4096 \
+      PAD_LEN=4096 \
+      TRAIN_ITERS=50 \
+      bash examples/deepseek_v2/train_deepseekv2.sh
+
+.. container:: model-doc pyt_megatron_lm_train_mixtral-8x7b
+
+   To run training on a single node for Mixtral 8x7B (MoE with expert parallel),
+   navigate to the Megatron-LM folder and use the following command.
+
+   .. code-block:: shell
+
+      TOKENIZER_MODEL= \
+      RECOMPUTE_NUM_LAYERS=0 \
+      TEE_OUTPUT=1 \
+      MBS=1 \
+      GBS=16 \
+      TP_SIZE=1 \
+      PP_SIZE=1 \
+      AC=none \
+      PR=bf16 \
+      EP_SIZE=8 \
+      ETP_SIZE=1 \
+      SEQLEN=4096 \
+      FORCE_BALANCE=true \
+      MOCK_DATA=1 \
+      RUN_ENV=cluster \
+      MODEL_SIZE=8x7B \
+      TRAIN_ITERS=50 \
+      bash examples/mixtral/train_mixtral_moe.sh
+
+.. container:: model-doc pyt_megatron_lm_train_mixtral-8x22b-proxy
+
+   To run training on a single node for Mixtral 8x22B (MoE with expert parallel) with 4-layer proxy,
+   navigate to the Megatron-LM folder and use the following command.
+
+   .. code-block:: shell
+
+      TOKENIZER_MODEL= \
+      RECOMPUTE_NUM_LAYERS=4 \
+      TEE_OUTPUT=1 \
+      MBS=1 \
+      GBS=16 \
+      TP_SIZE=1 \
+      PP_SIZE=1 \
+      AC=full \
+      NUM_LAYERS=4 \
+      PR=bf16 \
+      EP_SIZE=8 \
+      ETP_SIZE=1 \
+      SEQLEN=8192 \
+      FORCE_BALANCE=true \
+      MOCK_DATA=1 \
+      RUN_ENV=cluster \
+      MODEL_SIZE=8x22B \
+      TRAIN_ITERS=50 \
+      bash examples/mixtral/train_mixtral_moe.sh
+
+.. container:: model-doc pyt_megatron_lm_train_qwen2.5-7b
+
+   To run training on a single node for Qwen 2.5 7B BF16, use the following
+   command.
+
+   .. code-block:: shell
+
+      bash examples/qwen/train_qwen2.sh TP=1 \
+      CP=1 \
+      PP=1 \
+      MBS=10 \
+      BS=640 \
+      TE_FP8=0 \
+      MODEL_SIZE=7 \
+      SEQ_LENGTH=2048 \
+      TOTAL_ITERS=50 \
+      MOCK_DATA=1 \
+      TOKENIZER_MODEL=Qwen/Qwen2.5-7B
+
+   For FP8, use the following command.
+
+   .. code-block:: shell
+
+      bash examples/qwen/train_qwen2.sh \
+      TP=1 \
+      CP=1 \
+      PP=1 \
+      MBS=10 \
+      BS=640 \
+      TE_FP8=1 \
+      MODEL_SIZE=7 \
+      SEQ_LENGTH=2048 \
+      TOTAL_ITERS=50 \
+      MOCK_DATA=1 \
+      TOKENIZER_MODEL=Qwen/Qwen2.5-7B
+
+.. container:: model-doc pyt_megatron_lm_train_qwen2.5-72b
+
+   To run the training on a single node for Qwen 2.5 72B BF16, use the following command. 
+ + .. code-block:: shell + + bash examples/qwen/train_qwen2.sh \ + FSDP=1 \ + CP=1 \ + PP=1 \ + MBS=3 \ + BS=24 \ + TE_FP8=0 \ + MODEL_SIZE=72 \ + SEQ_LENGTH=2048 \ + TOTAL_ITERS=50 \ + MOCK_DATA=1 \ + TOKENIZER_MODEL=Qwen/Qwen2.5-72B \ + RECOMPUTE_ACTIVATIONS=full \ + CKPT_FORMAT=torch_dist + +Multi-node training examples +---------------------------- + +To run training on multiple nodes, launch the Docker container on each node. +For example, for Llama 3 using a two node setup (``NODE0`` as the master node), +use these commands. + +* On the master node ``NODE0``: + + .. code-block:: shell + + TEE_OUTPUT=1 \ + MBS=2 \ + BS=256 \ + TP=1 \ + TE_FP8=1 \ + SEQ_LENGTH=8192 \ + MODEL_SIZE=8 \ + MASTER_ADDR=IP_NODE0 \ + NNODES=2 \ + NODE_RANK=0 \ + bash examples/llama/train_llama3.sh + +* On the worker node ``NODE1``: + + .. code-block:: shell + + TEE_OUTPUT=1 \ + MBS=2 \ + BS=256 \ + TP=1 \ + TE_FP8=1 \ + SEQ_LENGTH=8192 \ + MODEL_SIZE=8 \ + MASTER_ADDR=IP_NODE0 \ + NNODES=2 \ + NODE_RANK=1 \ + bash examples/llama/train_llama3.sh + +Or, for DeepSeek-V3, an example script ``train_deepseek_v3_slurm.sh`` is +provided in +``__ to +enable training at scale under a SLURM environment. For example, to run +training on 16 nodes, try the following command: + +.. code-block:: shell + + sbatch examples/deepseek_v3/train_deepseek_v3_slurm.sh + +.. _amd-megatron-lm-benchmark-test-vars-v256: + +Key options +----------- + +The benchmark tests support the following sets of variables. + +``TEE_OUTPUT`` + ``1`` to enable training logs or ``0`` to disable. + +``TE_FP8`` + ``0`` for B16 or ``1`` for FP8 -- ``0`` by default. + +``GEMM_TUNING`` + ``1`` to enable GEMM tuning, which boosts performance by using the best GEMM kernels. + +``USE_FLASH_ATTN`` + ``1`` to enable Flash Attention. + +``FSDP`` + ``1`` to enable PyTorch FSDP2. If FSDP is enabled, ``--use-distributed-optimizer``, + ``--overlap-param-gather``, and ``--sequence-parallel`` are automatically disabled. + +``ENABLE_PROFILING`` + ``1`` to enable PyTorch profiling for performance analysis. + +``transformer-impl`` + ``transformer_engine`` to use the Transformer Engine (TE) or ``local`` to disable TE. + +``MODEL_SIZE`` + ``8B`` or ``70B`` for Llama 3 and 3.1. ``7B`` or ``70B`` for Llama 2, for example. + +``TOTAL_ITERS`` + The total number of iterations -- ``10`` by default. + +``MOCK_DATA`` + ``1`` to use mock data or ``0`` to use real data you provide. + +``MBS`` + Micro batch size. + +``BS`` + Global batch size. + +``TP`` / ``TP_SIZE`` + Tensor parallel (``1``, ``2``, ``4``, ``8``). ``TP`` is disabled when ``FSDP`` is turned on. + +``EP`` / ``EP_SIZE`` + Expert parallel for MoE models. + +``SEQ_LENGTH`` + Input sequence length. + +``PR`` + Precision for training. ``bf16`` for BF16 (default) or ``fp8`` for FP8 GEMMs. + +``AC`` + Activation checkpointing (``none``, ``sel``, or ``full``) -- ``sel`` by default. + +``NUM_LAYERS`` + Use reduced number of layers as a proxy model. + +``RECOMPUTE_NUM_LAYERS`` + Number of layers used for checkpointing recompute. + +Previous versions +================= + +See :doc:`megatron-lm-history` to find documentation for previous releases +of the ``ROCm/megatron-lm`` Docker image. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst new file mode 100644 index 000000000..0a80c7c9b --- /dev/null +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst @@ -0,0 +1,602 @@ +.. 
meta:: + :description: How to train a model using Megatron-LM for ROCm. + :keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch + +********************************************** +Training a model with Primus and Megatron-Core +********************************************** + +`Primus `__ is a unified and flexible +LLM training framework designed to streamline training. It streamlines LLM +training on AMD Instinct accelerators using a modular, reproducible configuration paradigm. +Primus is backend-agnostic and supports multiple training engines -- including Megatron-Core. + +.. note:: + + Primus with the Megatron-Core backend is intended to replace ROCm + Megatron-LM in this Dockerized training environment. To learn how to migrate + workloads from Megatron-LM to Primus with Megatron-Core, see + :doc:`previous-versions/megatron-lm-primus-migration-guide`. + +For ease of use, AMD provides a ready-to-use Docker image for MI300 series accelerators +containing essential components for Primus and Megatron-Core. + +.. note:: + + This Docker environment is based on Python 3.10 and Ubuntu 22.04. For an alternative environment with + Python 3.12 and Ubuntu 24.04, see the :doc:`previous ROCm Megatron-LM v25.6 Docker release `. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml + + {% set dockers = data.dockers %} + {% set docker = dockers[0] %} + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} + +.. _amd-primus-megatron-lm-model-support: + +Supported models +================ + +The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. +Some instructions, commands, and training examples in this documentation might +vary by model -- select one to get started. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml + + {% set model_groups = data.model_groups %} + .. raw:: html + +
+
+
Model
+
+ {% for model_group in model_groups %} +
{{ model_group.group }}
+ {% endfor %} +
+
+ +
+
Model variant
+
+ {% for model_group in model_groups %} + {% set models = model_group.models %} + {% for model in models %} + {% if models|length % 3 == 0 %} +
{{ model.model }}
+ {% else %} +
{{ model.model }}
+ {% endif %} + {% endfor %} + {% endfor %} +
+
+
+ +.. note:: + + Some models, such as Llama, require an external license agreement through + a third party (for example, Meta). + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. + +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +.. _mi300x-amd-primus-megatron-lm-training: + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml + + {% set dockers = data.dockers %} + {% set docker = dockers[0] %} + + Environment setup + ================= + + Use the following instructions to set up the environment, configure the script to train models, and + reproduce the benchmark results on MI300X series accelerators with the ``{{ docker.pull_tag }}`` image. + + .. _amd-primus-megatron-lm-requirements: + + Download the Docker image + ------------------------- + + 1. Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + + 2. Launch the Docker container. + + .. code-block:: shell + + docker run -it \ + --device /dev/dri \ + --device /dev/kfd \ + --device /dev/infiniband \ + --network host --ipc host \ + --group-add video \ + --cap-add SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + --shm-size 128G \ + --name primus_training_env \ + {{ docker.pull_tag }} + +3. Use these commands if you exit the ``primus_training_env`` container and need to return to it. + + .. code-block:: shell + + docker start primus_training_env + docker exec -it primus_training_env bash + +The Docker container hosts verified release tag ``v0.1.0-rc1`` of the `Primus +`__ repository. + +.. _amd-primus-megatron-lm-environment-setup: + +Configuration +============= + +Primus defines a training configuration in YAML for each model in +`examples/megatron/configs `__. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml + + {% set model_groups = data.model_groups %} + {% for model_group in model_groups %} + {% for model in model_group.models %} + .. container:: model-doc {{ model.mad_tag }} + + To update training parameters for {{ model.model }}, you can update ``examples/megatron/configs/{{ model.config_name }}``. + Note that training configuration YAML files for other models follow this naming convention. + + {% endfor %} + {% endfor %} + +.. note:: + + See :ref:`Key options ` for more information on configuration options. + +Dataset options +--------------- + +You can use either mock data or real data for training. + +* Mock data can be useful for testing and validation. Use the ``mock_data`` field to toggle between mock and real data. The default + value is ``true`` for enabled. + + .. code-block:: yaml + + mock_data: true + +* If you're using a real dataset, update the ``train_data_path`` field to point to the location of your dataset. + + .. code-block:: bash + + mock_data: false + train_data_path: /path/to/your/dataset + + Ensure that the files are accessible inside the Docker container. + +.. 
_amd-primus-megatron-lm-tokenizer: + +Tokenizer +--------- + +In Primus, each model uses a tokenizer from Hugging Face. For example, Llama +3.1 8B model uses ``tokenizer_model: meta-llama/Llama-3.1-8B`` and +``tokenizer_type: Llama3Tokenizer`` defined in the `llama3.1-8B model +`__ +definition. As such, you need to set the ``HF_TOKEN`` environment variable with +right permissions to access the tokenizer for each model. + +.. code-block:: bash + + # Export your HF_TOKEN in the workspace + export HF_TOKEN= + +.. _amd-primus-megatron-lm-run-training: + +Run training +============ + +Use the following example commands to set up the environment, configure +:ref:`key options `, and run training on +MI300X series accelerators with the AMD Megatron-LM environment. + +Single node training +-------------------- + +To run training on a single node, navigate to ``/workspace/Primus`` and use the following setup command: + +.. code-block:: shell + + pip install -r requirements.txt + export HSA_NO_SCRATCH_RECLAIM=1 + export NVTE_CK_USES_BWD_V3=1 + +Once setup is complete, run the appropriate training command. + +.. container:: model-doc primus_pyt_megatron_lm_train_llama-3.3-70b + + To run pre-training for Llama 3.3 70B BF16, run: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama3.3_70B-pretrain.yaml \ + bash ./examples/run_pretrain.sh \ + --micro_batch_size 2 \ + --global_batch_size 16 \ + --train_iters 50 + +.. container:: model-doc primus_pyt_megatron_lm_train_llama-3.1-8b + + To run pre-training for Llama 3.1 8B FP8, run: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama3.1_8B-pretrain.yaml \ + bash ./examples/run_pretrain.sh \ + --train_iters 50 \ + --fp8 hybrid + + For Llama 3.1 8B BF16, use the following command: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama3.1_8B-pretrain.yaml \ + bash ./examples/run_pretrain.sh --train_iters 50 + +.. container:: model-doc primus_pyt_megatron_lm_train_llama-3.1-70b + + To run pre-training for Llama 3.1 70B BF16, run: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama3.1_70B-pretrain.yaml \ + bash ./examples/run_pretrain.sh \ + --train_iters 50 + + To run the training on a single node for Llama 3.1 70B FP8 with proxy, use the following command: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama3.1_70B-pretrain.yaml \ + bash ./examples/run_pretrain.sh \ + --train_iters 50 \ + --num_layers 40 \ + --fp8 hybrid \ + --no_fp8_weight_transpose_cache true + + .. note:: + + Use two or more nodes to run the *full* Llama 70B model with FP8 precision. + +.. container:: model-doc primus_pyt_megatron_lm_train_llama-2-7b + + To run pre-training for Llama 2 7B FP8, run: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama2_7B-pretrain.yaml \ + bash ./examples/run_pretrain.sh \ + --train_iters 50 \ + --fp8 hybrid + + To run pre-training for Llama 2 7B BF16, run: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama2_7B-pretrain.yaml \ + bash ./examples/run_pretrain.sh --train_iters 50 + +.. container:: model-doc primus_pyt_megatron_lm_train_llama-2-70b + + To run pre-training for Llama 2 70B BF16, run: + + .. code-block:: shell + + EXP=examples/megatron/configs/llama2_70B-pretrain.yaml \ + bash ./examples/run_pretrain.sh --train_iters 50 + +.. container:: model-doc primus_pyt_megatron_lm_train_deepseek-v3-proxy + + To run training on a single node for DeepSeek-V3 (MoE with expert parallel) with 3-layer proxy, + use the following command: + + .. 
code-block:: shell
+
+      EXP=examples/megatron/configs/deepseek_v3-pretrain.yaml \
+      bash examples/run_pretrain.sh \
+      --num_layers 3 \
+      --moe_layer_freq 1 \
+      --train_iters 50
+
+.. container:: model-doc primus_pyt_megatron_lm_train_deepseek-v2-lite-16b
+
+   To run training on a single node for DeepSeek-V2-Lite (MoE with expert parallel),
+   use the following command:
+
+   .. code-block:: shell
+
+      EXP=examples/megatron/configs/deepseek_v2_lite-pretrain.yaml \
+      bash examples/run_pretrain.sh \
+      --global_batch_size 256 \
+      --train_iters 50
+
+.. container:: model-doc primus_pyt_megatron_lm_train_mixtral-8x7b
+
+   To run training on a single node for Mixtral 8x7B (MoE with expert parallel),
+   use the following command:
+
+   .. code-block:: shell
+
+      EXP=examples/megatron/configs/mixtral_8x7B_v0.1-pretrain.yaml \
+      bash examples/run_pretrain.sh --train_iters 50
+
+.. container:: model-doc primus_pyt_megatron_lm_train_mixtral-8x22b-proxy
+
+   To run training on a single node for Mixtral 8x22B (MoE with expert parallel) with 4-layer proxy,
+   use the following command:
+
+   .. code-block:: shell
+
+      EXP=examples/megatron/configs/mixtral_8x22B_v0.1-pretrain.yaml \
+      bash examples/run_pretrain.sh \
+      --num_layers 4 \
+      --pipeline_model_parallel_size 1 \
+      --micro_batch_size 1 \
+      --global_batch_size 16 \
+      --train_iters 50
+
+.. container:: model-doc primus_pyt_megatron_lm_train_qwen2.5-7b
+
+   To run training on a single node for Qwen 2.5 7B BF16, use the following
+   command:
+
+   .. code-block:: shell
+
+      EXP=examples/megatron/configs/qwen2.5_7B-pretrain.yaml \
+      bash examples/run_pretrain.sh --train_iters 50
+
+   For FP8, use the following command.
+
+   .. code-block:: shell
+
+      EXP=examples/megatron/configs/qwen2.5_7B-pretrain.yaml \
+      bash examples/run_pretrain.sh \
+      --train_iters 50 \
+      --fp8 hybrid
+
+.. container:: model-doc primus_pyt_megatron_lm_train_qwen2.5-72b
+
+   To run the training on a single node for Qwen 2.5 72B BF16, use the following command.
+
+   .. code-block:: shell
+
+      EXP=examples/megatron/configs/qwen2.5_72B-pretrain.yaml \
+      bash examples/run_pretrain.sh --train_iters 50
+
+Multi-node training examples
+----------------------------
+
+To run training on multiple nodes, you can use the
+`run_slurm_pretrain.sh `__
+to launch the multi-node workload. Use the following steps to set up your environment:
+
+.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/primus-megatron-benchmark-models.yaml
+
+   {% set dockers = data.dockers %}
+   {% set docker = dockers[0] %}
+
+   .. code-block:: shell
+
+      cd /workspace/Primus/
+      export DOCKER_IMAGE={{ docker.pull_tag }}
+      export HF_TOKEN=
+      export HSA_NO_SCRATCH_RECLAIM=1
+      export NVTE_CK_USES_BWD_V3=1
+      export NCCL_IB_HCA= # specify which RDMA interfaces to use for communication
+      export NCCL_SOCKET_IFNAME= # your Network Interface
+      export GLOO_SOCKET_IFNAME= # your Network Interface
+      export NCCL_IB_GID_INDEX=3 # Set InfiniBand GID index for NCCL communication. Default is 3 for RoCE
+
+.. note::
+
+   * Make sure the correct network drivers are installed on the nodes. If running inside a Docker container, either install the drivers inside the container or pass the network drivers from the host when creating the container.
+   * If ``NCCL_IB_HCA`` and ``NCCL_SOCKET_IFNAME`` are not set, Primus will try to auto-detect them. However, since NICs can vary across clusters, it is encouraged to explicitly export your NCCL parameters for the cluster.
+   * To find your network interface, you can use ``ip a``. 
+   * To find RDMA interfaces, you can use ``ibv_devices`` to get the list of all the RDMA/IB devices.
+
+.. container:: model-doc primus_pyt_megatron_lm_train_llama-3.3-70b
+
+   To train Llama 3.3 70B FP8 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/llama3.3_70B-pretrain.yaml \
+      bash examples/run_slurm_pretrain.sh \
+      --micro_batch_size 4 \
+      --global_batch_size 256 \
+      --recompute_num_layers 80 \
+      --no_fp8_weight_transpose_cache true \
+      --fp8 hybrid
+
+   To train Llama 3.3 70B BF16 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/llama3.3_70B-pretrain.yaml \
+      bash examples/run_slurm_pretrain.sh \
+      --micro_batch_size 1 \
+      --global_batch_size 256 \
+      --recompute_num_layers 12
+
+.. container:: model-doc primus_pyt_megatron_lm_train_llama-3.1-8b
+
+   To train Llama 3.1 8B FP8 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      # Adjust the training parameters. For example, `global_batch_size: 8 * #single_node_bs` for 8 nodes in this case
+      NNODES=8 EXP=examples/megatron/configs/llama3.1_8B-pretrain.yaml \
+      bash ./examples/run_slurm_pretrain.sh \
+      --global_batch_size 1024 \
+      --fp8 hybrid
+
+.. container:: model-doc primus_pyt_megatron_lm_train_llama-3.1-70b
+
+   To train Llama 3.1 70B FP8 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/llama3.1_70B-pretrain.yaml \
+      bash examples/run_slurm_pretrain.sh \
+      --micro_batch_size 4 \
+      --global_batch_size 256 \
+      --recompute_num_layers 80 \
+      --no_fp8_weight_transpose_cache true \
+      --fp8 hybrid
+
+   To train Llama 3.1 70B BF16 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/llama3.1_70B-pretrain.yaml \
+      bash examples/run_slurm_pretrain.sh \
+      --micro_batch_size 1 \
+      --global_batch_size 256 \
+      --recompute_num_layers 12
+
+.. container:: model-doc primus_pyt_megatron_lm_train_llama-2-7b
+
+   To train Llama 2 7B FP8 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      # Adjust the training parameters. For example, `global_batch_size: 8 * #single_node_bs` for 8 nodes in this case
+      NNODES=8 EXP=examples/megatron/configs/llama2_7B-pretrain.yaml bash ./examples/run_slurm_pretrain.sh --global_batch_size 2048 --fp8 hybrid
+
+.. container:: model-doc primus_pyt_megatron_lm_train_llama-2-70b
+
+   To train Llama 2 70B FP8 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/llama2_70B-pretrain.yaml \
+      bash examples/run_slurm_pretrain.sh \
+      --micro_batch_size 10 \
+      --global_batch_size 640 \
+      --recompute_num_layers 80 \
+      --no_fp8_weight_transpose_cache true \
+      --fp8 hybrid
+
+   To train Llama 2 70B BF16 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/llama2_70B-pretrain.yaml \
+      bash ./examples/run_slurm_pretrain.sh \
+      --micro_batch_size 2 \
+      --global_batch_size 1536 \
+      --recompute_num_layers 12
+
+.. container:: model-doc primus_pyt_megatron_lm_train_mixtral-8x7b
+
+   To train Mixtral 8x7B BF16 on 8 nodes, run:
+
+   .. code-block:: shell
+
+      NNODES=8 EXP=examples/megatron/configs/mixtral_8x7B_v0.1-pretrain.yaml \
+      bash examples/run_slurm_pretrain.sh \
+      --micro_batch_size 2 \
+      --global_batch_size 256
+
+.. container:: model-doc primus_pyt_megatron_lm_train_qwen2.5-72b
+
+   To train Qwen 2.5 72B FP8 on 8 nodes, run:
+
+   .. 
code-block:: shell + + NNODES=8 EXP=examples/megatron/configs/qwen2.5_72B-pretrain.yaml \ + bash examples/run_slurm_pretrain.sh \ + --micro_batch_size 8 \ + --global_batch_size 512 \ + --recompute_num_layers 80 \ + --no_fp8_weight_transpose_cache true \ + --fp8 hybrid + +.. _amd-primus-megatron-lm-benchmark-test-vars: + +Key options +----------- + +The following are key options to take note of + +fp8 + ``hybrid`` enables FP8 GEMMs. + +use_torch_fsdp2 + ``use_torch_fsdp2: 1`` enables torch fsdp-v2. If FSDP is enabled, + set ``use_distributed_optimizer`` and ``overlap_param_gather`` to ``false``. + +profile + To enable PyTorch profiling, set these parameters: + + .. code-block:: yaml + + profile: true + use_pytorch_profiler: true + profile_step_end: 7 + profile_step_start: 6 + +train_iters + The total number of iterations (default: 50). + +mock_data + True by default. + +micro_batch_size + Micro batch size. + +global_batch_size + Global batch size. + +recompute_granularity + For activation checkpointing. + +num_layers + For using a reduced number of layers as with proxy models. + +Previous versions +================= + +See :doc:`previous-versions/megatron-lm-history` to find documentation for previous releases +of the ``ROCm/megatron-lm`` Docker image. + +This training environment now uses Primus with Megatron as the primary +configuration. Limited support for the legacy ROCm Megatron-LM is still +available. For instructions on using ROCm Megatron-LM, see the +:doc:`megatron-lm` document. diff --git a/docs/how-to/rocm-for-ai/training/index.rst b/docs/how-to/rocm-for-ai/training/index.rst index 13213c2e9..7f2ce1d97 100644 --- a/docs/how-to/rocm-for-ai/training/index.rst +++ b/docs/how-to/rocm-for-ai/training/index.rst @@ -21,6 +21,8 @@ In this guide, you'll learn about: - Training a model + - :doc:`With Primus (Megatron-LM backend) ` + - :doc:`With Megatron-LM ` - :doc:`With PyTorch ` diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index 8560f0c68..db786f0c4 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -44,8 +44,8 @@ subtrees: title: Training subtrees: - entries: - - file: how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst - title: Train a model with Megatron-LM + - file: how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst + title: Train a model with Primus and Megatron-Core - file: how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst title: Train a model with PyTorch - file: how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst From 1d42f7cc622ccabc752a253ac5b3f288ba78ca34 Mon Sep 17 00:00:00 2001 From: Matt Williams Date: Fri, 22 Aug 2025 11:46:07 -0400 Subject: [PATCH 17/81] Deep learning frameworks edits for scale (#5189) * Deep learning frameworks edits for scale Based on https://ontrack-internal.amd.com/browse/ROCDOC-1809 * update table table * leo comments * formatting * format * update table based on feedback * header * Update machine learning page * headers * Apply suggestions from code review Co-authored-by: anisha-amd * Update .wordlist.txt * formatting * Update docs/how-to/deep-learning-rocm.rst Co-authored-by: Leo Paoletti <164940351+lpaoletti@users.noreply.github.com> --------- Co-authored-by: Matt Williams Co-authored-by: anisha-amd Co-authored-by: Leo Paoletti <164940351+lpaoletti@users.noreply.github.com> --- .wordlist.txt | 2 + docs/how-to/deep-learning-rocm.rst | 146 +++++++++++++++++++++------- docs/how-to/rocm-for-ai/install.rst | 33 ++----- docs/sphinx/_toc.yml.in | 18 ++++ 4 files 
changed, 140 insertions(+), 59 deletions(-) diff --git a/.wordlist.txt b/.wordlist.txt index 9d0f4d6cc..09236fa95 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -124,6 +124,7 @@ ENDPGM EPYC ESXi EoS +fas FBGEMM FFT FFTs @@ -196,6 +197,7 @@ HWE HWS Haswell Higgs +href Hyperparameters Huggingface ICD diff --git a/docs/how-to/deep-learning-rocm.rst b/docs/how-to/deep-learning-rocm.rst index beab5c1c8..16dad363c 100644 --- a/docs/how-to/deep-learning-rocm.rst +++ b/docs/how-to/deep-learning-rocm.rst @@ -2,58 +2,132 @@ :description: How to install deep learning frameworks for ROCm :keywords: deep learning, frameworks, ROCm, install, PyTorch, TensorFlow, JAX, MAGMA, DeepSpeed, ML, AI -******************************************** -Installing deep learning frameworks for ROCm -******************************************** +********************************** +Deep learning frameworks for ROCm +********************************** -ROCm provides a comprehensive ecosystem for deep learning development, including -:ref:`libraries ` for optimized deep learning operations and ROCm-aware versions of popular -deep learning frameworks and libraries such as PyTorch, TensorFlow, and JAX. ROCm works closely with these -frameworks to ensure that framework-specific optimizations take advantage of AMD accelerator and GPU architectures. +Deep learning frameworks provide environments for machine learning, training, fine-tuning, inference, and performance optimization. -The following guides provide information on compatibility and supported -features for these ROCm-enabled deep learning frameworks. +ROCm offers a complete ecosystem for developing and running deep learning applications efficiently. It also provides ROCm-compatible versions of popular frameworks and libraries, such as PyTorch, TensorFlow, JAX, and others. -* :doc:`PyTorch compatibility <../compatibility/ml-compatibility/pytorch-compatibility>` -* :doc:`TensorFlow compatibility <../compatibility/ml-compatibility/tensorflow-compatibility>` -* :doc:`JAX compatibility <../compatibility/ml-compatibility/jax-compatibility>` -* :doc:`verl compatibility <../compatibility/ml-compatibility/verl-compatibility>` -* :doc:`Stanford Megatron-LM compatibility <../compatibility/ml-compatibility/stanford-megatron-lm-compatibility>` -* :doc:`DGL compatibility <../compatibility/ml-compatibility/dgl-compatibility>` -* :doc:`Megablocks compatibility <../compatibility/ml-compatibility/megablocks-compatibility>` -* :doc:`Taichi compatibility <../compatibility/ml-compatibility/taichi-compatibility>` +The AMD ROCm organization actively contributes to open-source development and collaborates closely with framework organizations. This collaboration ensures that framework-specific optimizations effectively leverage AMD GPUs and accelerators. -This chart steps through typical installation workflows for installing deep learning frameworks for ROCm. +The table below summarizes information about ROCm-enabled deep learning frameworks. It includes details on ROCm compatibility and third-party tool support, installation steps and options, and links to GitHub resources. For a complete list of supported framework versions on ROCm, see the :doc:`Compatibility matrix <../compatibility/compatibility-matrix>` topic. -.. image:: ../data/how-to/framework_install_2024_07_04.png - :alt: Flowchart for installing ROCm-aware machine learning frameworks - :align: center +.. list-table:: + :header-rows: 1 + :widths: 5 3 6 3 -See the installation instructions to get started. 
+ * - Framework + - Installation + - Installation options + - GitHub -* :doc:`PyTorch for ROCm ` -* :doc:`TensorFlow for ROCm ` -* :doc:`JAX for ROCm ` -* :doc:`verl for ROCm ` -* :doc:`Stanford Megatron-LM for ROCm ` -* :doc:`DGL for ROCm ` -* :doc:`Megablocks for ROCm ` -* :doc:`Taichi for ROCm ` + * - `PyTorch `_ + - .. raw:: html + + + - + - `Docker image `_ + - `Wheels package `_ + - `ROCm Base Docker image `_ + - `Upstream Docker file `_ + - .. raw:: html + + + + * - `TensorFlow `_ + - .. raw:: html + + + - + - `Docker image `_ + - `Wheels package `_ -.. note:: + - .. raw:: html + + + + * - `JAX `_ + - .. raw:: html + + + - + - `Docker image `_ + - .. raw:: html + + + + * - `verl `_ + - .. raw:: html + + + - + - `Docker image `_ + - .. raw:: html + + + + * - `Stanford Megatron-LM `_ + - .. raw:: html + + + - + - `Docker image `_ + - .. raw:: html + + + + * - `DGL `_ + - .. raw:: html + + + - + - `Docker image `_ + - .. raw:: html + + + + * - `Megablocks `_ + - .. raw:: html + + + - + - `Docker image `_ + - .. raw:: html + + + + * - `Taichi `_ + - .. raw:: html + + + - + - `Docker image `_ + - `Wheels package `_ + + - .. raw:: html + + - For guidance on installing ROCm itself, refer to :doc:`ROCm installation for Linux `. Learn how to use your ROCm deep learning environment for training, fine-tuning, inference, and performance optimization through the following guides. * :doc:`rocm-for-ai/index` -* :doc:`Training ` +* :doc:`Use ROCm for training ` + +* :doc:`Use ROCm for fine-tuning LLMs ` + +* :doc:`Use ROCm for AI inference ` + +* :doc:`Use ROCm for AI inference optimization ` + + + + -* :doc:`Fine-tuning LLMs ` -* :doc:`Inference ` -* :doc:`Inference optimization ` diff --git a/docs/how-to/rocm-for-ai/install.rst b/docs/how-to/rocm-for-ai/install.rst index d9c9c345d..6847d06b4 100644 --- a/docs/how-to/rocm-for-ai/install.rst +++ b/docs/how-to/rocm-for-ai/install.rst @@ -1,14 +1,14 @@ .. meta:: - :description: How to install ROCm and popular machine learning frameworks. + :description: How to install ROCm and popular deep learning frameworks. :keywords: ROCm, AI, LLM, train, fine-tune, FSDP, DeepSpeed, LLaMA, tutorial .. _rocm-for-ai-install: -*********************************************** -Installing ROCm and machine learning frameworks -*********************************************** +******************************************** +Installing ROCm and deep learning frameworks +******************************************** -Before getting started, install ROCm and supported machine learning frameworks. +Before getting started, install ROCm and supported deep learning frameworks. .. grid:: 1 @@ -43,29 +43,16 @@ distribution's package manager. See the following documentation resources to get If you encounter any issues during installation, refer to the :doc:`Installation troubleshooting ` guide. -Machine learning frameworks -=========================== +Deep learning frameworks +======================== -ROCm supports popular machine learning frameworks and libraries including `PyTorch +ROCm supports deep learning frameworks and libraries including `PyTorch `_, `TensorFlow -`_, `JAX `_, and `DeepSpeed -`_. +`_, `JAX `_, and more. -Review the framework installation documentation. For ease-of-use, it's recommended to use official ROCm prebuilt Docker +Review the :doc:`framework installation documentation <../deep-learning-rocm>`. For ease-of-use, it's recommended to use official ROCm prebuilt Docker images with the framework pre-installed. 
-* :doc:`PyTorch for ROCm ` - -* :doc:`TensorFlow for ROCm ` - -* :doc:`JAX for ROCm ` - -* :doc:`verl for ROCm ` - -* :doc:`Stanford Megatron-LM for ROCm ` - -* :doc:`DGL for ROCm ` - Next steps ========== diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index db786f0c4..1bb9177f0 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -27,6 +27,24 @@ subtrees: title: ROCm on Radeon GPUs - file: how-to/deep-learning-rocm.md title: Deep learning frameworks + subtrees: + - entries: + - file: compatibility/ml-compatibility/pytorch-compatibility.rst + title: PyTorch compatibility + - file: compatibility/ml-compatibility/tensorflow-compatibility.rst + title: TensorFlow compatibility + - file: compatibility/ml-compatibility/jax-compatibility.rst + title: JAX compatibility + - file: compatibility/ml-compatibility/verl-compatibility.rst + title: verl compatibility + - file: compatibility/ml-compatibility/stanford-megatron-lm-compatibility.rst + title: Stanford Megatron-LM compatibility + - file: compatibility/ml-compatibility/dgl-compatibility.rst + title: DGL compatibility + - file: compatibility/ml-compatibility/megablocks-compatibility.rst + title: Megablocks compatibility + - file: compatibility/ml-compatibility/taichi-compatibility.rst + title: Taichi compatibility - file: how-to/build-rocm.rst title: Build ROCm from source From 9ea9b33d1422a783dd64b8800ceed6b75409d3fc Mon Sep 17 00:00:00 2001 From: Braden Stefanuk <121893577+bstefanuk@users.noreply.github.com> Date: Tue, 26 Aug 2025 15:12:19 -0600 Subject: [PATCH 18/81] [superbuild] Configure pipeline (#5221) --- .azuredevops/components/rocm-libraries.yml | 24 +++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/.azuredevops/components/rocm-libraries.yml b/.azuredevops/components/rocm-libraries.yml index 6342b8a79..234730749 100644 --- a/.azuredevops/components/rocm-libraries.yml +++ b/.azuredevops/components/rocm-libraries.yml @@ -36,8 +36,10 @@ parameters: - gfortran - git - libdrm-dev + - liblapack-dev - libmsgpack-dev - libnuma-dev + - libopenblas-dev - ninja-build - python3-pip - python3-venv @@ -46,6 +48,8 @@ parameters: default: - joblib - "packaging>=22.0" + - pytest + - pytest-cmake - --upgrade - name: rocmDependencies type: object @@ -98,12 +102,12 @@ jobs: workspace: clean: all steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml parameters: aptPackages: ${{ parameters.aptPackages }} pipModules: ${{ parameters.pipModules }} packageManager: ${{ job.packageManager }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: @@ -134,12 +138,26 @@ jobs: rocm-libraries | ${{ job.os }} | ${{ job.target }} | $(DAY_STRING) rocm-libraries | ${{ job.os }} | ${{ job.target }} rocm-libraries | ${{ job.os }} + - task: Bash@3 + displayName: Add paths for CMake and Python site-packages binaries + inputs: + targetType: inline + script: | + USER_BASE=$(python3 -m site --user-base) + echo "##vso[task.prependpath]$USER_BASE/bin" + echo "##vso[task.setvariable variable=PytestCmakePath]$USER_BASE/share/Pytest/cmake" + displayName: Set cmake configure paths - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} extraBuildFlags: >- - -DROCM_LIBRARIES_SUPERBUILD=ON - -GNinja + 
-D CMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/vendor;$(PytestCmakePath) + -D CMAKE_INCLUDE_PATH=$(Agent.BuildDirectory)/rocm/llvm/include + -D CMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++ + -D CMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache + -D CMAKE_C_COMPILER_LAUNCHER=ccache + -G Ninja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: componentName: ${{ parameters.componentName }} From 9d2868416152cbbcf002d67d49b52d17638cced4 Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Wed, 27 Aug 2025 10:43:07 -0400 Subject: [PATCH 19/81] [Ex CI] enable clr/hip/hipother monorepo builds (#5217) --- .azuredevops/components/HIP.yml | 154 ++++++------------ .../templates/steps/artifact-download.yml | 5 +- 2 files changed, 54 insertions(+), 105 deletions(-) diff --git a/.azuredevops/components/HIP.yml b/.azuredevops/components/HIP.yml index 8790fbf9c..0f4864df7 100644 --- a/.azuredevops/components/HIP.yml +++ b/.azuredevops/components/HIP.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: hip_clr_combined - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -35,93 +54,24 @@ parameters: type: object default: - llvm-project - -# hip and clr are tightly-coupled -# run this same template for both repos -# any changes for clr should just trigger HIP pipeline -# similarly for hipother repo, for Nvidia backend + - ROCR-Runtime - name: jobMatrix type: object default: buildJobs: - - { os: ubuntu2204, packageManager: apt } - - { os: almalinux8, packageManager: dnf } + - { os: ubuntu2204, packageManager: apt, platform: amd } + - { os: ubuntu2204, packageManager: apt, platform: nvidia } + - { os: almalinux8, packageManager: dnf, platform: amd } + - { os: almalinux8, packageManager: dnf, platform: nvidia } -# HIP with AMD backend jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: hip_clr_combined_${{ job.os }}_amd - pool: - vmImage: 'ubuntu-22.04' - ${{ if eq(job.os, 'almalinux8') }}: - container: - image: rocmexternalcicd.azurecr.io/manylinux228:latest - endpoint: ContainerService3 - variables: - - group: common - - template: /.azuredevops/variables-global.yml - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - packageManager: ${{ job.packageManager }} - # checkout triggering repo (either HIP or clr) - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - # if this is triggered by HIP repo, matching repo is clr - # if this is triggered by clr repo, matching repo is HIP - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: matching_repo - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: hipother_repo - - template: ${{ 
variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmDependenciesAMD }} - aggregatePipeline: ${{ parameters.aggregatePipeline }} - os: ${{ job.os }} - # compile clr - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml - parameters: - componentName: clr - cmakeBuildDir: '$(Build.SourcesDirectory)/clr/build' - cmakeSourceDir: '$(Build.SourcesDirectory)/clr' - os: ${{ job.os }} - useAmdclang: false - extraBuildFlags: >- - -DHIP_COMMON_DIR=$(Build.SourcesDirectory)/HIP - -DHIP_PLATFORM=amd - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm - -DROCM_PATH=$(Agent.BuildDirectory)/rocm - -DHIPCC_BIN_DIR=$(Agent.BuildDirectory)/rocm/bin - -DCLR_BUILD_HIP=ON - -DCLR_BUILD_OCL=ON - -GNinja - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml - parameters: - artifactName: amd - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml - parameters: - artifactName: amd - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml - # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - # parameters: - # aptPackages: ${{ parameters.aptPackages }} - # pipModules: ${{ parameters.pipModules }} - # environment: amd - -# HIP with Nvidia backend -- ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: hip_clr_combined_${{ job.os }}_nvidia + - job: ${{ parameters.componentName }}_${{ job.os }}_${{ job.platform }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: vmImage: 'ubuntu-22.04' ${{ if eq(job.os, 'almalinux8') }}: @@ -140,49 +90,45 @@ jobs: pipModules: ${{ parameters.pipModules }} packageManager: ${{ job.packageManager }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - # checkout triggering repo (either HIP or clr) + # full checkout of rocm-systems superrepo, we need clr, hip, and hipother - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} - # if this is triggered by HIP repo, matching repo is clr - # if this is triggered by clr repo, matching repo is HIP - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: matching_repo - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: hipother_repo + # sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmDependenciesNvidia }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} - - script: 'ls -1R $(Agent.BuildDirectory)/rocm' - displayName: 'Artifact listing' - # compile clr + ${{ if eq(job.platform, 'amd') }}: + dependencyList: ${{ parameters.rocmDependenciesAMD }} + ${{ elseif eq(job.platform, 'nvidia') }}: + dependencyList: ${{ parameters.rocmDependenciesNvidia }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: componentName: clr - cmakeBuildDir: '$(Build.SourcesDirectory)/clr/build' - cmakeSourceDir: '$(Build.SourcesDirectory)/clr' + cmakeBuildDir: $(Agent.BuildDirectory)/s/projects/clr/build + cmakeSourceDir: 
$(Agent.BuildDirectory)/s/projects/clr os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- - -DHIP_COMMON_DIR=$(Build.SourcesDirectory)/HIP - -DHIP_PLATFORM=nvidia + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DROCM_PATH=$(Agent.BuildDirectory)/rocm -DHIPCC_BIN_DIR=$(Agent.BuildDirectory)/rocm/bin + -DHIP_COMMON_DIR=$(Agent.BuildDirectory)/s/projects/hip + -DHIPNV_DIR=$(Agent.BuildDirectory)/s/projects/hipother/hipnv + -DHIP_PLATFORM=${{ job.platform }} -DCLR_BUILD_HIP=ON - -DCLR_BUILD_OCL=OFF - -DHIPNV_DIR=$(Build.SourcesDirectory)/hipother/hipnv + -DCLR_BUILD_OCL=ON -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml + parameters: + artifactName: ${{ job.platform }} + os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: - artifactName: nvidia + artifactName: ${{ job.platform }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml - # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - # parameters: - # aptPackages: ${{ parameters.aptPackages }} - # pipModules: ${{ parameters.pipModules }} - # environment: nvidia diff --git a/.azuredevops/templates/steps/artifact-download.yml b/.azuredevops/templates/steps/artifact-download.yml index 7ff67c6a8..e5445fe4e 100644 --- a/.azuredevops/templates/steps/artifact-download.yml +++ b/.azuredevops/templates/steps/artifact-download.yml @@ -25,7 +25,10 @@ steps: - task: DownloadPipelineArtifact@2 displayName: Download ${{ parameters.componentName }} inputs: - itemPattern: '**/*${{ parameters.componentName }}*${{ parameters.fileFilter }}*' + ${{ if eq(parameters.componentName, 'clr') }}: + itemPattern: '**/*${{ parameters.componentName }}*${{ parameters.fileFilter }}*amd*' # filter out nvidia clr artifacts + ${{ else }}: + itemPattern: '**/*${{ parameters.componentName }}*${{ parameters.fileFilter }}*' targetPath: '$(Pipeline.Workspace)/d' allowPartiallySucceededBuilds: true ${{ if parameters.aggregatePipeline }}: From 151a4bd7bc7206e4ec2c2cb383e934c54ebddb2a Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Wed, 27 Aug 2025 13:05:26 -0400 Subject: [PATCH 20/81] [Ex CI] add retries to potentially flaky steps (#5175) --- .azuredevops/components/MIOpen.yml | 2 + .azuredevops/components/Tensile.yml | 1 + .azuredevops/components/rocPyDecode.yml | 1 + .azuredevops/nightly/pytorch.yml | 1 + .azuredevops/nightly/rocm-nightly.yml | 3 +- .../templates/steps/artifact-download.yml | 1 + .../templates/steps/dependencies-apt.yml | 19 ++---- .../steps/dependencies-aqlprofile.yml | 67 ++++++------------- .../templates/steps/dependencies-dnf.yml | 19 ++++-- .../templates/steps/dependencies-other.yml | 1 + .../templates/steps/dependencies-vendor.yml | 1 + .../steps/local-artifact-download.yml | 1 + .../templates/steps/miopen-get-ck-build.yml | 1 + 13 files changed, 52 insertions(+), 66 deletions(-) diff --git a/.azuredevops/components/MIOpen.yml b/.azuredevops/components/MIOpen.yml index b606005c7..cb2bd8c60 100644 --- a/.azuredevops/components/MIOpen.yml +++ b/.azuredevops/components/MIOpen.yml @@ -150,6 +150,7 @@ jobs: downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: Build and install other dependencies + retryCountOnTaskFailure: 3 inputs: targetType: inline workingDirectory: $(Agent.BuildDirectory)/s @@ -230,6 +231,7 @@ jobs: downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: Build and install other dependencies + 
retryCountOnTaskFailure: 3 inputs: targetType: inline workingDirectory: $(Agent.BuildDirectory)/s diff --git a/.azuredevops/components/Tensile.yml b/.azuredevops/components/Tensile.yml index f74cdc56d..3b31727ce 100644 --- a/.azuredevops/components/Tensile.yml +++ b/.azuredevops/components/Tensile.yml @@ -171,6 +171,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - task: DownloadPipelineArtifact@2 displayName: 'Download Pipeline Wheel Files' + retryCountOnTaskFailure: 3 inputs: itemPattern: '**/*${{ job.os }}*.whl' targetPath: $(Agent.BuildDirectory) diff --git a/.azuredevops/components/rocPyDecode.yml b/.azuredevops/components/rocPyDecode.yml index 885b5b51c..6e85a43ef 100644 --- a/.azuredevops/components/rocPyDecode.yml +++ b/.azuredevops/components/rocPyDecode.yml @@ -190,6 +190,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - task: DownloadPipelineArtifact@2 displayName: 'Download Pipeline Wheel Files' + retryCountOnTaskFailure: 3 inputs: itemPattern: '**/*.whl' targetPath: $(Agent.BuildDirectory) diff --git a/.azuredevops/nightly/pytorch.yml b/.azuredevops/nightly/pytorch.yml index 19daf1d8c..995206f7d 100644 --- a/.azuredevops/nightly/pytorch.yml +++ b/.azuredevops/nightly/pytorch.yml @@ -397,6 +397,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - task: DownloadPipelineArtifact@2 displayName: 'Download Pipeline Wheel Files' + retryCountOnTaskFailure: 3 inputs: itemPattern: '**/*.whl' targetPath: $(Agent.BuildDirectory) diff --git a/.azuredevops/nightly/rocm-nightly.yml b/.azuredevops/nightly/rocm-nightly.yml index 75f64304b..5b28e12ae 100644 --- a/.azuredevops/nightly/rocm-nightly.yml +++ b/.azuredevops/nightly/rocm-nightly.yml @@ -93,7 +93,7 @@ schedules: jobs: - ${{ each job in parameters.jobList }}: - job: nightly_${{ job.os }}_${{ job.target }} - timeoutInMinutes: 90 + timeoutInMinutes: 120 variables: - group: common - template: /.azuredevops/variables-global.yml @@ -226,6 +226,7 @@ jobs: cat Dockerfile - task: Docker@2 displayName: Build and upload Docker image + retryCountOnTaskFailure: 3 inputs: containerRegistry: ContainerService3 repository: 'nightly-${{ job.os }}-${{ job.target }}' diff --git a/.azuredevops/templates/steps/artifact-download.yml b/.azuredevops/templates/steps/artifact-download.yml index e5445fe4e..03855af49 100644 --- a/.azuredevops/templates/steps/artifact-download.yml +++ b/.azuredevops/templates/steps/artifact-download.yml @@ -24,6 +24,7 @@ parameters: steps: - task: DownloadPipelineArtifact@2 displayName: Download ${{ parameters.componentName }} + retryCountOnTaskFailure: 3 inputs: ${{ if eq(parameters.componentName, 'clr') }}: itemPattern: '**/*${{ parameters.componentName }}*${{ parameters.fileFilter }}*amd*' # filter out nvidia clr artifacts diff --git a/.azuredevops/templates/steps/dependencies-apt.yml b/.azuredevops/templates/steps/dependencies-apt.yml index a73dc3faa..7a35dcd32 100644 --- a/.azuredevops/templates/steps/dependencies-apt.yml +++ b/.azuredevops/templates/steps/dependencies-apt.yml @@ -10,6 +10,7 @@ steps: - ${{ if eq(parameters.registerROCmPackages, true) }}: - task: Bash@3 displayName: 'Register AMDGPU & ROCm repos (apt)' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: | @@ -20,7 +21,8 @@ steps: echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600 sudo apt update - task: Bash@3 - displayName: 'sudo apt-get update' + displayName: 'APT update and install packages' + 
retryCountOnTaskFailure: 3 inputs: targetType: inline script: | @@ -28,15 +30,6 @@ steps: echo "deb http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list.d/default.list echo "deb http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list.d/default.list echo "deb http://archive.ubuntu.com/ubuntu/ jammy-security main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list.d/default.list - sudo DEBIAN_FRONTEND=noninteractive apt-get --yes update -- task: Bash@3 - displayName: 'sudo apt-get fix' - inputs: - targetType: inline - script: sudo DEBIAN_FRONTEND=noninteractive apt-get --yes --fix-broken install -- ${{ if gt(length(parameters.aptPackages), 0) }}: - - task: Bash@3 - displayName: 'sudo apt-get install ...' - inputs: - targetType: inline - script: sudo DEBIAN_FRONTEND=noninteractive apt-get --yes --fix-missing install ${{ join(' ', parameters.aptPackages) }} + sudo DEBIAN_FRONTEND=noninteractive apt-get --yes update && \ + sudo DEBIAN_FRONTEND=noninteractive apt-get --yes --fix-broken install && \ + sudo DEBIAN_FRONTEND=noninteractive apt-get --yes --fix-missing install ${{ join(' ', parameters.aptPackages) }} diff --git a/.azuredevops/templates/steps/dependencies-aqlprofile.yml b/.azuredevops/templates/steps/dependencies-aqlprofile.yml index 9c3cf836c..4ff4675d1 100644 --- a/.azuredevops/templates/steps/dependencies-aqlprofile.yml +++ b/.azuredevops/templates/steps/dependencies-aqlprofile.yml @@ -5,51 +5,28 @@ parameters: steps: - task: Bash@3 - displayName: Get aqlprofile package name - inputs: - targetType: inline - ${{ if eq(parameters.os, 'ubuntu2204') }}: - script: | - export packageName=$(curl -s https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION)/pool/main/h/hsa-amd-aqlprofile/ | grep -oP "href=\"\K[^\"]*$(lsb_release -rs)[^\"]*\.deb") - echo "##vso[task.setvariable variable=packageName;isreadonly=true]$packageName" - ${{ if eq(parameters.os, 'almalinux8') }}: - script: | - export packageName=$(curl -s https://repo.radeon.com/rocm/rhel8/$(REPO_RADEON_VERSION)/main/ | grep -oP "hsa-amd-aqlprofile-[^\"]+\.rpm" | head -n1) - echo "##vso[task.setvariable variable=packageName;isreadonly=true]$packageName" -- task: Bash@3 - displayName: 'Download aqlprofile' - inputs: - targetType: inline - workingDirectory: '$(Pipeline.Workspace)' - ${{ if eq(parameters.os, 'ubuntu2204') }}: - script: wget -nv https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION)/pool/main/h/hsa-amd-aqlprofile/$(packageName) - ${{ if eq(parameters.os, 'almalinux8') }}: - script: wget -nv https://repo.radeon.com/rocm/rhel8/$(REPO_RADEON_VERSION)/main/$(packageName) -- task: Bash@3 - displayName: 'Extract aqlprofile' - inputs: - targetType: inline - workingDirectory: '$(Pipeline.Workspace)' - ${{ if eq(parameters.os, 'ubuntu2204') }}: - script: | - mkdir hsa-amd-aqlprofile - dpkg-deb -R $(packageName) hsa-amd-aqlprofile - ${{ if eq(parameters.os, 'almalinux8') }}: - script: | - mkdir hsa-amd-aqlprofile - sudo dnf -y install rpm-build cpio - rpm2cpio $(packageName) | (cd hsa-amd-aqlprofile && cpio -idmv) -- task: Bash@3 - displayName: 'Copy aqlprofile files' + displayName: Download and install aqlprofile + retryCountOnTaskFailure: 3 inputs: targetType: inline + workingDirectory: $(Agent.BuildDirectory) script: | - mkdir -p $(Agent.BuildDirectory)/rocm - cp -R hsa-amd-aqlprofile/opt/rocm-*/* $(Agent.BuildDirectory)/rocm - workingDirectory: '$(Pipeline.Workspace)' -- task: 
Bash@3 - displayName: 'Clean up aqlprofile' - inputs: - targetType: inline - script: rm -rf hsa-amd-aqlprofile $(packageName) - workingDirectory: '$(Pipeline.Workspace)' + set -e + if [ "${{ parameters.os }}" = "ubuntu2204" ]; then + packageName=$(curl -s https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION)/pool/main/h/hsa-amd-aqlprofile/ | grep -oP "href=\"\K[^\"]*$(lsb_release -rs)[^\"]*\.deb") && \ + wget -nv https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION)/pool/main/h/hsa-amd-aqlprofile/$packageName && \ + mkdir -p hsa-amd-aqlprofile && \ + dpkg-deb -R $packageName hsa-amd-aqlprofile + elif [ "${{ parameters.os }}" = "almalinux8" ]; then + sudo dnf -y install rpm-build cpio && \ + packageName=$(curl -s https://repo.radeon.com/rocm/rhel8/$(REPO_RADEON_VERSION)/main/ | grep -oP "hsa-amd-aqlprofile-[^\"]+\.rpm" | head -n1) && \ + wget -nv https://repo.radeon.com/rocm/rhel8/$(REPO_RADEON_VERSION)/main/$packageName && \ + mkdir -p hsa-amd-aqlprofile && \ + rpm2cpio $packageName | (cd hsa-amd-aqlprofile && cpio -idmv) + else + echo "Unsupported OS: ${{ parameters.os }}" + exit 1 + fi && \ + mkdir -p $(Agent.BuildDirectory)/rocm && \ + cp -R hsa-amd-aqlprofile/opt/rocm-*/* $(Agent.BuildDirectory)/rocm && \ + rm -rf hsa-amd-aqlprofile $packageName diff --git a/.azuredevops/templates/steps/dependencies-dnf.yml b/.azuredevops/templates/steps/dependencies-dnf.yml index 432408cf7..81d2a045e 100644 --- a/.azuredevops/templates/steps/dependencies-dnf.yml +++ b/.azuredevops/templates/steps/dependencies-dnf.yml @@ -89,6 +89,7 @@ steps: - ${{ if eq(parameters.registerROCmPackages, true) }}: - task: Bash@3 displayName: 'Register AMDGPU & ROCm repos (dnf)' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: | @@ -109,12 +110,13 @@ steps: sudo dnf makecache - task: Bash@3 displayName: 'Install base dnf packages' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: | - sudo dnf config-manager --set-enabled powertools # rpm fusion free repo for some dependencies - sudo dnf -y install https://download1.rpmfusion.org/free/el/rpmfusion-free-release-8.noarch.rpm + sudo dnf config-manager --set-enabled powertools && \ + sudo dnf -y install https://download1.rpmfusion.org/free/el/rpmfusion-free-release-8.noarch.rpm && \ sudo dnf -y install ${{ join(' ', parameters.basePackages) }} - task: Bash@3 displayName: 'Check gcc environment' @@ -128,6 +130,7 @@ steps: g++ -print-file-name=libstdc++.so - task: Bash@3 displayName: 'Set python 3.11 as default' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: | @@ -142,18 +145,20 @@ steps: - ${{ if eq(pkg, 'ninja-build') }}: - task: Bash@3 displayName: 'Install ninja 1.11.1' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: | - curl -LO https://github.com/ninja-build/ninja/releases/download/v1.11.1/ninja-linux.zip - sudo dnf -y install unzip - unzip ninja-linux.zip - sudo mv ninja /usr/local/bin/ninja - sudo chmod +x /usr/local/bin/ninja + sudo dnf -y install unzip && \ + curl -LO https://github.com/ninja-build/ninja/releases/download/v1.11.1/ninja-linux.zip && \ + unzip ninja-linux.zip && \ + sudo mv ninja /usr/local/bin/ninja && \ + sudo chmod +x /usr/local/bin/ninja && \ echo "##vso[task.prependpath]/usr/local/bin" - ${{ if ne(parameters.aptToDnfMap[pkg], '') }}: - task: Bash@3 displayName: 'dnf install ${{ parameters.aptToDnfMap[pkg] }}' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: | diff --git a/.azuredevops/templates/steps/dependencies-other.yml 
b/.azuredevops/templates/steps/dependencies-other.yml index 177bbac8a..b39b32945 100644 --- a/.azuredevops/templates/steps/dependencies-other.yml +++ b/.azuredevops/templates/steps/dependencies-other.yml @@ -27,6 +27,7 @@ steps: - ${{ if gt(length(parameters.pipModules), 0) }}: - task: Bash@3 displayName: 'pip install ...' + retryCountOnTaskFailure: 3 inputs: targetType: inline script: python3 -m pip install -v --force-reinstall ${{ join(' ', parameters.pipModules) }} diff --git a/.azuredevops/templates/steps/dependencies-vendor.yml b/.azuredevops/templates/steps/dependencies-vendor.yml index 571877d1e..8d885b553 100644 --- a/.azuredevops/templates/steps/dependencies-vendor.yml +++ b/.azuredevops/templates/steps/dependencies-vendor.yml @@ -17,6 +17,7 @@ steps: - ${{ each dependency in parameters.dependencyList }}: - task: DownloadPipelineArtifact@2 displayName: Download ${{ dependency }} + retryCountOnTaskFailure: 3 inputs: project: ROCm-CI buildType: specific diff --git a/.azuredevops/templates/steps/local-artifact-download.yml b/.azuredevops/templates/steps/local-artifact-download.yml index 24d00ce0e..d9c9fe328 100644 --- a/.azuredevops/templates/steps/local-artifact-download.yml +++ b/.azuredevops/templates/steps/local-artifact-download.yml @@ -33,6 +33,7 @@ parameters: steps: - task: DownloadPipelineArtifact@2 displayName: Download ${{ parameters.preTargetFilter}}*${{ parameters.os }}_${{ parameters.gpuTarget}}*${{ parameters.postTargetFilter}} + retryCountOnTaskFailure: 3 inputs: ${{ if eq(parameters.buildType, 'specific') }}: buildType: specific diff --git a/.azuredevops/templates/steps/miopen-get-ck-build.yml b/.azuredevops/templates/steps/miopen-get-ck-build.yml index 6c6d44407..03803e3ee 100644 --- a/.azuredevops/templates/steps/miopen-get-ck-build.yml +++ b/.azuredevops/templates/steps/miopen-get-ck-build.yml @@ -7,6 +7,7 @@ steps: - task: Bash@3 name: downloadCKBuild displayName: Download specific CK build + retryCountOnTaskFailure: 3 env: CXX: $(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++ CC: $(Agent.BuildDirectory)/rocm/llvm/bin/amdclang From eac9772fff0f485df2d06fb61330e45413406d28 Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Wed, 27 Aug 2025 13:05:51 -0400 Subject: [PATCH 21/81] [Ex CI] add temporary downstream path from rocBLAS to hipBLAS (#5184) --- .azuredevops/components/rocBLAS.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.azuredevops/components/rocBLAS.yml b/.azuredevops/components/rocBLAS.yml index 4fb70d380..6aab7ebb3 100644 --- a/.azuredevops/components/rocBLAS.yml +++ b/.azuredevops/components/rocBLAS.yml @@ -115,6 +115,13 @@ parameters: # buildDependsOn: # - rocBLAS_build # - rocPRIM_build + # temporary rocblas->hipblas downstream path while the SOLVERs are disabled + - hipBLAS: + name: hipBLAS + sparseCheckoutDir: projects/hipblas + skipUnifiedBuild: 'false' + buildDependsOn: + - rocBLAS_build jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: From 977e9c22950a47f1bbe8aa139e000c787cc29387 Mon Sep 17 00:00:00 2001 From: Daniel Su Date: Wed, 27 Aug 2025 13:06:08 -0400 Subject: [PATCH 22/81] [Ex CI] change hip-clr pipeline ID (#5230) --- .azuredevops/templates/steps/dependencies-rocm.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 124ddd68b..86d1b58e9 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -47,8 +47,8 @@ parameters: 
developBranch: aomp-dev hasGpuTarget: false clr: - pipelineId: 145 - developBranch: amd-staging + pipelineId: 335 + developBranch: develop hasGpuTarget: false composable_kernel: pipelineId: 86 @@ -59,8 +59,8 @@ parameters: developBranch: rocm hasGpuTarget: false HIP: - pipelineId: 93 - developBranch: amd-staging + pipelineId: 335 + developBranch: develop hasGpuTarget: false hip-tests: pipelineId: 233 From c34fddb26a296d5b95aa98ade938bf301b38f7d4 Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Tue, 2 Sep 2025 13:28:19 -0600 Subject: [PATCH 23/81] Add boost deps (#5235) --- .azuredevops/components/hipBLASLt.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.azuredevops/components/hipBLASLt.yml b/.azuredevops/components/hipBLASLt.yml index d8bfbd0bb..b2633e84d 100644 --- a/.azuredevops/components/hipBLASLt.yml +++ b/.azuredevops/components/hipBLASLt.yml @@ -35,6 +35,8 @@ parameters: - ccache - gfortran - git + - libboost-filesystem-dev + - libboost-program-options-dev - libdrm-dev - liblapack-dev - libmsgpack-dev From b6647dfb2231e8991fb2ee5e80ae706b97080b16 Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Wed, 3 Sep 2025 11:35:53 -0600 Subject: [PATCH 24/81] Add spdlog source builds (#5247) --- .azuredevops/dependencies/spdlog.yml | 64 ++++++++++++++++++++++++++++ .azuredevops/tag-builds/spdlog.yml | 16 +++++++ 2 files changed, 80 insertions(+) create mode 100644 .azuredevops/dependencies/spdlog.yml create mode 100644 .azuredevops/tag-builds/spdlog.yml diff --git a/.azuredevops/dependencies/spdlog.yml b/.azuredevops/dependencies/spdlog.yml new file mode 100644 index 000000000..74f997fb5 --- /dev/null +++ b/.azuredevops/dependencies/spdlog.yml @@ -0,0 +1,64 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + - libfmt-dev + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt, spdlogVersion: "v1.9.2"} + - { os: almalinux8, packageManager: dnf, spdlogVersion: "v1.5.0"} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: spdlog_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone spdlog ${{ job.spdlogVersion }} + inputs: + targetType: inline + script: git clone https://github.com/gabime/spdlog.git -b ${{ job.spdlogVersion }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/spdlog/build + cmakeSourceDir: $(Agent.BuildDirectory)/spdlog + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -DSPDLOG_USE_STD_FORMAT=OFF + -DSPDLOG_FMT_EXTERNAL_HO=ON + -DSPDLOG_INSTALL=ON + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} 
diff --git a/.azuredevops/tag-builds/spdlog.yml b/.azuredevops/tag-builds/spdlog.yml new file mode 100644 index 000000000..0d8de151e --- /dev/null +++ b/.azuredevops/tag-builds/spdlog.yml @@ -0,0 +1,16 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/spdlog.yml From df3ea802908d1815b6f53271a59cc176337feba0 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 20 Aug 2025 16:52:43 -0400 Subject: [PATCH 25/81] Enable Roctracer Monorepo --- .azuredevops/components/roctracer.yml | 130 ++++++++++++++++---------- 1 file changed, 80 insertions(+), 50 deletions(-) diff --git a/.azuredevops/components/roctracer.yml b/.azuredevops/components/roctracer.yml index d00c03ecc..503cd18bd 100644 --- a/.azuredevops/components/roctracer.yml +++ b/.azuredevops/components/roctracer.yml @@ -8,6 +8,22 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -65,6 +81,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -87,6 +107,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} @@ -94,6 +115,8 @@ jobs: gpuTarget: ${{ job.target }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} # the linker flags will not affect ubuntu2204 builds as the paths do not exist - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: @@ -109,10 +132,13 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml @@ -123,53 +149,57 @@ jobs: # gpuTarget: ${{ job.target }} # registerROCmPackages: true -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} - dependsOn: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) 
}}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - packageManager: ${{ job.packageManager }} - registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: roctracer - testExecutable: $(Agent.BuildDirectory)/rocm/share/roctracer/run_tests.sh - testParameters: '' - testDir: $(Agent.BuildDirectory) - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} - registerROCmPackages: true +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} + dependsOn: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + preTargetFilter: ${{ parameters.componentName }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + 
componentName: ${{ parameters.componentName }} + testExecutable: $(Agent.BuildDirectory)/rocm/share/roctracer/run_tests.sh + testParameters: '' + testDir: $(Agent.BuildDirectory) + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} + registerROCmPackages: true From 2628812fc42be2574d47a938be248d8352062121 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 21 Aug 2025 18:18:00 -0400 Subject: [PATCH 26/81] [Ex CI] Enable rocprofiler-compute monorepo --- .../components/rocprofiler-compute.yml | 183 ++++++++++-------- 1 file changed, 107 insertions(+), 76 deletions(-) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index ed83b277a..6f307747f 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocPRIM - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -77,7 +96,11 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: rocprofiler_compute_build_${{ job.target }} + - job: ${{ parameters.componentName }}_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -94,15 +117,19 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: extraBuildFlags: >- -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml @@ -111,78 +138,82 @@ jobs: # pipModules: ${{ parameters.pipModules }} # gpuTarget: ${{ job.target }} -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocprofiler_compute_test_${{ job.target }} - timeoutInMinutes: 120 - dependsOn: rocprofiler_compute_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - 
template: /.azuredevops/variables-global.yml - - name: PYTHON_VERSION - value: 3.10 - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - - task: Bash@3 - displayName: Add en_US.UTF-8 locale - inputs: - targetType: inline - script: | - sudo locale-gen en_US.UTF-8 - sudo update-locale - locale -a - - task: Bash@3 - displayName: Add ROCm binaries to PATH - inputs: - targetType: inline - script: | - echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" - echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml - parameters: - extraBuildFlags: >- - -DCMAKE_HIP_ARCHITECTURES=${{ job.target }} - -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang - -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm - -DROCM_PATH=$(Agent.BuildDirectory)/rocm - -DCMAKE_BUILD_TYPE=Release - -DENABLE_TESTS=ON - -DINSTALL_TESTS=ON - -GNinja - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocprofiler-compute - testDir: $(Build.BinariesDirectory)/libexec/rocprofiler-compute - testExecutable: ROCM_PATH=$(Agent.BuildDirectory)/rocm ctest - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ${{ parameters.componentName }}_test_${{ job.target }} + timeoutInMinutes: 120 + dependsOn: ${{ parameters.componentName }}_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: PYTHON_VERSION + value: 3.10 + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + 
parameters: + preTargetFilter: ${{ parameters.componentName }} + gpuTarget: ${{ job.target }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Add en_US.UTF-8 locale + inputs: + targetType: inline + script: | + sudo locale-gen en_US.UTF-8 + sudo update-locale + locale -a + - task: Bash@3 + displayName: Add ROCm binaries to PATH + inputs: + targetType: inline + script: | + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + extraBuildFlags: >- + -DCMAKE_HIP_ARCHITECTURES=${{ job.target }} + -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang + -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DROCM_PATH=$(Agent.BuildDirectory)/rocm + -DCMAKE_BUILD_TYPE=Release + -DENABLE_TESTS=ON + -DINSTALL_TESTS=ON + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: $(Build.BinariesDirectory)/libexec/rocprofiler-compute + testExecutable: ROCM_PATH=$(Agent.BuildDirectory)/rocm ctest + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} From ceabccad83b83ffc3a2a74eddec1138f2947dee8 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Mon, 25 Aug 2025 11:10:20 -0400 Subject: [PATCH 27/81] Fixed componentName --- .azuredevops/components/rocprofiler-compute.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index 6f307747f..b80939806 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -1,7 +1,7 @@ parameters: - name: componentName type: string - default: rocPRIM + default: rocprofiler-compute - name: checkoutRepo type: string default: 'self' From b0abc43c469ab14cb19be5f2dd87e61897432109 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Mon, 25 Aug 2025 11:22:18 -0400 Subject: [PATCH 28/81] Added sparseCheckout to testjob template --- .azuredevops/components/rocprofiler-compute.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index b80939806..4ccb47c65 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -166,6 +166,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ component.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml parameters: preTargetFilter: ${{ parameters.componentName }} From bff5c4a955542d97b9394a585684000f378706de Mon Sep 
17 00:00:00 2001 From: amd-hsivasun Date: Mon, 25 Aug 2025 11:27:05 -0400 Subject: [PATCH 29/81] Fixed sparseCheckoutDir --- .azuredevops/components/rocprofiler-compute.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index 4ccb47c65..71cee6f30 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -166,7 +166,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} - sparseCheckoutDir: ${{ component.sparseCheckoutDir }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml parameters: preTargetFilter: ${{ parameters.componentName }} From e68d9e9ce2a0e7848e6fb4b41e55af4edaef9211 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Mon, 25 Aug 2025 11:32:36 -0400 Subject: [PATCH 30/81] Update rocprofiler-compute.yml --- .azuredevops/components/rocprofiler-compute.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index 71cee6f30..bb3282be0 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -1,7 +1,7 @@ parameters: - name: componentName type: string - default: rocprofiler-compute + default: rocprofiler_compute - name: checkoutRepo type: string default: 'self' From c486c39b50fe27c41aebdcf014555457a3495cd4 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Mon, 25 Aug 2025 11:36:28 -0400 Subject: [PATCH 31/81] Update rocprofiler-compute.yml Reverted Component name and updated job names --- .azuredevops/components/rocprofiler-compute.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index bb3282be0..d1bbaf3f6 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -1,7 +1,7 @@ parameters: - name: componentName type: string - default: rocprofiler_compute + default: rocprofiler-compute - name: checkoutRepo type: string default: 'self' @@ -96,7 +96,7 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: ${{ parameters.componentName }}_build_${{ job.target }} + - job: rocprofiler_compute_build_${{ job.target }} ${{ if parameters.buildDependsOn }}: dependsOn: - ${{ each build in parameters.buildDependsOn }}: @@ -140,7 +140,7 @@ jobs: - ${{ if eq(parameters.unifiedBuild, False) }}: - ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ${{ parameters.componentName }}_test_${{ job.target }} + - job: rocprofiler_compute_test_${{ job.target }} timeoutInMinutes: 120 dependsOn: ${{ parameters.componentName }}_build_${{ job.target }} condition: From 07cb61f96942334f5a152cfdb252188dabbcf855 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Mon, 25 Aug 2025 11:38:53 -0400 Subject: [PATCH 32/81] Update testjob dependsOn --- .azuredevops/components/rocprofiler-compute.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index d1bbaf3f6..d15414469 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -142,7 
+142,7 @@ jobs: - ${{ each job in parameters.jobMatrix.testJobs }}: - job: rocprofiler_compute_test_${{ job.target }} timeoutInMinutes: 120 - dependsOn: ${{ parameters.componentName }}_build_${{ job.target }} + dependsOn: rocprofiler_compute_build_${{ job.target }} condition: and(succeeded(), eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), From f1be2d291a2651805e4151642aac10af07a66cf4 Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Wed, 3 Sep 2025 13:26:18 -0600 Subject: [PATCH 33/81] Add fmtlib version that works with spdlog (#5249) --- .azuredevops/dependencies/fmtlib.yml | 67 +++++++++++++++++++ .azuredevops/dependencies/spdlog.yml | 17 +++-- .azuredevops/tag-builds/fmtlib.yml | 23 +++++++ .azuredevops/tag-builds/spdlog.yml | 9 ++- .../templates/steps/dependencies-vendor.yml | 2 + 5 files changed, 112 insertions(+), 6 deletions(-) create mode 100644 .azuredevops/dependencies/fmtlib.yml create mode 100644 .azuredevops/tag-builds/fmtlib.yml diff --git a/.azuredevops/dependencies/fmtlib.yml b/.azuredevops/dependencies/fmtlib.yml new file mode 100644 index 000000000..c1ee707c4 --- /dev/null +++ b/.azuredevops/dependencies/fmtlib.yml @@ -0,0 +1,67 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: fmtlibVersion + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + - libfmt-dev + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: fmtlib_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone fmtlib ${{ parameters.fmtlibVersion }} + inputs: + targetType: inline + script: git clone https://github.com/fmtlib/fmt.git -b ${{ parameters.fmtlibVersion }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/fmt/build + cmakeSourceDir: $(Agent.BuildDirectory)/fmt + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -DFMT_SYSTEM_HEADERS=ON + -DFMT_INSTALL=ON + -DFMT_TEST=OFF + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/dependencies/spdlog.yml b/.azuredevops/dependencies/spdlog.yml index 74f997fb5..f561f8a52 100644 --- a/.azuredevops/dependencies/spdlog.yml +++ b/.azuredevops/dependencies/spdlog.yml @@ -5,20 +5,22 @@ parameters: - name: checkoutRef type: string default: '' +- name: spdlogVersion + type: string + default: '' - name: aptPackages type: object default: - cmake - git - ninja-build - - libfmt-dev - name: jobMatrix type: object default: buildJobs: - - { os: ubuntu2204, packageManager: apt, spdlogVersion: "v1.9.2"} - - { os: almalinux8, 
packageManager: dnf, spdlogVersion: "v1.5.0"} + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: @@ -41,11 +43,15 @@ jobs: aptPackages: ${{ parameters.aptPackages }} packageManager: ${{ job.packageManager }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-vendor.yml + parameters: + dependencyList: + - fmtlib - task: Bash@3 - displayName: Clone spdlog ${{ job.spdlogVersion }} + displayName: Clone spdlog ${{ parameters.spdlogVersion }} inputs: targetType: inline - script: git clone https://github.com/gabime/spdlog.git -b ${{ job.spdlogVersion }} + script: git clone https://github.com/gabime/spdlog.git -b ${{ parameters.spdlogVersion }} workingDirectory: $(Agent.BuildDirectory) - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: @@ -54,6 +60,7 @@ jobs: cmakeSourceDir: $(Agent.BuildDirectory)/spdlog useAmdclang: false extraBuildFlags: >- + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/vendor -DCMAKE_BUILD_TYPE=Release -DSPDLOG_USE_STD_FORMAT=OFF -DSPDLOG_FMT_EXTERNAL_HO=ON diff --git a/.azuredevops/tag-builds/fmtlib.yml b/.azuredevops/tag-builds/fmtlib.yml new file mode 100644 index 000000000..37d807b67 --- /dev/null +++ b/.azuredevops/tag-builds/fmtlib.yml @@ -0,0 +1,23 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: fmtlibVersion + type: string + default: "11.1.3" + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/fmtlib.yml + parameters: + fmtlibVersion: ${{ parameters.fmtlibVersion }} diff --git a/.azuredevops/tag-builds/spdlog.yml b/.azuredevops/tag-builds/spdlog.yml index 0d8de151e..3fbf62288 100644 --- a/.azuredevops/tag-builds/spdlog.yml +++ b/.azuredevops/tag-builds/spdlog.yml @@ -2,6 +2,11 @@ variables: - group: common - template: /.azuredevops/variables-global.yml +parameters: +- name: fmtlibVersion + type: string + default: "v1.15.1" + resources: repositories: - repository: pipelines_repo @@ -13,4 +18,6 @@ trigger: none pr: none jobs: - - template: ${{ variables.CI_DEPENDENCIES_PATH }}/spdlog.yml + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/fmtlib.yml + parameters: + fmtlibVersion: ${{ parameters.fmtlibVersion }} diff --git a/.azuredevops/templates/steps/dependencies-vendor.yml b/.azuredevops/templates/steps/dependencies-vendor.yml index 8d885b553..615adafd8 100644 --- a/.azuredevops/templates/steps/dependencies-vendor.yml +++ b/.azuredevops/templates/steps/dependencies-vendor.yml @@ -8,10 +8,12 @@ parameters: type: object default: boost: 250 + fmtlib: 341 grpc: 72 gtest: 73 half560: 68 lapack: 69 + spdlog: 340 steps: - ${{ each dependency in parameters.dependencyList }}: From 2b0ce5e5c20bc14b7373967891572780a1289955 Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Wed, 3 Sep 2025 13:59:41 -0600 Subject: [PATCH 34/81] Fix typo (#5250) --- .azuredevops/tag-builds/spdlog.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.azuredevops/tag-builds/spdlog.yml b/.azuredevops/tag-builds/spdlog.yml index 3fbf62288..300079340 100644 --- a/.azuredevops/tag-builds/spdlog.yml +++ b/.azuredevops/tag-builds/spdlog.yml @@ -3,7 +3,7 @@ variables: - template: /.azuredevops/variables-global.yml parameters: -- name: 
fmtlibVersion +- name: spdlogVersion type: string default: "v1.15.1" @@ -18,6 +18,6 @@ trigger: none pr: none jobs: - - template: ${{ variables.CI_DEPENDENCIES_PATH }}/fmtlib.yml + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/spdlog.yml parameters: - fmtlibVersion: ${{ parameters.fmtlibVersion }} + spdlogVersion: ${{ parameters.spdlogVersion }} From 3aab9e1bc54851c653ac7cf7613e0418b9942a84 Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Wed, 3 Sep 2025 16:58:17 -0400 Subject: [PATCH 35/81] Modify sparseCheckoutDirectories in checkout.yml (#5251) Added 'shared' to sparseCheckoutDirectories parameter. --- .azuredevops/templates/steps/checkout.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/templates/steps/checkout.yml b/.azuredevops/templates/steps/checkout.yml index f021cbc40..4c5d58f56 100644 --- a/.azuredevops/templates/steps/checkout.yml +++ b/.azuredevops/templates/steps/checkout.yml @@ -20,7 +20,7 @@ steps: retryCountOnTaskFailure: 3 fetchFilter: blob:none ${{ if ne(parameters.sparseCheckoutDir, '') }}: - sparseCheckoutDirectories: ${{ parameters.sparseCheckoutDir }} + sparseCheckoutDirectories: ${{ parameters.sparseCheckoutDir }} shared path: sparse - ${{ if ne(parameters.sparseCheckoutDir, '') }}: - task: Bash@3 From 9e1a82d327bf0e2713a78c9e5e3370b9fb82d133 Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Wed, 3 Sep 2025 20:11:38 -0600 Subject: [PATCH 36/81] Add libdivide (#5252) --- .azuredevops/dependencies/libdivide.yml | 64 +++++++++++++++++++++++++ .azuredevops/tag-builds/libdivide.yml | 23 +++++++++ 2 files changed, 87 insertions(+) create mode 100644 .azuredevops/dependencies/libdivide.yml create mode 100644 .azuredevops/tag-builds/libdivide.yml diff --git a/.azuredevops/dependencies/libdivide.yml b/.azuredevops/dependencies/libdivide.yml new file mode 100644 index 000000000..e20a1ccea --- /dev/null +++ b/.azuredevops/dependencies/libdivide.yml @@ -0,0 +1,64 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: libdivideVersion + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: libdivide_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone libdivide ${{ parameters.libdivideVersion }} + inputs: + targetType: inline + script: git clone https://github.com/ridiculousfish/libdivide.git -b ${{ parameters.libdivideVersion }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/libdivide/build + cmakeSourceDir: 
$(Agent.BuildDirectory)/libdivide + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -DLIBDIVIDE_BUILD_TESTS=OFF + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/tag-builds/libdivide.yml b/.azuredevops/tag-builds/libdivide.yml new file mode 100644 index 000000000..7ae199743 --- /dev/null +++ b/.azuredevops/tag-builds/libdivide.yml @@ -0,0 +1,23 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: libdivideVersion + type: string + default: master + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/libdivide.yml + parameters: + libdivideVersion: ${{ parameters.libdivideVersion }} From 2f401895757814beee25df978c7ade905cf7c164 Mon Sep 17 00:00:00 2001 From: David Dixon <165835255+davidd-amd@users.noreply.github.com> Date: Thu, 4 Sep 2025 18:48:34 -0600 Subject: [PATCH 37/81] add catch2 (#5257) --- .azuredevops/dependencies/catch2.yml | 63 +++++++++++++++++++ .azuredevops/tag-builds/catch2.yml | 23 +++++++ .../templates/steps/dependencies-vendor.yml | 4 +- 3 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 .azuredevops/dependencies/catch2.yml create mode 100644 .azuredevops/tag-builds/catch2.yml diff --git a/.azuredevops/dependencies/catch2.yml b/.azuredevops/dependencies/catch2.yml new file mode 100644 index 000000000..aaf1d41be --- /dev/null +++ b/.azuredevops/dependencies/catch2.yml @@ -0,0 +1,63 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: catch2Version + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: catch2_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone catch2 ${{ parameters.catch2Version }} + inputs: + targetType: inline + script: git clone https://github.com/catchorg/Catch2.git -b ${{ parameters.catch2Version }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/Catch2/build + cmakeSourceDir: $(Agent.BuildDirectory)/Catch2 + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/tag-builds/catch2.yml b/.azuredevops/tag-builds/catch2.yml new file mode 100644 index 000000000..ded20ab86 --- /dev/null +++ b/.azuredevops/tag-builds/catch2.yml @@ -0,0 +1,23 @@ +variables: 
+- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: catch2Version + type: string + default: "v3.7.0" + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/catch2.yml + parameters: + catch2Version: ${{ parameters.catch2Version }} diff --git a/.azuredevops/templates/steps/dependencies-vendor.yml b/.azuredevops/templates/steps/dependencies-vendor.yml index 615adafd8..10086e38e 100644 --- a/.azuredevops/templates/steps/dependencies-vendor.yml +++ b/.azuredevops/templates/steps/dependencies-vendor.yml @@ -8,11 +8,13 @@ parameters: type: object default: boost: 250 + catch2: 343 fmtlib: 341 grpc: 72 gtest: 73 half560: 68 lapack: 69 + libdivide: 342 spdlog: 340 steps: @@ -31,7 +33,7 @@ steps: inputs: archiveFilePatterns: '$(Pipeline.Workspace)/d/**/*.tar.gz' destinationFolder: $(Agent.BuildDirectory)/vendor - cleanDestinationFolder: true + cleanDestinationFolder: false overwriteExistingFiles: true - task: DeleteFiles@1 displayName: Clean up ${{ dependency }} From e5345a9ccad64909a03eeed0db87f9f1e38a3dca Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Fri, 5 Sep 2025 10:12:39 -0400 Subject: [PATCH 38/81] External CI: rocdecode downstream builds (#5254) - Trigger downstream build of rocpydecode within rocdecode pipelines. - Copying similar variables as other pipelines even though these projects are not in the super-repos. --- .azuredevops/components/rocDecode.yml | 44 +++++++++++++++++++++++++ .azuredevops/components/rocPyDecode.yml | 36 +++++++++++++++----- 2 files changed, 72 insertions(+), 8 deletions(-) diff --git a/.azuredevops/components/rocDecode.yml b/.azuredevops/components/rocDecode.yml index ee1d5ccfc..f71c3cf48 100644 --- a/.azuredevops/components/rocDecode.yml +++ b/.azuredevops/components/rocDecode.yml @@ -8,6 +8,22 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -56,10 +72,23 @@ parameters: testJobs: - { os: ubuntu2204, packageManager: apt, target: gfx942 } - { os: ubuntu2204, packageManager: apt, target: gfx90a } +- name: downstreamComponentMatrix + type: object + default: + - rocPyDecode: + name: rocPyDecode + sparseCheckoutDir: '' + skipUnifiedBuild: 'false' + buildDependsOn: + - rocDecode_build jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ${{ parameters.componentName }}_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -83,12 +112,15 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies 
}} os: ${{ job.os }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} @@ -169,3 +201,15 @@ jobs: registerROCmPackages: true environment: test gpuTarget: ${{ job.target }} + +- ${{ if parameters.triggerDownstreamJobs }}: + - ${{ each component in parameters.downstreamComponentMatrix }}: + - ${{ if not(and(parameters.unifiedBuild, eq(component.skipUnifiedBuild, 'true'))) }}: + - template: /.azuredevops/components/${{ component.name }}.yml@pipelines_repo + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ component.sparseCheckoutDir }} + buildDependsOn: ${{ component.buildDependsOn }} + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }}+${{ parameters.componentName }} + triggerDownstreamJobs: true + unifiedBuild: ${{ parameters.unifiedBuild }} diff --git a/.azuredevops/components/rocPyDecode.yml b/.azuredevops/components/rocPyDecode.yml index 6e85a43ef..615148a49 100644 --- a/.azuredevops/components/rocPyDecode.yml +++ b/.azuredevops/components/rocPyDecode.yml @@ -5,6 +5,22 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -47,19 +63,19 @@ parameters: type: object default: buildJobs: - - gfx942: - target: gfx942 - - gfx90a: - target: gfx90a + - { os: ubuntu2204, packageManager: apt, target: gfx942 } + - { os: ubuntu2204, packageManager: apt, target: gfx90a } testJobs: - - gfx942: - target: gfx942 - - gfx90a: - target: gfx90a + - { os: ubuntu2204, packageManager: apt, target: gfx942 } + - { os: ubuntu2204, packageManager: apt, target: gfx90a } jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocPyDecode_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -74,16 +90,20 @@ jobs: parameters: aptPackages: ${{ parameters.aptPackages }} pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} gpuTarget: ${{ job.target }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: 'Save Python Package Paths' inputs: From 76fd6b22902a5494b6516b237d84f67e4f0463f8 Mon Sep 17 00:00:00 2001 From: Matt Williams Date: Fri, 5 Sep 2025 11:45:06 -0400 Subject: [PATCH 39/81] Updating broken link (#5258) --- 
docs/how-to/rocm-for-ai/inference-optimization/workload.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/how-to/rocm-for-ai/inference-optimization/workload.rst b/docs/how-to/rocm-for-ai/inference-optimization/workload.rst index 0580e7434..bc9463f58 100644 --- a/docs/how-to/rocm-for-ai/inference-optimization/workload.rst +++ b/docs/how-to/rocm-for-ai/inference-optimization/workload.rst @@ -939,7 +939,7 @@ hipBLASLt benchmarking The GEMM library `hipBLASLt `_ provides a benchmark tool for its supported operations. Refer to the -`documentation `_ +`documentation `_ for details. * Example 1: Benchmark mix fp8 GEMM From 4bc1bf00c600326949962c5dd86fd643b3098e87 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Fri, 5 Sep 2025 12:07:51 -0400 Subject: [PATCH 40/81] Update PyTorch training benchmark docker doc to 25.7 (#5255) * Update PyTorch training benchmark docker doc to 25.7 * update .wordlist.txt * update conf.py * update data sheet * fix sphinx warnings --- .wordlist.txt | 1 + docs/conf.py | 4 + ...torch-training-v25.6-benchmark-models.yaml | 120 ++++ .../pytorch-training-benchmark-models.yaml | 120 ++-- docs/how-to/deep-learning-rocm.rst | 90 ++- .../previous-versions/vllm-0.9.1-20250715.rst | 4 +- .../inference/benchmark-docker/vllm.rst | 4 +- docs/how-to/rocm-for-ai/install.rst | 4 +- .../megatron-lm-v24.12-dev.rst | 4 +- .../previous-versions/megatron-lm-v25.3.rst | 6 +- .../previous-versions/megatron-lm-v25.4.rst | 6 +- .../pytorch-training-history.rst | 10 +- .../pytorch-training-v25.5.rst | 5 + .../pytorch-training-v25.6.rst | 456 ++++++++++++++ .../benchmark-docker/pytorch-training.rst | 585 +++++++++++------- 15 files changed, 1079 insertions(+), 340 deletions(-) create mode 100644 docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml create mode 100644 docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst diff --git a/.wordlist.txt b/.wordlist.txt index 09236fa95..4eb5df599 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -918,6 +918,7 @@ toolchain toolchains toolset toolsets +torchtitan torchvision tqdm tracebacks diff --git a/docs/conf.py b/docs/conf.py index 6f3979312..6e7fa5e61 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -124,11 +124,15 @@ article_pages = [ {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.5", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/primus-megatron", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/pytorch-training", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.3", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.4", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5", "os": ["linux"]}, + 
{"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4", "os": ["linux"]}, diff --git a/docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml new file mode 100644 index 000000000..df0a198d5 --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml @@ -0,0 +1,120 @@ +unified_docker: + latest: + pull_tag: rocm/pytorch-training:v25.6 + docker_hub_url: https://hub.docker.com/r/rocm/pytorch-training/tags + rocm_version: 6.4.1 + pytorch_version: 2.8.0a0+git7d205b2 + python_version: 3.10.17 + transformer_engine_version: 1.14.0+2f85f5f2 + flash_attention_version: 3.0.0.post1 + hipblaslt_version: 0.15.0-8c6919d + triton_version: 3.3.0 +model_groups: + - group: Pre-training + tag: pre-training + models: + - model: Llama 3.1 8B + mad_tag: pyt_train_llama-3.1-8b + model_repo: Llama-3.1-8B + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: BF16 + training_modes: [pretrain] + - model: Llama 3.1 70B + mad_tag: pyt_train_llama-3.1-70b + model_repo: Llama-3.1-70B + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: BF16 + training_modes: [pretrain] + - model: FLUX.1-dev + mad_tag: pyt_train_flux + model_repo: Flux + url: https://huggingface.co/black-forest-labs/FLUX.1-dev + precision: BF16 + training_modes: [pretrain] + - group: Fine-tuning + tag: fine-tuning + models: + - model: Llama 4 Scout 17B-16E + mad_tag: pyt_train_llama-4-scout-17b-16e + model_repo: Llama-4-17B_16E + url: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.3 70B + mad_tag: pyt_train_llama-3.3-70b + model_repo: Llama-3.3-70B + url: https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct + precision: BF16 + training_modes: [finetune_fw, finetune_lora, finetune_qlora] + - model: Llama 3.2 1B + mad_tag: pyt_train_llama-3.2-1b + model_repo: Llama-3.2-1B + url: https://huggingface.co/meta-llama/Llama-3.2-1B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.2 3B + mad_tag: pyt_train_llama-3.2-3b + model_repo: Llama-3.2-3B + url: https://huggingface.co/meta-llama/Llama-3.2-3B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.2 Vision 11B + mad_tag: pyt_train_llama-3.2-vision-11b + model_repo: Llama-3.2-Vision-11B + url: https://huggingface.co/meta-llama/Llama-3.2-11B-Vision + precision: BF16 + training_modes: [finetune_fw] + - model: Llama 3.2 Vision 90B + mad_tag: pyt_train_llama-3.2-vision-90b + model_repo: Llama-3.2-Vision-90B + url: https://huggingface.co/meta-llama/Llama-3.2-90B-Vision + precision: BF16 + training_modes: [finetune_fw] + - model: Llama 3.1 8B + mad_tag: pyt_train_llama-3.1-8b + model_repo: Llama-3.1-8B + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.1 70B + mad_tag: pyt_train_llama-3.1-70b + model_repo: Llama-3.1-70B + url: https://huggingface.co/meta-llama/Llama-3.1-70B + precision: 
BF16 + training_modes: [finetune_fw, finetune_lora, finetune_qlora] + - model: Llama 3.1 405B + mad_tag: pyt_train_llama-3.1-405b + model_repo: Llama-3.1-405B + url: https://huggingface.co/meta-llama/Llama-3.1-405B + precision: BF16 + training_modes: [finetune_qlora, HF_finetune_lora] + - model: Llama 3 8B + mad_tag: pyt_train_llama-3-8b + model_repo: Llama-3-8B + url: https://huggingface.co/meta-llama/Meta-Llama-3-8B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3 70B + mad_tag: pyt_train_llama-3-70b + model_repo: Llama-3-70B + url: https://huggingface.co/meta-llama/Meta-Llama-3-70B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 2 7B + mad_tag: pyt_train_llama-2-7b + model_repo: Llama-2-7B + url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 + precision: BF16 + training_modes: [finetune_fw, finetune_lora, finetune_qlora] + - model: Llama 2 13B + mad_tag: pyt_train_llama-2-13b + model_repo: Llama-2-13B + url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 2 70B + mad_tag: pyt_train_llama-2-70b + model_repo: Llama-2-70B + url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 + precision: BF16 + training_modes: [finetune_lora, finetune_qlora, HF_finetune_lora] diff --git a/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml index df0a198d5..dc19843be 100644 --- a/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml @@ -1,38 +1,17 @@ -unified_docker: - latest: - pull_tag: rocm/pytorch-training:v25.6 - docker_hub_url: https://hub.docker.com/r/rocm/pytorch-training/tags - rocm_version: 6.4.1 - pytorch_version: 2.8.0a0+git7d205b2 - python_version: 3.10.17 - transformer_engine_version: 1.14.0+2f85f5f2 - flash_attention_version: 3.0.0.post1 - hipblaslt_version: 0.15.0-8c6919d - triton_version: 3.3.0 +dockers: + - pull_tag: rocm/pytorch-training:v25.7 + docker_hub_url: https://hub.docker.com/layers/rocm/pytorch-training/v25.7/images/sha256-cc6fd840ab89cb81d926fc29eca6d075aee9875a55a522675a4b9231c9a0a712 + components: + ROCm: 6.4.2 + PyTorch: 2.8.0a0+gitd06a406 + Python: 3.10.18 + Transformer Engine: 2.2.0.dev0+94e53dd8 + Flash Attention: 3.0.0.post1 + hipBLASLt: 1.1.0-4b9a52edfc + Triton: 3.3.0 model_groups: - - group: Pre-training - tag: pre-training - models: - - model: Llama 3.1 8B - mad_tag: pyt_train_llama-3.1-8b - model_repo: Llama-3.1-8B - url: https://huggingface.co/meta-llama/Llama-3.1-8B - precision: BF16 - training_modes: [pretrain] - - model: Llama 3.1 70B - mad_tag: pyt_train_llama-3.1-70b - model_repo: Llama-3.1-70B - url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct - precision: BF16 - training_modes: [pretrain] - - model: FLUX.1-dev - mad_tag: pyt_train_flux - model_repo: Flux - url: https://huggingface.co/black-forest-labs/FLUX.1-dev - precision: BF16 - training_modes: [pretrain] - - group: Fine-tuning - tag: fine-tuning + - group: Meta Llama + tag: llama models: - model: Llama 4 Scout 17B-16E mad_tag: pyt_train_llama-4-scout-17b-16e @@ -75,19 +54,19 @@ model_groups: model_repo: Llama-3.1-8B url: https://huggingface.co/meta-llama/Llama-3.1-8B precision: BF16 - training_modes: [finetune_fw, finetune_lora] + training_modes: [pretrain, finetune_fw, 
finetune_lora, HF_pretrain] - model: Llama 3.1 70B mad_tag: pyt_train_llama-3.1-70b model_repo: Llama-3.1-70B - url: https://huggingface.co/meta-llama/Llama-3.1-70B + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct precision: BF16 - training_modes: [finetune_fw, finetune_lora, finetune_qlora] + training_modes: [pretrain, finetune_fw, finetune_lora] - model: Llama 3.1 405B mad_tag: pyt_train_llama-3.1-405b model_repo: Llama-3.1-405B url: https://huggingface.co/meta-llama/Llama-3.1-405B precision: BF16 - training_modes: [finetune_qlora, HF_finetune_lora] + training_modes: [finetune_qlora] - model: Llama 3 8B mad_tag: pyt_train_llama-3-8b model_repo: Llama-3-8B @@ -117,4 +96,67 @@ model_groups: model_repo: Llama-2-70B url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 precision: BF16 - training_modes: [finetune_lora, finetune_qlora, HF_finetune_lora] + training_modes: [finetune_lora, finetune_qlora] + - group: OpenAI + tag: openai + models: + - model: GPT OSS 20B + mad_tag: pyt_train_gpt_oss_20b + model_repo: GPT-OSS-20B + url: https://huggingface.co/openai/gpt-oss-20b + precision: BF16 + training_modes: [HF_finetune_lora] + - model: GPT OSS 120B + mad_tag: pyt_train_gpt_oss_120b + model_repo: GPT-OSS-120B + url: https://huggingface.co/openai/gpt-oss-120b + precision: BF16 + training_modes: [HF_finetune_lora] + - group: Qwen + tag: qwen + models: + - model: Qwen 3 8B + mad_tag: pyt_train_qwen3-8b + model_repo: Qwen3-8B + url: https://huggingface.co/Qwen/Qwen3-8B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Qwen 3 32B + mad_tag: pyt_train_qwen3-32b + model_repo: Qwen3-32 + url: https://huggingface.co/Qwen/Qwen3-32B + precision: BF16 + training_modes: [finetune_lora] + - model: Qwen 2.5 32B + mad_tag: pyt_train_qwen2.5-32b + model_repo: Qwen2.5-32B + url: https://huggingface.co/Qwen/Qwen2.5-32B + precision: BF16 + training_modes: [finetune_lora] + - model: Qwen 2.5 72B + mad_tag: pyt_train_qwen2.5-72b + model_repo: Qwen2.5-72B + url: https://huggingface.co/Qwen/Qwen2.5-72B + precision: BF16 + training_modes: [finetune_lora] + - model: Qwen 2 1.5B + mad_tag: pyt_train_qwen2-1.5b + model_repo: Qwen2-1.5B + url: https://huggingface.co/Qwen/Qwen2-1.5B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Qwen 2 7B + mad_tag: pyt_train_qwen2-7b + model_repo: Qwen2-7B + url: https://huggingface.co/Qwen/Qwen2-7B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - group: Flux + tag: flux + models: + - model: FLUX.1-dev + mad_tag: pyt_train_flux + model_repo: Flux + url: https://huggingface.co/black-forest-labs/FLUX.1-dev + precision: BF16 + training_modes: [pretrain] diff --git a/docs/how-to/deep-learning-rocm.rst b/docs/how-to/deep-learning-rocm.rst index 16dad363c..fb1d55a3c 100644 --- a/docs/how-to/deep-learning-rocm.rst +++ b/docs/how-to/deep-learning-rocm.rst @@ -23,93 +23,92 @@ The table below summarizes information about ROCm-enabled deep learning framewor - Installation options - GitHub - * - `PyTorch `_ + * - `PyTorch `__ - .. raw:: html - + - - - `Docker image `_ - - `Wheels package `_ - - `ROCm Base Docker image `_ - - `Upstream Docker file `_ + - `Docker image `__ + - `Wheels package `__ + - `ROCm Base Docker image `__ + - `Upstream Docker file `__ - .. raw:: html - + - - * - `TensorFlow `_ + + * - `TensorFlow `__ - .. raw:: html - + - - - `Docker image `_ - - `Wheels package `_ + - `Docker image `__ + - `Wheels package `__ - .. raw:: html - + - * - `JAX `_ + * - `JAX `__ - .. 
raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - - * - `verl `_ + + * - `verl `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - * - `Stanford Megatron-LM `_ + * - `Stanford Megatron-LM `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - - * - `DGL `_ + + * - `DGL `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - * - `Megablocks `_ + * - `Megablocks `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - - * - `Taichi `_ + + * - `Taichi `__ - .. raw:: html - + - - - `Docker image `_ - - `Wheels package `_ + - `Docker image `__ + - `Wheels package `__ - .. raw:: html - - + Learn how to use your ROCm deep learning environment for training, fine-tuning, inference, and performance optimization through the following guides. @@ -124,10 +123,3 @@ through the following guides. * :doc:`Use ROCm for AI inference optimization ` - - - - - - - diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst index 9e0f4443a..34df0359d 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst @@ -46,7 +46,7 @@ vLLM inference performance testing - {{ unified_docker.hipblaslt_version }} With this Docker image, you can quickly test the :ref:`expected -inference performance numbers ` for +inference performance numbers ` for MI300X series accelerators. What's new @@ -219,7 +219,7 @@ system's configuration. ``container_ci-{{model.mad_tag}}``. The latency and throughput reports of the model are collected in the following path: ``~/MAD/reports_{{model.precision}}/``. - Although the :ref:`available models ` are preconfigured + Although the :ref:`available models ` are preconfigured to collect latency and throughput performance data, you can also change the benchmarking parameters. See the standalone benchmarking tab for more information. diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst index 02c992620..9f3bd608d 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst @@ -39,7 +39,7 @@ vLLM inference performance testing - {{ unified_docker.hipblaslt_version }} With this Docker image, you can quickly test the :ref:`expected -inference performance numbers ` for +inference performance numbers ` for MI300X series accelerators. What's new @@ -208,7 +208,7 @@ system's configuration. and ``{{ model.mad_tag }}_serving.csv``. Although the :ref:`available models - ` are preconfigured to collect + ` are preconfigured to collect offline throughput and online serving performance data, you can also change the benchmarking parameters. See the standalone benchmarking tab for more information. diff --git a/docs/how-to/rocm-for-ai/install.rst b/docs/how-to/rocm-for-ai/install.rst index 6847d06b4..cb949cb31 100644 --- a/docs/how-to/rocm-for-ai/install.rst +++ b/docs/how-to/rocm-for-ai/install.rst @@ -22,9 +22,9 @@ If you’re new to ROCm, refer to the :doc:`ROCm quick start install guide for L `. 
If you’re using a Radeon GPU for graphics-accelerated applications, refer to the -`Radeon installation instructions `_. +`Radeon installation instructions `_. -You can install ROCm on :ref:`compatible systems ` via your Linux +You can install ROCm on :doc:`compatible systems ` via your Linux distribution's package manager. See the following documentation resources to get started: * :doc:`ROCm installation overview ` diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst index a9d99378e..c18b1dfea 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst @@ -18,7 +18,7 @@ Training a model with ROCm Megatron-LM The ROCm Megatron-LM framework is a specialized fork of the robust Megatron-LM, designed to enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD Instinct™ MI300X accelerators, AMD Megatron-LM delivers enhanced scalability, performance, and resource utilization for AI -workloads. It is purpose-built to :ref:`support models ` +workloads. It is purpose-built to :ref:`support models ` like Meta's Llama 2, Llama 3, and Llama 3.1, enabling developers to train next-generation AI models with greater efficiency. See the GitHub repository at ``__. @@ -67,7 +67,7 @@ Megatron-LM provides the following key features to train large language models e - Pre-training -.. _amd-megatron-lm-model-support: +.. _amd-megatron-lm-model-support-24-12: The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst index 3a2f23322..e039aff8a 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst @@ -67,7 +67,7 @@ Megatron-LM provides the following key features to train large language models e - Pre-training -.. _amd-megatron-lm-model-support: +.. _amd-megatron-lm-model-support-25-3: The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator. @@ -278,7 +278,7 @@ handle a variety of input sequences, including unseen words or domain-specific t .. tab-item:: Llama :sync: llama - To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer``. + To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer``. To train any of Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``. Set the Hugging Face model link in the ``TOKENIZER_MODEL`` variable. @@ -292,7 +292,7 @@ handle a variety of input sequences, including unseen words or domain-specific t .. tab-item:: DeepSeek V2 :sync: deepseek - To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. + To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. 
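As a minimal illustration of the tokenizer setup described above (the ``TOKENIZER_MODEL`` variable is the one used in the Llama example; the Hugging Face repository id below is only a plausible choice, not a value mandated by this guide), a DeepSeek V2 Lite run could point the tokenizer at the matching model repository before launching the training script:

.. code-block:: shell

   # Select the Hugging Face repo whose tokenizer matches the model being trained.
   # The DeepSeekV2Tokenizer itself is selected by the training configuration.
   export TOKENIZER_MODEL=deepseek-ai/DeepSeek-V2-Lite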
Multi-node training ^^^^^^^^^^^^^^^^^^^ diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst index 76e5eb716..9d7c7ecd6 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst @@ -67,7 +67,7 @@ Megatron-LM provides the following key features to train large language models e - Pre-training -.. _amd-megatron-lm-model-support: +.. _amd-megatron-lm-model-support-25-4: The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. @@ -291,7 +291,7 @@ or ``${DATA_DIR}/tokenizer_llama2``. .. tab-item:: Llama :sync: llama - To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer`` + To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer`` or the default ``HuggingFaceTokenizer``. To train any of Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``. @@ -320,7 +320,7 @@ or ``${DATA_DIR}/tokenizer_llama2``. .. tab-item:: DeepSeek V2 :sync: deepseek - To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. + To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. Multi-node training ^^^^^^^^^^^^^^^^^^^ diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst index 1535f1d43..07d640159 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst @@ -16,12 +16,20 @@ previous releases of the ``ROCm/pytorch-training`` Docker image on `Docker Hub < - Components - Resources + * - v25.7 + - + * ROCm 6.4.2 + * PyTorch 2.8.0a0+gitd06a406 + - + * :doc:`Documentation <../pytorch-training>` + * `Docker Hub `__ + * - v25.6 - * ROCm 6.3.4 * PyTorch 2.8.0a0+git7d205b2 - - * :doc:`Documentation <../pytorch-training>` + * :doc:`Documentation ` * `Docker Hub `__ * - v25.5 diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst index a43297657..e68a1092b 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst @@ -437,3 +437,8 @@ Once the setup is complete, choose between two options to start benchmarking: ./pytorch_benchmark_report.sh -t HF_finetune_lora -p BF16 -m Llama-2-70B +Previous versions +================= + +See :doc:`pytorch-training-history` to find documentation for previous releases +of the ``ROCm/pytorch-training`` Docker image. 
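If you need to reproduce results from one of the earlier releases listed in the history table, the corresponding image can be pulled by tag (the tag shown is the v25.6 entry; substitute the release you need):

.. code-block:: shell

   docker pull rocm/pytorch-training:v25.6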
diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst new file mode 100644 index 000000000..f9bc57a43 --- /dev/null +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst @@ -0,0 +1,456 @@ +:orphan: + +.. meta:: + :description: How to train a model using PyTorch for ROCm. + :keywords: ROCm, AI, LLM, train, PyTorch, torch, Llama, flux, tutorial, docker + +************************************** +Training a model with PyTorch for ROCm +************************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm vLLM + performance benchmark documentation. See :doc:`../pytorch-training` for the latest version. + +PyTorch is an open-source machine learning framework that is widely used for +model training with GPU-optimized components for transformer-based models. + +The `PyTorch for ROCm training Docker `_ +(``rocm/pytorch-training:v25.6``) image provides a prebuilt optimized environment for fine-tuning and pretraining a +model on AMD Instinct MI325X and MI300X accelerators. It includes the following software components to accelerate +training workloads: + ++--------------------------+--------------------------------+ +| Software component | Version | ++==========================+================================+ +| ROCm | 6.3.4 | ++--------------------------+--------------------------------+ +| PyTorch | 2.8.0a0+git7d205b2 | ++--------------------------+--------------------------------+ +| Python | 3.10.17 | ++--------------------------+--------------------------------+ +| Transformer Engine | 1.14.0+2f85f5f2 | ++--------------------------+--------------------------------+ +| Flash Attention | 3.0.0.post1 | ++--------------------------+--------------------------------+ +| hipBLASLt | 0.15.0-8c6919d | ++--------------------------+--------------------------------+ +| Triton | 3.3.0 | ++--------------------------+--------------------------------+ + +.. _amd-pytorch-training-model-support-v256: + +Supported models +================ + +The following models are pre-optimized for performance on the AMD Instinct MI325X and MI300X accelerators. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml + + {% set unified_docker = data.unified_docker.latest %} + {% set model_groups = data.model_groups %} + + .. raw:: html + +
+        [model-selector widget: "Workload" buttons rendered per {{ model_group.group }}; "Model" dropdown rendered per {{ model.model }} -- original HTML markup not preserved]
+
+ + .. note:: + + Some models require an external license agreement through a third party (for example, Meta). + + .. _amd-pytorch-training-performance-measurements-v256: + + Performance measurements + ======================== + + To evaluate performance, the + `Performance results with AMD ROCm software `_ + page provides reference throughput and latency measurements for training + popular AI models. + + .. note:: + + The performance data presented in + `Performance results with AMD ROCm software `_ + should not be interpreted as the peak performance achievable by AMD + Instinct MI325X and MI300X accelerators or ROCm software. + + System validation + ================= + + Before running AI workloads, it's important to validate that your AMD hardware is configured + correctly and performing optimally. + + If you have already validated your system settings, including aspects like NUMA auto-balancing, you + can skip this step. Otherwise, complete the procedures in the :ref:`System validation and + optimization ` guide to properly configure your system settings + before starting training. + + To test for optimal performance, consult the recommended :ref:`System health benchmarks + `. This suite of tests will help you verify and fine-tune your + system's configuration. + + This Docker image is optimized for specific model configurations outlined + below. Performance can vary for other training workloads, as AMD + doesn’t validate configurations and run conditions outside those described. + + Benchmarking + ============ + + Once the setup is complete, choose between two options to start benchmarking: + + .. tab-set:: + + .. tab-item:: MAD-integrated benchmarking + + Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{ model.mad_tag }} + + For example, use this command to run the performance benchmark test on the {{ model.model }} model + using one GPU with the {{ model.precision }} data type on the host machine. + + .. code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{ model.mad_tag }} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{ model.mad_tag }}``, for example. The latency and throughput reports of the + model are collected in the following path: ``~/MAD/perf.csv``. + + {% endfor %} + {% endfor %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required packages + + Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + + Run the Docker container. + + .. code-block:: shell + + docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME:$HOME -v $HOME/.ssh:/root/.ssh --shm-size 64G --name training_env {{ unified_docker.pull_tag }} + + Use these commands if you exit the ``training_env`` container and need to return to it. + + .. 
code-block:: shell + + docker start training_env + docker exec -it training_env bash + + In the Docker container, clone the ``__ + repository and navigate to the benchmark scripts directory + ``/workspace/MAD/scripts/pytorch_train``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/pytorch_train + + .. rubric:: Prepare training datasets and dependencies + + The following benchmarking examples require downloading models and datasets + from Hugging Face. To ensure successful access to gated repos, set your + ``HF_TOKEN``. + + .. code-block:: shell + + export HF_TOKEN=$your_personal_hugging_face_access_token + + Run the setup script to install libraries and datasets needed for benchmarking. + + .. code-block:: shell + + ./pytorch_benchmark_setup.sh + + .. container:: model-doc pyt_train_llama-3.1-8b + + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 8B: + + .. list-table:: + :header-rows: 1 + + * - Library + - Reference + + * - ``accelerate`` + - `Hugging Face Accelerate `_ + + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 + + .. container:: model-doc pyt_train_llama-3.1-70b + + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 70B: + + .. list-table:: + :header-rows: 1 + + * - Library + - Reference + + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 + + * - ``torchdata`` + - `TorchData `_ + + * - ``tomli`` + - `Tomli `_ + + * - ``tiktoken`` + - `tiktoken `_ + + * - ``blobfile`` + - `blobfile `_ + + * - ``tabulate`` + - `tabulate `_ + + * - ``wandb`` + - `Weights & Biases `_ + + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 + + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 + + .. container:: model-doc pyt_train_flux + + ``pytorch_benchmark_setup.sh`` installs the following libraries for FLUX: + + .. list-table:: + :header-rows: 1 + + * - Library + - Reference + + * - ``accelerate`` + - `Hugging Face Accelerate `_ + + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 + + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 + + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 + + * - ``csvkit`` + - `csvkit `_ 2.0.1 + + * - ``deepspeed`` + - `DeepSpeed `_ 0.16.2 + + * - ``diffusers`` + - `Hugging Face Diffusers `_ 0.31.0 + + * - ``GitPython`` + - `GitPython `_ 3.1.44 + + * - ``opencv-python-headless`` + - `opencv-python-headless `_ 4.10.0.84 + + * - ``peft`` + - `PEFT `_ 0.14.0 + + * - ``protobuf`` + - `Protocol Buffers `_ 5.29.2 + + * - ``pytest`` + - `PyTest `_ 8.3.4 + + * - ``python-dotenv`` + - `python-dotenv `_ 1.0.1 + + * - ``seaborn`` + - `Seaborn `_ 0.13.2 + + * - ``transformers`` + - `Transformers `_ 4.47.0 + + ``pytorch_benchmark_setup.sh`` downloads the following datasets from Hugging Face: + + * `bghira/pseudo-camera-10k `_ + + {% for model_group in model_groups %} + {% for model in model_group.models %} + {% if model_group.tag == "pre-training" and model.mad_tag in ["pyt_train_llama-3.1-8b", "pyt_train_llama-3.1-70b", "pyt_train_flux"] %} + + .. container:: model-doc {{ model.mad_tag }} + + .. rubric:: Pretraining + + To start the pre-training benchmark, use the following command with the + appropriate options. See the following list of options and their descriptions. + + .. code-block:: shell + + ./pytorch_benchmark_report.sh -t pretrain -m {{ model.model_repo }} -p $datatype -s $sequence_length + + .. 
list-table:: + :header-rows: 1 + + * - Name + - Options + - Description + + {% if model.mad_tag == "pyt_train_llama-3.1-8b" %} + * - ``$datatype`` + - ``BF16`` or ``FP8`` + - Only Llama 3.1 8B supports FP8 precision. + {% else %} + * - ``$datatype`` + - ``BF16`` + - Only Llama 3.1 8B supports FP8 precision. + {% endif %} + + * - ``$sequence_length`` + - Sequence length for the language model. + - Between 2048 and 8192. 8192 by default. + + {% if model.mad_tag == "pyt_train_flux" %} + .. container:: model-doc {{ model.mad_tag }} + + .. note:: + + Occasionally, downloading the Flux dataset might fail. In the event of this + error, manually download it from Hugging Face at + `black-forest-labs/FLUX.1-dev `_ + and save it to `/workspace/FluxBenchmark`. This ensures that the test script can access + the required dataset. + {% endif %} + {% endif %} + + {% if model_group.tag == "fine-tuning" %} + .. container:: model-doc {{ model.mad_tag }} + + .. rubric:: Fine-tuning + + To start the fine-tuning benchmark, use the following command with the + appropriate options. See the following list of options and their descriptions. + + .. code-block:: shell + + ./pytorch_benchmark_report.sh -t $training_mode -m {{ model.model_repo }} -p BF16 -s $sequence_length + + .. list-table:: + :header-rows: 1 + + * - Name + - Options + - Description + + * - ``$training_mode`` + - ``finetune_fw`` + - Full weight fine-tuning (BF16 supported) + + * - + - ``finetune_lora`` + - LoRA fine-tuning (BF16 supported) + + * - + - ``finetune_qlora`` + - QLoRA fine-tuning (BF16 supported) + + * - + - ``HF_finetune_lora`` + - LoRA fine-tuning with Hugging Face PEFT + + * - ``$datatype`` + - ``BF16`` + - All models support BF16. + + * - ``$sequence_length`` + - Between 2048 and 16384. + - Sequence length for the language model. + + .. note:: + + {{ model.model }} currently supports the following fine-tuning methods: + + {% for method in model.training_modes %} + * ``{{ method }}`` + {% endfor %} + {% if model.training_modes|length < 4 %} + + The upstream `torchtune `_ repository + does not currently provide YAML configuration files for other combinations of + model to fine-tuning method + However, you can still configure your own YAML files to enable support for + fine-tuning methods not listed here by following existing patterns in the + ``/workspace/torchtune/recipes/configs`` directory. + {% endif %} + {% endif %} + {% endfor %} + {% endfor %} + + .. rubric:: Benchmarking examples + + For examples of benchmarking commands, see ``__. + +Further reading +=============== + +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. + +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. + +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. + +Previous versions +================= + +See :doc:`pytorch-training-history` to find documentation for previous releases +of the ``ROCm/pytorch-training`` Docker image. 
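As a concrete sketch of the ``pytorch_benchmark_report.sh`` fine-tuning invocation documented above (the model, training mode, and sequence length are illustrative picks from the supported combinations, not the only valid ones):

.. code-block:: shell

   # LoRA fine-tuning of Llama 3.1 8B in BF16 with 2048-token sequences
   ./pytorch_benchmark_report.sh -t finetune_lora -m Llama-3.1-8B -p BF16 -s 2048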
diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst index 46b9daf2f..e7258e07b 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst @@ -9,28 +9,25 @@ Training a model with PyTorch for ROCm PyTorch is an open-source machine learning framework that is widely used for model training with GPU-optimized components for transformer-based models. -The `PyTorch for ROCm training Docker `_ -(``rocm/pytorch-training:v25.6``) image provides a prebuilt optimized environment for fine-tuning and pretraining a -model on AMD Instinct MI325X and MI300X accelerators. It includes the following software components to accelerate -training workloads: +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml -+--------------------------+--------------------------------+ -| Software component | Version | -+==========================+================================+ -| ROCm | 6.3.4 | -+--------------------------+--------------------------------+ -| PyTorch | 2.8.0a0+git7d205b2 | -+--------------------------+--------------------------------+ -| Python | 3.10.17 | -+--------------------------+--------------------------------+ -| Transformer Engine | 1.14.0+2f85f5f2 | -+--------------------------+--------------------------------+ -| Flash Attention | 3.0.0.post1 | -+--------------------------+--------------------------------+ -| hipBLASLt | 0.15.0-8c6919d | -+--------------------------+--------------------------------+ -| Triton | 3.3.0 | -+--------------------------+--------------------------------+ + {% set dockers = data.dockers %} + {% set docker = dockers[0] %} + The `PyTorch for ROCm training Docker <{{ docker.docker_hub_url }}>`__ + (``{{ docker.pull_tag }}``) image provides a prebuilt optimized environment for fine-tuning and pretraining a + model on AMD Instinct MI325X and MI300X accelerators. It includes the following software components to accelerate + training workloads: + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} .. _amd-pytorch-training-model-support: @@ -38,26 +35,27 @@ Supported models ================ The following models are pre-optimized for performance on the AMD Instinct MI325X and MI300X accelerators. +Some instructions, commands, and training recommendations in this documentation might +vary by model -- select one to get started. .. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml - {% set unified_docker = data.unified_docker.latest %} + {% set unified_docker = data.dockers[0] %} {% set model_groups = data.model_groups %} - .. raw:: html
-        [model-selector widget labels -- markup not preserved: "Workload" group buttons and "Model" dropdown]
+        [model-selector widget labels -- markup not preserved: "Model group" buttons and "Model variant" dropdown]
- .. note:: - Some models require an external license agreement through a third party (for example, Meta). + .. _amd-pytorch-training-supported-training-modes: - .. _amd-pytorch-training-performance-measurements: + The following table lists supported training modes per model. - Performance measurements - ======================== + .. dropdown:: Supported training modes - To evaluate performance, the + .. list-table:: + :header-rows: 1 + + * - Model + - Supported training modes + + {% for model_group in model_groups %} + {% set models = model_group.models %} + {% for model in models %} + * - {{ model.model }} + - ``{{ model.training_modes | join('``, ``') }}`` + + {% endfor %} + {% endfor %} + + .. note:: + + Some model and fine-tuning combinations are not listed. This is + because the `upstream torchtune repository `__ + doesn't provide default YAML configurations for them. + For advanced usage, you can create a custom configuration to enable + unlisted fine-tuning methods by using an existing file in the + ``/workspace/torchtune/recipes/configs`` directory as a template. + +.. _amd-pytorch-training-performance-measurements: + +Performance measurements +======================== + +To evaluate performance, the +`Performance results with AMD ROCm software `_ +page provides reference throughput and latency measurements for training +popular AI models. + +.. note:: + + The performance data presented in `Performance results with AMD ROCm software `_ - page provides reference throughput and latency measurements for training - popular AI models. + should not be interpreted as the peak performance achievable by AMD + Instinct MI325X and MI300X accelerators or ROCm software. - .. note:: +System validation +================= - The performance data presented in - `Performance results with AMD ROCm software `_ - should not be interpreted as the peak performance achievable by AMD - Instinct MI325X and MI300X accelerators or ROCm software. +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. - System validation - ================= +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. - Before running AI workloads, it's important to validate that your AMD hardware is configured - correctly and performing optimally. +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. - If you have already validated your system settings, including aspects like NUMA auto-balancing, you - can skip this step. Otherwise, complete the procedures in the :ref:`System validation and - optimization ` guide to properly configure your system settings - before starting training. +This Docker image is optimized for specific model configurations outlined +below. Performance can vary for other training workloads, as AMD +doesn’t test configurations and run conditions outside those described. - To test for optimal performance, consult the recommended :ref:`System health benchmarks - `. This suite of tests will help you verify and fine-tune your - system's configuration. +Run training +============ - This Docker image is optimized for specific model configurations outlined - below. 
Performance can vary for other training workloads, as AMD - doesn’t validate configurations and run conditions outside those described. +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml - Benchmarking - ============ + {% set unified_docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} - Once the setup is complete, choose between two options to start benchmarking: + Once the setup is complete, choose between two options to start benchmarking training: .. tab-set:: .. tab-item:: MAD-integrated benchmarking - Clone the ROCm Model Automation and Dashboarding (``__) repository to a local - directory and install the required packages on the host machine. + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. - .. code-block:: shell + .. code-block:: shell - git clone https://github.com/ROCm/MAD - cd MAD - pip install -r requirements.txt + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt {% for model_group in model_groups %} {% for model in model_group.models %} .. container:: model-doc {{ model.mad_tag }} - For example, use this command to run the performance benchmark test on the {{ model.model }} model - using one GPU with the {{ model.precision }} data type on the host machine. + 2. For example, use this command to run the performance benchmark test on the {{ model.model }} model + using one node with the {{ model.precision }} data type on the host machine. - .. code-block:: shell + .. code-block:: shell - export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" - madengine run \ - --tags {{ model.mad_tag }} \ - --keep-model-dir \ - --live-output \ - --timeout 28800 + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{ model.mad_tag }} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 - MAD launches a Docker container with the name - ``container_ci-{{ model.mad_tag }}``, for example. The latency and throughput reports of the - model are collected in the following path: ``~/MAD/perf.csv``. + MAD launches a Docker container with the name + ``container_ci-{{ model.mad_tag }}``. The latency and throughput reports of the + model are collected in ``~/MAD/perf.csv``. {% endfor %} {% endfor %} @@ -159,222 +189,213 @@ The following models are pre-optimized for performance on the AMD Instinct MI325 .. rubric:: Download the Docker image and required packages - Use the following command to pull the Docker image from Docker Hub. + 1. Use the following command to pull the Docker image from Docker Hub. - .. code-block:: shell + .. code-block:: shell - docker pull {{ unified_docker.pull_tag }} + docker pull {{ unified_docker.pull_tag }} - Run the Docker container. + 2. Run the Docker container. - .. code-block:: shell + .. 
code-block:: shell - docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME:$HOME -v $HOME/.ssh:/root/.ssh --shm-size 64G --name training_env {{ unified_docker.pull_tag }} + docker run -it \ + --device /dev/dri \ + --device /dev/kfd \ + --network host \ + --ipc host \ + --group-add video \ + --cap-add SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + -v $HOME/.ssh:/root/.ssh \ + --shm-size 64G \ + --name training_env \ + {{ unified_docker.pull_tag }} - Use these commands if you exit the ``training_env`` container and need to return to it. + Use these commands if you exit the ``training_env`` container and need to return to it. - .. code-block:: shell + .. code-block:: shell - docker start training_env - docker exec -it training_env bash + docker start training_env + docker exec -it training_env bash - In the Docker container, clone the ``__ - repository and navigate to the benchmark scripts directory - ``/workspace/MAD/scripts/pytorch_train``. + 3. In the Docker container, clone the ``__ + repository and navigate to the benchmark scripts directory + ``/workspace/MAD/scripts/pytorch_train``. - .. code-block:: shell + .. code-block:: shell - git clone https://github.com/ROCm/MAD - cd MAD/scripts/pytorch_train + git clone https://github.com/ROCm/MAD + cd MAD/scripts/pytorch_train .. rubric:: Prepare training datasets and dependencies - The following benchmarking examples require downloading models and datasets - from Hugging Face. To ensure successful access to gated repos, set your - ``HF_TOKEN``. + 1. The following benchmarking examples require downloading models and datasets + from Hugging Face. To ensure successful access to gated repos, set your + ``HF_TOKEN``. - .. code-block:: shell + .. code-block:: shell - export HF_TOKEN=$your_personal_hugging_face_access_token + export HF_TOKEN=$your_personal_hugging_face_access_token - Run the setup script to install libraries and datasets needed for benchmarking. + 2. Run the setup script to install libraries and datasets needed for benchmarking. - .. code-block:: shell + .. code-block:: shell - ./pytorch_benchmark_setup.sh + ./pytorch_benchmark_setup.sh - .. container:: model-doc pyt_train_llama-3.1-8b + .. container:: model-doc pyt_train_llama-3.1-8b - ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 8B: + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 8B: - .. list-table:: - :header-rows: 1 + .. list-table:: + :header-rows: 1 - * - Library - - Reference + * - Library + - Reference - * - ``accelerate`` - - `Hugging Face Accelerate `_ + * - ``accelerate`` + - `Hugging Face Accelerate `_ - * - ``datasets`` - - `Hugging Face Datasets `_ 3.2.0 + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 - .. container:: model-doc pyt_train_llama-3.1-70b + .. container:: model-doc pyt_train_llama-3.1-70b - ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 70B: + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 70B: - .. list-table:: - :header-rows: 1 + .. 
list-table:: + :header-rows: 1 - * - Library - - Reference + * - Library + - Reference - * - ``datasets`` - - `Hugging Face Datasets `_ 3.2.0 + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 - * - ``torchdata`` - - `TorchData `_ + * - ``torchdata`` + - `TorchData `_ - * - ``tomli`` - - `Tomli `_ + * - ``tomli`` + - `Tomli `_ - * - ``tiktoken`` - - `tiktoken `_ + * - ``tiktoken`` + - `tiktoken `_ - * - ``blobfile`` - - `blobfile `_ + * - ``blobfile`` + - `blobfile `_ - * - ``tabulate`` - - `tabulate `_ + * - ``tabulate`` + - `tabulate `_ - * - ``wandb`` - - `Weights & Biases `_ + * - ``wandb`` + - `Weights & Biases `_ - * - ``sentencepiece`` - - `SentencePiece `_ 0.2.0 + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 - * - ``tensorboard`` - - `TensorBoard `_ 2.18.0 + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 - .. container:: model-doc pyt_train_flux + .. container:: model-doc pyt_train_flux - ``pytorch_benchmark_setup.sh`` installs the following libraries for FLUX: + ``pytorch_benchmark_setup.sh`` installs the following libraries for FLUX: - .. list-table:: - :header-rows: 1 + .. list-table:: + :header-rows: 1 - * - Library - - Reference + * - Library + - Reference - * - ``accelerate`` - - `Hugging Face Accelerate `_ + * - ``accelerate`` + - `Hugging Face Accelerate `_ - * - ``datasets`` - - `Hugging Face Datasets `_ 3.2.0 + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 - * - ``sentencepiece`` - - `SentencePiece `_ 0.2.0 + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 - * - ``tensorboard`` - - `TensorBoard `_ 2.18.0 + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 - * - ``csvkit`` - - `csvkit `_ 2.0.1 + * - ``csvkit`` + - `csvkit `_ 2.0.1 - * - ``deepspeed`` - - `DeepSpeed `_ 0.16.2 + * - ``deepspeed`` + - `DeepSpeed `_ 0.16.2 - * - ``diffusers`` - - `Hugging Face Diffusers `_ 0.31.0 + * - ``diffusers`` + - `Hugging Face Diffusers `_ 0.31.0 - * - ``GitPython`` - - `GitPython `_ 3.1.44 + * - ``GitPython`` + - `GitPython `_ 3.1.44 - * - ``opencv-python-headless`` - - `opencv-python-headless `_ 4.10.0.84 + * - ``opencv-python-headless`` + - `opencv-python-headless `_ 4.10.0.84 - * - ``peft`` - - `PEFT `_ 0.14.0 + * - ``peft`` + - `PEFT `_ 0.14.0 - * - ``protobuf`` - - `Protocol Buffers `_ 5.29.2 + * - ``protobuf`` + - `Protocol Buffers `_ 5.29.2 - * - ``pytest`` - - `PyTest `_ 8.3.4 + * - ``pytest`` + - `PyTest `_ 8.3.4 - * - ``python-dotenv`` - - `python-dotenv `_ 1.0.1 + * - ``python-dotenv`` + - `python-dotenv `_ 1.0.1 - * - ``seaborn`` - - `Seaborn `_ 0.13.2 + * - ``seaborn`` + - `Seaborn `_ 0.13.2 - * - ``transformers`` - - `Transformers `_ 4.47.0 + * - ``transformers`` + - `Transformers `_ 4.47.0 - ``pytorch_benchmark_setup.sh`` downloads the following datasets from Hugging Face: + ``pytorch_benchmark_setup.sh`` downloads the following datasets from Hugging Face: - * `bghira/pseudo-camera-10k `_ + * `bghira/pseudo-camera-10k `_ {% for model_group in model_groups %} {% for model in model_group.models %} - {% if model_group.tag == "pre-training" and model.mad_tag in ["pyt_train_llama-3.1-8b", "pyt_train_llama-3.1-70b", "pyt_train_flux"] %} + {% set training_modes = model.training_modes %} + {% set training_mode_descs = { + "pretrain": "Benchmark pre-training.", + "HF_pretrain": "Llama 3.1 8B pre-training with FP8 precision." + } %} + {% set available_modes = training_modes | select("in", ["pretrain", "HF_pretrain"]) | list %} + {% if available_modes %} .. container:: model-doc {{ model.mad_tag }} - .. rubric:: Pretraining + .. 
rubric:: Pre-training To start the pre-training benchmark, use the following command with the appropriate options. See the following list of options and their descriptions. .. code-block:: shell - ./pytorch_benchmark_report.sh -t pretrain -m {{ model.model_repo }} -p $datatype -s $sequence_length - - .. list-table:: - :header-rows: 1 - - * - Name - - Options - - Description - - {% if model.mad_tag == "pyt_train_llama-3.1-8b" %} - * - ``$datatype`` - - ``BF16`` or ``FP8`` - - Only Llama 3.1 8B supports FP8 precision. - {% else %} - * - ``$datatype`` - - ``BF16`` - - Only Llama 3.1 8B supports FP8 precision. - {% endif %} - - * - ``$sequence_length`` - - Sequence length for the language model. - - Between 2048 and 8192. 8192 by default. + ./pytorch_benchmark_report.sh -t {% if available_modes | length == 1 %}{{ available_modes[0] }}{% else %}$training_mode{% endif %} \ + -m {{ model.model_repo }} \ + -p $datatype \ + -s $sequence_length {% if model.mad_tag == "pyt_train_flux" %} .. container:: model-doc {{ model.mad_tag }} .. note:: + Currently, FLUX models are not supported out-of-the-box on {{ unified_docker.pull_tag }}. + To use FLUX, refer to the previous version of the ``pytorch-training`` Docker: :doc:`previous-versions/pytorch-training-v25.6` + Occasionally, downloading the Flux dataset might fail. In the event of this error, manually download it from Hugging Face at `black-forest-labs/FLUX.1-dev `_ and save it to `/workspace/FluxBenchmark`. This ensures that the test script can access the required dataset. {% endif %} - {% endif %} - - {% if model_group.tag == "fine-tuning" %} - .. container:: model-doc {{ model.mad_tag }} - - .. rubric:: Fine-tuning - - To start the fine-tuning benchmark, use the following command with the - appropriate options. See the following list of options and their descriptions. - - .. code-block:: shell - - ./pytorch_benchmark_report.sh -t $training_mode -m {{ model.model_repo }} -p BF16 -s $sequence_length .. list-table:: :header-rows: 1 @@ -383,53 +404,143 @@ The following models are pre-optimized for performance on the AMD Instinct MI325 - Options - Description - * - ``$training_mode`` - - ``finetune_fw`` - - Full weight fine-tuning (BF16 supported) - - * - - - ``finetune_lora`` - - LoRA fine-tuning (BF16 supported) - - * - - - ``finetune_qlora`` - - QLoRA fine-tuning (BF16 supported) - - * - - - ``HF_finetune_lora`` - - LoRA fine-tuning with Hugging Face PEFT + {% for mode in available_modes %} + * - {% if loop.first %}``$training_mode``{% endif %} + - ``{{ mode }}`` + - {{ training_mode_descs[mode] }} + {% endfor %} * - ``$datatype`` - - ``BF16`` - - All models support BF16. + - ``BF16``{% if model.mad_tag == "pyt_train_llama-3.1-8b" %} or ``FP8``{% endif %} + - Only Llama 3.1 8B supports FP8 precision. + + * - ``$sequence_length`` + - Sequence length for the language model. + - Between 2048 and 8192. 8192 by default. + {% endif %} + + {% set training_mode_descs = { + "finetune_fw": "Full weight fine-tuning (BF16 and FP8 supported).", + "finetune_lora": "LoRA fine-tuning (BF16 supported).", + "finetune_qlora": "QLoRA fine-tuning (BF16 supported).", + "HF_finetune_lora": "LoRA fine-tuning with Hugging Face PEFT.", + } %} + {% set available_modes = training_modes | select("in", ["finetune_fw", "finetune_lora", "finetune_qlora", "HF_finetune_lora"]) | list %} + {% if available_modes %} + .. container:: model-doc {{ model.mad_tag }} + + .. rubric:: Fine-tuning + + To start the fine-tuning benchmark, use the following command with the + appropriate options. 
See the following list of options and their descriptions. + See :ref:`supported training modes `. + + .. code-block:: shell + + ./pytorch_benchmark_report.sh -t $training_mode \ + -m {{ model.model_repo }} \ + -p $datatype \ + -s $sequence_length + + .. list-table:: + :header-rows: 1 + + * - Name + - Options + - Description + + {% for mode in available_modes %} + * - {% if loop.first %}``$training_mode``{% endif %} + - ``{{ mode }}`` + - {{ training_mode_descs[mode] }} + {% endfor %} + + * - ``$datatype`` + - ``BF16``{% if "finetune_fw" in available_modes %} or ``FP8``{% endif %} + - All models support BF16.{% if "finetune_fw" in available_modes %} FP8 is only available for full weight fine-tuning.{% endif %} * - ``$sequence_length`` - Between 2048 and 16384. - Sequence length for the language model. + {% if model.mad_tag in ["pyt_train_llama3.2-vision-11b", "pyt_train_llama-3.2-vision-90b"] %} .. note:: - {{ model.model }} currently supports the following fine-tuning methods: + For LoRA and QLoRA support with vision models (Llama 3.2 11B and 90B), + use the following torchtune commit for compatibility: - {% for method in model.training_modes %} - * ``{{ method }}`` - {% endfor %} - {% if model.training_modes|length < 4 %} + .. code-block:: shell + + git checkout 48192e23188b1fc524dd6d127725ceb2348e7f0e + + {% elif model.mad_tag in ["pyt_train_llama-2-7b", "pyt_train_llama-2-13b", "pyt_train_llama-2-70b"] %} + .. note:: + + You might encounter the following error with Llama 2: ``ValueError: seq_len (16384) of + input tensor should be smaller than max_seq_len (4096)``. + This error indicates that an input sequence is longer than the model's maximum context window. + + Ensure your tokenized input does not exceed the model's ``max_seq_len`` (4096 + tokens in this case). You can resolve this by truncating the input or splitting + it into smaller chunks before passing it to the model. + + Note on reproducibility: The results in this guide are based on + commit ``b4c98ac`` from the upstream + ``__ repository. For the + latest updates, you can use the main branch. - The upstream `torchtune `_ repository - does not currently provide YAML configuration files for other combinations of - model to fine-tuning method - However, you can still configure your own YAML files to enable support for - fine-tuning methods not listed here by following existing patterns in the - ``/workspace/torchtune/recipes/configs`` directory. {% endif %} {% endif %} {% endfor %} {% endfor %} - .. rubric:: Benchmarking examples + .. rubric:: Benchmarking examples - For examples of benchmarking commands, see ``__. + For examples of benchmarking commands, see ``__. + +Multi-node training +------------------- + +Pre-training +~~~~~~~~~~~~ + +Multi-node training with torchtitan is supported. The provided SLURM script is pre-configured for Llama 3 70B. + +To launch the training job on a SLURM cluster for Llama 3 70B, run the following commands from the MAD repository. + +.. code-block:: shell + + # In the MAD repository + cd scripts/pytorch_train + sbatch run_slurm_train.sh + +Fine-tuning +~~~~~~~~~~~ + +Multi-node training with torchtune is supported. The provided SLURM script is pre-configured for Llama 3.3 70B. + +To launch the training job on a SLURM cluster for Llama 3.3 70B, run the following commands from the MAD repository. + +.. 
code-block:: shell + + huggingface-cli login # Get access to HF Llama model space + huggingface-cli download meta-llama/Llama-3.3-70B-Instruct --local-dir ./models/Llama-3.3-70B-Instruct # Download the Llama 3.3 model locally + # In the MAD repository + cd scripts/pytorch_train + sbatch Torchtune_Multinode.sh + +.. note:: + + Information regarding benchmark setup: + + * By default, Llama 3.3 70B is fine-tuned using ``alpaca_dataset``. + * You can adjust the torchtune `YAML configuration file + `__ + if you're using a different model. + * The number of nodes and other parameters can be tuned in the SLURM script ``Torchtune_Multinode.sh``. + * Set the ``mounting_paths`` inside the SLURM script. + +Once the run is finished, you can find the log files in the ``result_torchtune/`` directory. Further reading =============== From 94476f34ca445c38417e37dd98215f4def28516a Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Mon, 8 Sep 2025 11:32:10 -0400 Subject: [PATCH 41/81] [External CI] Add amdgpu deps to rocpydecode pipeline (#5267) --- .azuredevops/components/rocDecode.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.azuredevops/components/rocDecode.yml b/.azuredevops/components/rocDecode.yml index f71c3cf48..3b4bc6a71 100644 --- a/.azuredevops/components/rocDecode.yml +++ b/.azuredevops/components/rocDecode.yml @@ -8,6 +8,9 @@ parameters: - name: checkoutRef type: string default: '' +- name: rocPyDecodeRepo + type: string + default: rocpydecode_repo # monorepo related parameters - name: sparseCheckoutDir type: string @@ -207,7 +210,7 @@ jobs: - ${{ if not(and(parameters.unifiedBuild, eq(component.skipUnifiedBuild, 'true'))) }}: - template: /.azuredevops/components/${{ component.name }}.yml@pipelines_repo parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} + checkoutRepo: ${{ parameters.rocPyDecodeRepo }} sparseCheckoutDir: ${{ component.sparseCheckoutDir }} buildDependsOn: ${{ component.buildDependsOn }} downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }}+${{ parameters.componentName }} From 4f531836966339d217007429da9a7b0d2ccb8496 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Mon, 8 Sep 2025 21:42:56 -0400 Subject: [PATCH 42/81] docs: Add JAX MaxText benchmark v25.7 (#5182) * Update previous versions * Add data file * fix filename and anchors * add templates * update .wordlist.txt * Update template and data add missing step fix fmt * update template * fix data * add jax 0.6.0 * update history * update quantized training note --- .wordlist.txt | 2 + .../jax-maxtext-benchmark-models.yaml | 72 +++ .../training/benchmark-docker/jax-maxtext.rst | 481 ++++++++++-------- .../previous-versions/jax-maxtext-history.rst | 13 +- .../previous-versions/jax-maxtext-v25.4.rst | 2 +- .../previous-versions/jax-maxtext-v25.5.rst | 385 ++++++++++++++ 6 files changed, 734 insertions(+), 221 deletions(-) create mode 100644 docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml create mode 100644 docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst diff --git a/.wordlist.txt b/.wordlist.txt index 4eb5df599..289fc276e 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -293,6 +293,7 @@ Multicore Multithreaded MyEnvironment MyST +NANOO NBIO NBIOs NCCL @@ -742,6 +743,7 @@ logits lossy macOS matchers +maxtext megatron microarchitecture migraphx diff --git a/docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml 
b/docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml new file mode 100644 index 000000000..5ca21898c --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml @@ -0,0 +1,72 @@ +dockers: + - pull_tag: rocm/jax-training:maxtext-v25.7 + docker_hub_url: https://hub.docker.com/layers/rocm/jax-training/maxtext-v25.7/images/sha256-45f4c727d4019a63fc47313d3a5f5a5105569539294ddfd2d742218212ae9025 + components: + ROCm: 6.4.1 + JAX: 0.5.0 + Python: 3.10.12 + Transformer Engine: 2.1.0+90d703dd + hipBLASLt: 1.x.x + - pull_tag: rocm/jax-training:maxtext-v25.7-jax060 + docker_hub_url: https://hub.docker.com/layers/rocm/jax-training/maxtext-v25.7/images/sha256-45f4c727d4019a63fc47313d3a5f5a5105569539294ddfd2d742218212ae9025 + components: + ROCm: 6.4.1 + JAX: 0.6.0 + Python: 3.10.12 + Transformer Engine: 2.1.0+90d703dd + hipBLASLt: 1.1.0-499ece1c21 +model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.3 70B + mad_tag: jax_maxtext_train_llama-3.3-70b + model_repo: Llama-3.3-70B + precision: bf16 + doc_options: ["single-node"] + - model: Llama 3.1 8B + mad_tag: jax_maxtext_train_llama-3.1-8b + model_repo: Llama-3.1-8B + precision: bf16 + doc_options: ["single-node"] + - model: Llama 3.1 70B + mad_tag: jax_maxtext_train_llama-3.1-70b + model_repo: Llama-3.1-70B + precision: bf16 + doc_options: ["single-node"] + - model: Llama 3 8B + mad_tag: jax_maxtext_train_llama-3-8b + multinode_training_script: llama3_8b_multinode.sh + doc_options: ["multi-node"] + - model: Llama 3 70B + mad_tag: jax_maxtext_train_llama-3-70b + multinode_training_script: llama3_70b_multinode.sh + doc_options: ["multi-node"] + - model: Llama 2 7B + mad_tag: jax_maxtext_train_llama-2-7b + model_repo: Llama-2-7B + precision: bf16 + multinode_training_script: llama2_7b_multinode.sh + doc_options: ["single-node", "multi-node"] + - model: Llama 2 70B + mad_tag: jax_maxtext_train_llama-2-70b + model_repo: Llama-2-70B + precision: bf16 + multinode_training_script: llama2_70b_multinode.sh + doc_options: ["single-node", "multi-node"] + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek-V2-Lite (16B) + mad_tag: jax_maxtext_train_deepseek-v2-lite-16b + model_repo: DeepSeek-V2-lite + precision: bf16 + doc_options: ["single-node"] + - group: Mistral AI + tag: mistral + models: + - model: Mixtral 8x7B + mad_tag: jax_maxtext_train_mixtral-8x7b + model_repo: Mixtral-8x7B + precision: bf16 + doc_options: ["single-node"] diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst index bb364e42a..a85f5af56 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst @@ -2,9 +2,9 @@ :description: How to train a model using JAX MaxText for ROCm. :keywords: ROCm, AI, LLM, train, jax, torch, Llama, flux, tutorial, docker -************************************** -Training a model with MaxText for ROCm -************************************** +****************************************** +Training a model with JAX MaxText for ROCm +****************************************** MaxText is a high-performance, open-source framework built on the Google JAX machine learning library to train LLMs at scale. The MaxText framework for @@ -12,70 +12,108 @@ ROCm is an optimized fork of the upstream ``__ enabling efficient AI workloads on AMD MI300X series accelerators. 
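Each model entry in the data file above carries a ``mad_tag``; those tags are what the MAD-integrated benchmarking flow (shown later in this guide) consumes. A minimal sketch, assuming the Llama 3.1 8B entry and a valid Hugging Face token:

.. code-block:: shell

   export MAD_SECRETS_HFTOKEN="<your personal Hugging Face token>"
   madengine run --tags jax_maxtext_train_llama-3.1-8b --keep-model-dir --live-output --timeout 28800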
-The MaxText for ROCm training Docker (``rocm/jax-training:maxtext-v25.5``) image +The MaxText for ROCm training Docker image provides a prebuilt environment for training on AMD Instinct MI300X and MI325X accelerators, including essential components like JAX, XLA, ROCm libraries, and MaxText utilities. It includes the following software components: -+--------------------------+--------------------------------+ -| Software component | Version | -+==========================+================================+ -| ROCm | 6.3.4 | -+--------------------------+--------------------------------+ -| JAX | 0.4.35 | -+--------------------------+--------------------------------+ -| Python | 3.10.12 | -+--------------------------+--------------------------------+ -| Transformer Engine | 1.12.0.dev0+b8b92dc | -+--------------------------+--------------------------------+ -| hipBLASLt | 0.13.0-ae9c477a | -+--------------------------+--------------------------------+ +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml -Supported features and models -============================= + {% set dockers = data.dockers %} + .. tab-set:: -MaxText provides the following key features to train large language models efficiently: + {% for docker in dockers %} + {% set jax_version = docker.components["JAX"] %} + + .. tab-item:: JAX {{ jax_version }} + :sync: {{ docker.pull_tag }} + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + + {% endfor %} + {% if jax_version == "0.6.0" %} + .. note:: + + Shardy is a new config in JAX 0.6.0. You might get related errors if it's + not configured correctly. For now you can turn it off by setting + ``shardy=False`` during the training run. You can also follow the `migration + guide `__ to enable + it. + + The provided multi-node training scripts in this documentation are + not currently supported with JAX 0.6.0. For multi-node training, use the JAX 0.5.0 + Docker image. + {% endif %} + + {% endfor %} + +MaxText with on ROCm provides the following key features to train large language models efficiently: - Transformer Engine (TE) -- Flash Attention (FA) 3 +- Flash Attention (FA) 3 -- with or without sequence input packing - GEMM tuning - Multi-node support -.. _amd-maxtext-model-support: +- NANOO FP8 quantization support -The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. +.. _amd-maxtext-model-support-v257: -* Llama 3.3 70B +Supported models +================ -* Llama 3.1 8B +The following models are pre-optimized for performance on AMD Instinct MI300 +series accelerators. Some instructions, commands, and available training +configurations in this documentation might vary by model -- select one to get +started. -* Llama 3.1 70B +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml -* Llama 3 8B + {% set model_groups = data.model_groups %} + .. raw:: html -* Llama 3 70B +
+
+
Model
+
+ {% for model_group in model_groups %} +
{{ model_group.group }}
+ {% endfor %} +
+
-* Llama 2 7B - -* Llama 2 70B - -* DeepSeek-V2-Lite +
+
Model variant
+
+ {% for model_group in model_groups %} + {% set models = model_group.models %} + {% for model in models %} + {% if models|length % 3 == 0 %} +
{{ model.model }}
+ {% else %} +
{{ model.model }}
+ {% endif %} + {% endfor %} + {% endfor %} +
+
+
.. note:: Some models, such as Llama 3, require an external license agreement through a third party (for example, Meta). -Unsupported features --------------------- - -Currently, MaxText's default packed input format is not supported. Using this format -with the current Docker image results in incorrect attention calculations -across different input sequences. Support for packed input format is planned for a future release. - System validation ================= @@ -98,14 +136,14 @@ This Docker image is optimized for specific model configurations outlined as follows. Performance can vary for other training workloads, as AMD doesn’t validate configurations and run conditions outside those described. -.. _amd-maxtext-multi-node-setup: +.. _amd-maxtext-multi-node-setup-v257: Multi-node setup ---------------- For multi-node environments, ensure you have all the necessary packages for your network device, such as, RDMA. If you're not using a multi-node setup -with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. +with RDMA, skip ahead to :ref:`amd-maxtext-get-started-v257`. 1. Install the following packages to build and install the RDMA driver. @@ -180,196 +218,203 @@ with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. # If using Mellanox NIC export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9 -.. _amd-maxtext-download-docker: +.. _amd-maxtext-get-started-v257: -Pull the Docker image ---------------------- +Benchmarking +============ -1. Use the following command to pull the Docker image from Docker Hub. +Once the setup is complete, choose between two options to reproduce the +benchmark results: - .. code-block:: shell +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml - docker pull rocm/jax-training:maxtext-v25.5 + .. _vllm-benchmark-mad: -2. Use the following command to launch the Docker container. Note that the benchmarking scripts - used in the :ref:`following section ` automatically launch the Docker container - and execute the benchmark. + {% set dockers = data.dockers %} + {% set model_groups = data.model_groups %} + {% for model_group in model_groups %} + {% for model in model_group.models %} - .. code-block:: shell + .. container:: model-doc {{model.mad_tag}} - docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME/.ssh:/root/.ssh --shm-size 128G --name maxtext_training rocm/jax-training:maxtext-v25.5 + .. tab-set:: -.. _amd-maxtext-get-started: + {% if model.mad_tag and "single-node" in model.doc_options %} + .. tab-item:: MAD-integrated benchmarking -Getting started + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + 2. Use this command to run the performance benchmark test on the {{ model.model }} model + using one GPU with the :literal:`{{model.precision}}` data type on the host machine. + + .. code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{model.mad_tag}} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{model.mad_tag}}``. 
The latency and throughput reports of the + model are collected in the following path: ``~/MAD/perf.csv/``. + {% endif %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required scripts + + Run the JAX MaxText benchmark tool independently by starting the + Docker container as shown in the following snippet. + + .. tab-set:: + {% for docker in dockers %} + {% set jax_version = docker.components["JAX"] %} + + .. tab-item:: JAX {{ jax_version }} + :sync: {{ docker.pull_tag }} + + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + {% endfor %} + + {% if model.model_repo and "single-node" in model.doc_options %} + .. rubric:: Single node training + + 1. Set up environment variables. + + .. code-block:: shell + + export MAD_SECRETS_HFTOKEN= + export HF_HOME= + + ``MAD_SECRETS_HFTOKEN`` is your Hugging Face access token to access models, tokenizers, and data. + See `User access tokens `__. + + ``HF_HOME`` is where ``huggingface_hub`` will store local data. See `huggingface_hub CLI `__. + If you already have downloaded or cached Hugging Face artifacts, set this variable to that path. + Downloaded files typically get cached to ``~/.cache/huggingface``. + + 2. Launch the Docker container. + + .. tab-set:: + {% for docker in dockers %} + {% set jax_version = docker.components["JAX"] %} + + .. tab-item:: JAX {{ jax_version }} + :sync: {{ docker.pull_tag }} + + .. code-block:: shell + + docker run -it \ + --device=/dev/dri \ + --device=/dev/kfd \ + --network host \ + --ipc host \ + --group-add video \ + --cap-add=SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + -v $HOME/.ssh:/root/.ssh \ + -v $HF_HOME:/hf_cache \ + -e HF_HOME=/hf_cache \ + -e MAD_SECRETS_HFTOKEN=$MAD_SECRETS_HFTOKEN + --shm-size 64G \ + --name training_env \ + {{ docker.pull_tag }} + {% endfor %} + + 3. In the Docker container, clone the ROCm MAD repository and navigate to the + benchmark scripts directory at ``MAD/scripts/jax-maxtext``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/jax-maxtext + + 4. Run the setup scripts to install libraries and datasets needed + for benchmarking. + + .. code-block:: shell + + ./jax-maxtext_benchmark_setup.sh -m {{ model.model_repo }} + + 5. To run the training benchmark without quantization, use the following command: + + .. code-block:: shell + + ./jax-maxtext_benchmark_report.sh -m {{ model.model_repo }} + + For quantized training, use the following command: + + .. code-block:: shell + + ./jax-maxtext_benchmark_report.sh -m {{ model.model_repo }} -q nanoo_fp8 + + .. important:: + + Quantized training is not supported with the JAX 0.6.0 Docker image; support + will be added in a future release. For quantized training, use the JAX 0.5.0 + Docker image: ``rocm/jax-training:maxtext-v25.7``. + + {% endif %} + {% if model.multinode_training_script and "multi-node" in model.doc_options %} + .. rubric:: Multi-node training + + The following examples use SLURM to run on multiple nodes. + + .. note:: + + The following scripts will launch the Docker container and run the + benchmark. Run them outside of any Docker container. + + 1. Make sure ``$HF_HOME`` is set before running the test. See + `ROCm benchmarking `__ + for more details on downloading the Llama models before running the + benchmark. + + 2. To run multi-node training for {{ model.model }}, + use the + `multi-node training script `__ + under the ``scripts/jax-maxtext/gpu-rocm/`` directory. + + 3. 
Run the multi-node training benchmark script. + + .. code-block:: shell + + sbatch -N {{ model.multinode_training_script }} + + {% else %} + .. rubric:: Multi-node training + + For multi-node training examples, choose a model from :ref:`amd-maxtext-model-support-v257` + with an available `multi-node training script `__. + {% endif %} + {% endfor %} + {% endfor %} + +Further reading =============== -The following examples demonstrate how to get started with single node -and multi-node training using the benchmarking scripts provided at -``__. +- See the ROCm/maxtext benchmarking README at ``__. -.. important:: +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. - The provided scripts launch a Docker container and execute a benchmark. Ensure you run these commands outside of any existing Docker container. +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. -Before running any benchmarks, ensure the ``$HF_HOME`` environment variable is -set correctly and points to your Hugging Face cache directory. Refer to the -README at ``__ -for more detailed instructions. - -Single node training benchmarking examples ------------------------------------------- - -* Example 1: Single node training with Llama 2 7B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_7b.sh - -* Example 2: Single node training with Llama 2 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_70b.sh - -* Example 3: Single node training with Llama 3 8B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_8b.sh - -* Example 4: Single node training with Llama 3 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_70b.sh - -* Example 5: Single node training with Llama 3.3 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3.3_70b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3.3_70b.sh - -* Example 6: Single node training with DeepSeek V2 16B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/deepseek_v2_16b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./deepseek_v2_16b.sh - - .. 
note:: - - The reported TFLOP/s by MaxText for DeepSeek is not accurate. Use - the tokens/s as a performance indicator. - -Multi-node training benchmarking examples ------------------------------------------ - -The following examples use SLURM for running on multiple nodes -- the commands might need to be adjusted for your -own cluster setup. - -* Example 1: Multi-node training with Llama 2 7B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama2_7b_multinode.sh - -* Example 2: Multi-node training with Llama 2 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama2_70b_multinode.sh - -* Example 3: Multi-node training with Llama 3 8B model - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama3_8b_multinode.sh - -* Example 4: Multi-node training with Llama 3 70B model - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama3_70b_multinode.sh +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. Previous versions ================= diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst index b67d1ac3a..e4d039356 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst @@ -17,12 +17,21 @@ previous releases of the ``ROCm/jax-training`` Docker image on `Docker Hub ` + * `Docker Hub (JAX 0.6.0) `__ + * `Docker Hub (JAX 0.5.0) `__ + + * - 25.5 - * ROCm 6.3.4 * JAX 0.4.35 - - * :doc:`Documentation <../jax-maxtext>` + * :doc:`Documentation ` * `Docker Hub `__ * - 25.4 diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst index 03836c9fc..3fe728c35 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst @@ -51,7 +51,7 @@ MaxText provides the following key features to train large language models effic - Multi-node support -.. _amd-maxtext-model-support: +.. _amd-maxtext-model-support-v254: The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. 
diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst new file mode 100644 index 000000000..d5051d28c --- /dev/null +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst @@ -0,0 +1,385 @@ +:orphan: + +.. meta:: + :description: How to train a model using JAX MaxText for ROCm. + :keywords: ROCm, AI, LLM, train, jax, torch, Llama, flux, tutorial, docker + +************************************** +Training a model with MaxText for ROCm +************************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm JAX MaxText + training performance documentation. See :doc:`../jax-maxtext` for the latest version. + +MaxText is a high-performance, open-source framework built on the Google JAX +machine learning library to train LLMs at scale. The MaxText framework for +ROCm is an optimized fork of the upstream +``__ enabling efficient AI workloads +on AMD MI300X series accelerators. + +The MaxText for ROCm training Docker (``rocm/jax-training:maxtext-v25.5``) image +provides a prebuilt environment for training on AMD Instinct MI300X and MI325X accelerators, +including essential components like JAX, XLA, ROCm libraries, and MaxText utilities. +It includes the following software components: + ++--------------------------+--------------------------------+ +| Software component | Version | ++==========================+================================+ +| ROCm | 6.3.4 | ++--------------------------+--------------------------------+ +| JAX | 0.4.35 | ++--------------------------+--------------------------------+ +| Python | 3.10.12 | ++--------------------------+--------------------------------+ +| Transformer Engine | 1.12.0.dev0+b8b92dc | ++--------------------------+--------------------------------+ +| hipBLASLt | 0.13.0-ae9c477a | ++--------------------------+--------------------------------+ + +Supported features and models +============================= + +MaxText provides the following key features to train large language models efficiently: + +- Transformer Engine (TE) + +- Flash Attention (FA) 3 + +- GEMM tuning + +- Multi-node support + +.. _amd-maxtext-model-support-v255: + +The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. + +* Llama 3.3 70B + +* Llama 3.1 8B + +* Llama 3.1 70B + +* Llama 3 8B + +* Llama 3 70B + +* Llama 2 7B + +* Llama 2 70B + +* DeepSeek-V2-Lite + +.. note:: + + Some models, such as Llama 3, require an external license agreement through + a third party (for example, Meta). + +Unsupported features +-------------------- + +Currently, MaxText's default packed input format is not supported. Using this format +with the current Docker image results in incorrect attention calculations +across different input sequences. Support for packed input format is planned for a future release. + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. 
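+
+For example, a quick spot check related to the NUMA auto-balancing setting
+mentioned above is to read its current value, which is typically expected to
+be ``0`` (disabled) on training systems. This is only an illustrative check,
+not a substitute for the full validation procedure:
+
+.. code-block:: shell
+
+   # Print the current NUMA auto-balancing setting.
+   # A value of 0 means auto-balancing is disabled.
+   cat /proc/sys/kernel/numa_balancing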
+ +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +Environment setup +================= + +This Docker image is optimized for specific model configurations outlined +as follows. Performance can vary for other training workloads, as AMD +doesn’t validate configurations and run conditions outside those described. + +.. _amd-maxtext-multi-node-setup-v255: + +Multi-node setup +---------------- + +For multi-node environments, ensure you have all the necessary packages for +your network device, such as, RDMA. If you're not using a multi-node setup +with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. + +1. Install the following packages to build and install the RDMA driver. + + .. code-block:: shell + + sudo apt install iproute2 -y + sudo apt install -y linux-headers-"$(uname-r)" libelf-dev + sudo apt install -y gcc make libtool autoconf librdmacm-dev rdmacm-utils infiniband-diags ibverbs-utils perftest ethtool libibverbs-dev rdma-core strace libibmad5 libibnetdisc5 ibverbs-providers libibumad-dev libibumad3 libibverbs1 libnl-3-dev libnl-route-3-dev + + Refer to your NIC manufacturer's documentation for further steps on + compiling and installing the RoCE driver. For example, for Broadcom, + see `Compiling Broadcom NIC software from source `_ + in `Ethernet networking guide for AMD Instinct MI300X GPU clusters `_. + +2. Set the following environment variables. + + a. Master address + + Change ``localhost`` to the master node's resolvable hostname or IP address: + + .. code-block:: bash + + export MASTER_ADDR="${MASTER_ADDR:-localhost}" + + b. Number of nodes + + Set the number of nodes you want to train on (for example, ``2``, ``4``, or ``8``): + + .. code-block:: bash + + export NNODES="${NNODES:-1}" + + c. Node ranks + + Set the rank of each node (``0`` for master, ``1`` for the first worker node, and so on) + Node ranks should be unique across all nodes in the cluster. + + .. code-block:: bash + + export NODE_RANK="${NODE_RANK:-0}" + + d. Network interface + + Update the network interface in the script to match your system's network interface. To + find your network interface, run the following (outside of any Docker container): + + .. code-block:: bash + + ip a + + Look for an active interface with an IP address in the same subnet as + your other nodes. Then, update the following variable in the script, for + example: + + .. code-block:: bash + + export NCCL_SOCKET_IFNAME=ens50f0np0 + + This variable specifies which network interface to use for inter-node communication. + Setting this variable to the incorrect interface can result in communication failures + or significantly reduced performance. + + e. RDMA interface + + Ensure the :ref:`required packages ` are installed on all nodes. + Then, set the RDMA interfaces to use for communication. + + .. code-block:: bash + + # If using Broadcom NIC + export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7 + # If using Mellanox NIC + export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9 + +.. _amd-maxtext-download-docker-v255: + +Pull the Docker image +--------------------- + +1. Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull rocm/jax-training:maxtext-v25.5 + +2. Use the following command to launch the Docker container. 
Note that the benchmarking scripts + used in the :ref:`following section ` automatically launch the Docker container + and execute the benchmark. + + .. code-block:: shell + + docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME/.ssh:/root/.ssh --shm-size 128G --name maxtext_training rocm/jax-training:maxtext-v25.5 + +.. _amd-maxtext-get-started-v255: + +Getting started +=============== + +The following examples demonstrate how to get started with single node +and multi-node training using the benchmarking scripts provided at +``__. + +.. important:: + + The provided scripts launch a Docker container and execute a benchmark. Ensure you run these commands outside of any existing Docker container. + +Before running any benchmarks, ensure the ``$HF_HOME`` environment variable is +set correctly and points to your Hugging Face cache directory. Refer to the +README at ``__ +for more detailed instructions. + +Single node training benchmarking examples +------------------------------------------ + +* Example 1: Single node training with Llama 2 7B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_7b.sh + +* Example 2: Single node training with Llama 2 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_70b.sh + +* Example 3: Single node training with Llama 3 8B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_8b.sh + +* Example 4: Single node training with Llama 3 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_70b.sh + +* Example 5: Single node training with Llama 3.3 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3.3_70b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3.3_70b.sh + +* Example 6: Single node training with DeepSeek V2 16B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/deepseek_v2_16b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./deepseek_v2_16b.sh + + .. note:: + + The reported TFLOP/s by MaxText for DeepSeek is not accurate. Use + the tokens/s as a performance indicator. 
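+
+If you want to run several of the preceding single node examples back to back,
+they all follow the same download-and-run pattern, so a small wrapper loop is
+enough. The model list below is only an illustrative selection of the script
+names shown above; adjust it to the benchmarks you actually want to run:
+
+.. code-block:: shell
+
+   # Convenience sketch: each iteration repeats the wget + IMAGE=... bash
+   # pattern from the examples above for one benchmarking script.
+   for script in llama2_7b llama3_8b deepseek_v2_16b; do
+      wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/${script}.sh
+      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./${script}.sh
+   done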
+ +Multi-node training benchmarking examples +----------------------------------------- + +The following examples use SLURM for running on multiple nodes -- the commands might need to be adjusted for your +own cluster setup. + +* Example 1: Multi-node training with Llama 2 7B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama2_7b_multinode.sh + +* Example 2: Multi-node training with Llama 2 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama2_70b_multinode.sh + +* Example 3: Multi-node training with Llama 3 8B model + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama3_8b_multinode.sh + +* Example 4: Multi-node training with Llama 3 70B model + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama3_70b_multinode.sh + +Previous versions +================= + +See :doc:`jax-maxtext-history` to find documentation for previous releases +of the ``ROCm/jax-training`` Docker image. 
From db43d18c3725ba53c00544971139ba9b743f1536 Mon Sep 17 00:00:00 2001 From: anisha-amd Date: Tue, 9 Sep 2025 11:02:30 -0400 Subject: [PATCH 43/81] Docs: frameworks compatibility- ray and llama.cpp (#5273) --- .wordlist.txt | 1 + .../compatibility-matrix-historical-6.0.csv | 2 + docs/compatibility/compatibility-matrix.rst | 2 + .../llama-cpp-compatibility.rst | 151 ++++++++++++++++++ .../ml-compatibility/ray-compatibility.rst | 105 ++++++++++++ docs/conf.py | 2 + docs/how-to/deep-learning-rocm.rst | 22 +++ docs/sphinx/_toc.yml.in | 12 +- 8 files changed, 293 insertions(+), 4 deletions(-) create mode 100644 docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst create mode 100644 docs/compatibility/ml-compatibility/ray-compatibility.rst diff --git a/.wordlist.txt b/.wordlist.txt index 289fc276e..5370f4752 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -501,6 +501,7 @@ Unhandled VALU VBIOS VCN +verl's VGPR VGPRs VM diff --git a/docs/compatibility/compatibility-matrix-historical-6.0.csv b/docs/compatibility/compatibility-matrix-historical-6.0.csv index b8f7b6ba2..54f5ceb50 100644 --- a/docs/compatibility/compatibility-matrix-historical-6.0.csv +++ b/docs/compatibility/compatibility-matrix-historical-6.0.csv @@ -35,6 +35,8 @@ ROCm Version,6.4.3,6.4.2,6.4.1,6.4.0,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6 :doc:`DGL <../compatibility/ml-compatibility/dgl-compatibility>` [#dgl_compat]_,N/A,N/A,N/A,2.4.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A, :doc:`Megablocks <../compatibility/ml-compatibility/megablocks-compatibility>` [#megablocks_compat]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.7.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A :doc:`Taichi <../compatibility/ml-compatibility/taichi-compatibility>` [#taichi_compat]_,N/A,N/A,N/A,N/A,N/A,1.8.0b1,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A + :doc:`Ray <../compatibility/ml-compatibility/ray-compatibility>` [#ray_compat]_,N/A,N/A,2.48.0.post0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A + :doc:`llama.cpp <../compatibility/ml-compatibility/llama-cpp-compatibility>` [#llama-cpp_compat]_,N/A,N/A,N/A,b5997,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A `ONNX Runtime `_,1.2,1.2,1.2,1.2,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1 ,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,, diff --git a/docs/compatibility/compatibility-matrix.rst b/docs/compatibility/compatibility-matrix.rst index 797e2894e..fb1ffad43 100644 --- a/docs/compatibility/compatibility-matrix.rst +++ b/docs/compatibility/compatibility-matrix.rst @@ -246,6 +246,8 @@ Expand for full historical view of: .. [#dgl_compat] DGL is only supported on ROCm 6.4.0. .. [#megablocks_compat] Megablocks is only supported on ROCm 6.3.0. .. [#taichi_compat] Taichi is only supported on ROCm 6.3.2. + .. [#ray_compat] Ray is only supported on ROCm 6.4.1. + .. [#llama-cpp_compat] llama.cpp is only supported on ROCm 6.4.0. .. [#kfd_support-past-60] As of ROCm 6.4.0, forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software is provided up to a year apart. For earlier ROCm releases, the compatibility is provided for +/- 2 releases. The tested user space versions on this page were accurate as of the time of initial ROCm release. For the most up-to-date information, see the latest version of this information at `User and kernel-space support matrix `_. .. [#ROCT-rocr-past-60] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package. 
diff --git a/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst b/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst new file mode 100644 index 000000000..fd1356d32 --- /dev/null +++ b/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst @@ -0,0 +1,151 @@ +:orphan: + +.. meta:: + :description: llama.cpp deep learning framework compatibility + :keywords: GPU, GGML, llama.cpp compatibility + +.. version-set:: rocm_version latest + +******************************************************************************** +llama.cpp compatibility +******************************************************************************** + +`llama.cpp `__ is an open-source framework +for Large Language Model (LLM) inference that runs on both central processing units +(CPUs) and graphics processing units (GPUs). It is written in plain C/C++, providing +a simple, dependency-free setup. + +The framework supports multiple quantization options, from 1.5-bit to 8-bit integers, +to speed up inference and reduce memory usage. Originally built as a CPU-first library, +llama.cpp is easy to integrate with other programming environments and is widely +adopted across diverse platforms, including consumer devices. + +ROCm support for llama.cpp is upstreamed, and you can build the official source code +with ROCm support: + +- ROCm support for llama.cpp is hosted in the official `https://github.com/ROCm/llama.cpp + `_ repository. + +- Due to independent compatibility considerations, this location differs from the + `https://github.com/ggml-org/llama.cpp `_ upstream repository. + +- To install llama.cpp, use the prebuilt :ref:`Docker image `, + which includes ROCm, llama.cpp, and all required dependencies. + + - See the :doc:`ROCm llama.cpp installation guide ` + to install and get started. + + - See the `Installation guide `__ + in the upstream llama.cpp documentation. + +.. note:: + + llama.cpp is supported on ROCm 6.4.0. + +Supported devices +================================================================================ + +**Officially Supported**: AMD Instinct™ MI300X, MI210 + + +Use cases and recommendations +================================================================================ + +llama.cpp can be applied in a variety of scenarios, particularly when you need to meet one or more of the following requirements: + +- Plain C/C++ implementation with no external dependencies +- Support for 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory usage +- Custom HIP (Heterogeneous-compute Interface for Portability) kernels for running large language models (LLMs) on AMD GPUs (graphics processing units) +- CPU (central processing unit) + GPU (graphics processing unit) hybrid inference for partially accelerating models larger than the total available VRAM (video random-access memory) + +llama.cpp is also used in a range of real-world applications, including: + +- Games such as `Lucy's Labyrinth `__: + A simple maze game where AI-controlled agents attempt to trick the player. +- Tools such as `Styled Lines `__: + A proprietary, asynchronous inference wrapper for Unity3D game development, including pre-built mobile and web platform wrappers and a model example. +- Various other AI applications use llama.cpp as their inference engine; + for a detailed list, see the `user interfaces (UIs) section `__. 
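+
+As an illustration of the CPU + GPU hybrid inference mode listed above, the
+following sketch offloads only part of a GGUF model to the GPU while the
+remaining layers run on the CPU. The model path and layer count are
+placeholders, and option spellings can change between llama.cpp releases, so
+verify them with ``llama-cli --help`` in the build or image you use:
+
+.. code-block:: shell
+
+   # Offload 32 transformer layers to the GPU; the rest stay on the CPU.
+   # Useful when the quantized GGUF model is larger than the available VRAM.
+   llama-cli -m /path/to/model.gguf --n-gpu-layers 32 -p "Write a haiku about GPUs"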
+ +Refer to the `AMD ROCm blog `_, +where you can search for llama.cpp examples and best practices to optimize your workloads on AMD GPUs. + +.. _llama-cpp-docker-compat: + +Docker image compatibility +================================================================================ + +.. |docker-icon| raw:: html + + + +AMD validates and publishes `ROCm llama.cpp Docker images `__ +with ROCm backends on Docker Hub. The following Docker image tags and associated +inventories were tested on `ROCm 6.4.0 `__. +Click |docker-icon| to view the image on Docker Hub. + +.. important:: + + Tag endings of ``_full``, ``_server``, and ``_light`` serve different purposes for entrypoints as follows: + + - Full: This image includes both the main executable file and the tools to convert ``LLaMA`` models into ``ggml`` and convert into 4-bit quantization. + - Server: This image only includes the server executable file. + - Light: This image only includes the main executable file. + +.. list-table:: + :header-rows: 1 + :class: docker-image-compatibility + + * - Full Docker + - Server Docker + - Light Docker + - llama.cpp + - Ubuntu + + * - .. raw:: html + + rocm/llama.cpp + - .. raw:: html + + rocm/llama.cpp + - .. raw:: html + + rocm/llama.cpp + - `b5997 `__ + - 24.04 + +Key ROCm libraries for llama.cpp +================================================================================ + +llama.cpp functionality on ROCm is determined by its underlying library +dependencies. These ROCm components affect the capabilities, performance, and +feature set available to developers. + +.. list-table:: + :header-rows: 1 + + * - ROCm library + - Version + - Purpose + - Usage + * - `hipBLAS `__ + - :version-ref:`hipBLAS rocm_version` + - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for + matrix and vector operations. + - Supports operations such as matrix multiplication, matrix-vector + products, and tensor contractions. Utilized in both dense and batched + linear algebra operations. + * - `hipBLASLt `__ + - :version-ref:`hipBLASLt rocm_version` + - hipBLASLt is an extension of the hipBLAS library, providing additional + features like epilogues fused into the matrix multiplication kernel or + use of integer tensor cores. + - By setting the flag ``ROCBLAS_USE_HIPBLASLT``, you can dispatch hipblasLt + kernels where possible. + * - `rocWMMA `__ + - :version-ref:`rocWMMA rocm_version` + - Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix + multiplication (GEMM) and accumulation operations with mixed precision + support. + - Can be used to enhance the flash attention performance on AMD compute, by enabling + the flag during compile time. \ No newline at end of file diff --git a/docs/compatibility/ml-compatibility/ray-compatibility.rst b/docs/compatibility/ml-compatibility/ray-compatibility.rst new file mode 100644 index 000000000..c5a2ed39f --- /dev/null +++ b/docs/compatibility/ml-compatibility/ray-compatibility.rst @@ -0,0 +1,105 @@ +:orphan: + +.. meta:: + :description: Ray deep learning framework compatibility + :keywords: GPU, Ray compatibility + +.. version-set:: rocm_version latest + +******************************************************************************* +Ray compatibility +******************************************************************************* + +Ray is a unified framework for scaling AI and Python applications from your laptop +to a full cluster, without changing your code. 
Ray consists of `a core distributed +runtime `_ and a set of +`AI libraries `_ for +simplifying machine learning computations. + +Ray is a general-purpose framework that runs many types of workloads efficiently. +Any Python application can be scaled with Ray, without extra infrastructure. + +ROCm support for Ray is upstreamed, and you can build the official source code +with ROCm support: + +- ROCm support for Ray is hosted in the official `https://github.com/ROCm/ray + `_ repository. + +- Due to independent compatibility considerations, this location differs from the + `https://github.com/ray-project/ray `_ upstream repository. + +- To install Ray, use the prebuilt :ref:`Docker image ` + which includes ROCm, Ray, and all required dependencies. + + - See the :doc:`ROCm Ray installation guide ` + for instructions to get started. + + - See the `Installation section `_ + in the upstream Ray documentation. + + - The Docker image provided is based on the upstream Ray `Daily Release (Nightly) wheels `__ + corresponding to commit `005c372 `__. + +.. note:: + + Ray is supported on ROCm 6.4.1. + +Supported devices +================================================================================ + +**Officially Supported**: AMD Instinct™ MI300X, MI210 + + +Use cases and recommendations +================================================================================ + +* The `Reinforcement Learning from Human Feedback on AMD GPUs with verl and ROCm + Integration `__ + blog provides an overview of Volcano Engine Reinforcement Learning (verl) + for large language models (LLMs) and discusses its benefits in large-scale + reinforcement learning from human feedback (RLHF). It uses Ray as part of a + hybrid orchestration engine to schedule and coordinate training and inference + tasks in parallel, enabling optimized resource utilization and potential overlap + between these phases. This dynamic resource allocation strategy significantly + improves overall system efficiency. The blog presents verl’s performance results, + focusing on throughput and convergence accuracy achieved on AMD Instinct™ MI300X + GPUs. Follow this guide to get started with verl on AMD Instinct GPUs and + accelerate your RLHF training with ROCm-optimized performance. + +For more use cases and recommendations, see the AMD GPU tabs in the `Accelerator Support +topic `_ +of the Ray core documentation and refer to the `AMD ROCm blog `_, +where you can search for Ray examples and best practices to optimize your workloads on AMD GPUs. + +.. _ray-docker-compat: + +Docker image compatibility +================================================================================ + +.. |docker-icon| raw:: html + + + +AMD validates and publishes ready-made `ROCm Ray Docker images `__ +with ROCm backends on Docker Hub. The following Docker image tags and +associated inventories represent the latest Ray version from the official Docker Hub and are validated for +`ROCm 6.4.1 `_. Click the |docker-icon| +icon to view the image on Docker Hub. + +.. list-table:: + :header-rows: 1 + :class: docker-image-compatibility + + * - Docker image + - Ray + - Pytorch + - Ubuntu + - Python + + * - .. 
raw:: html + + rocm/ray + - `2.48.0.post0 `_ + - 2.6.0+git684f6f2 + - 24.04 + - `3.12.10 `_ diff --git a/docs/conf.py b/docs/conf.py index 6e7fa5e61..f852b6697 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -108,6 +108,8 @@ article_pages = [ {"file": "compatibility/ml-compatibility/dgl-compatibility", "os": ["linux"]}, {"file": "compatibility/ml-compatibility/megablocks-compatibility", "os": ["linux"]}, {"file": "compatibility/ml-compatibility/taichi-compatibility", "os": ["linux"]}, + {"file": "compatibility/ml-compatibility/ray-compatibility", "os": ["linux"]}, + {"file": "compatibility/ml-compatibility/llama-cpp-compatibility", "os": ["linux"]}, {"file": "how-to/deep-learning-rocm", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/index", "os": ["linux"]}, diff --git a/docs/how-to/deep-learning-rocm.rst b/docs/how-to/deep-learning-rocm.rst index fb1d55a3c..accb2e546 100644 --- a/docs/how-to/deep-learning-rocm.rst +++ b/docs/how-to/deep-learning-rocm.rst @@ -110,6 +110,28 @@ The table below summarizes information about ROCm-enabled deep learning framewor + * - `Ray `__ + - .. raw:: html + + + - + - `Docker image `__ + - `Wheels package `__ + - `ROCm Base Docker image `__ + - .. raw:: html + + + + * - `llama.cpp `__ + - .. raw:: html + + + - + - `Docker image `__ + - .. raw:: html + + + Learn how to use your ROCm deep learning environment for training, fine-tuning, inference, and performance optimization through the following guides. diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index 1bb9177f0..732aab15e 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -32,19 +32,23 @@ subtrees: - file: compatibility/ml-compatibility/pytorch-compatibility.rst title: PyTorch compatibility - file: compatibility/ml-compatibility/tensorflow-compatibility.rst - title: TensorFlow compatibility + title: TensorFlow compatibility - file: compatibility/ml-compatibility/jax-compatibility.rst title: JAX compatibility - file: compatibility/ml-compatibility/verl-compatibility.rst - title: verl compatibility + title: verl compatibility - file: compatibility/ml-compatibility/stanford-megatron-lm-compatibility.rst title: Stanford Megatron-LM compatibility - file: compatibility/ml-compatibility/dgl-compatibility.rst - title: DGL compatibility + title: DGL compatibility - file: compatibility/ml-compatibility/megablocks-compatibility.rst title: Megablocks compatibility - file: compatibility/ml-compatibility/taichi-compatibility.rst - title: Taichi compatibility + title: Taichi compatibility + - file: compatibility/ml-compatibility/ray-compatibility.rst + title: Ray compatibility + - file: compatibility/ml-compatibility/llama-cpp-compatibility.rst + title: llama.cpp compatibility - file: how-to/build-rocm.rst title: Build ROCm from source From f25e27acf0bb312001b1f157520c00a10a59b75c Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Tue, 9 Sep 2025 12:22:04 -0400 Subject: [PATCH 44/81] Update roctracer pipeline ID and branch --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 86d1b58e9..f62b973df 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -251,8 +251,8 @@ parameters: developBranch: develop hasGpuTarget: true roctracer: - pipelineId: 141 - developBranch: amd-staging + pipelineId: 331 + developBranch: develop hasGpuTarget: true 
rocWMMA: pipelineId: 109 From 985786e98d68374169391e93a23626c2afcf2a07 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Tue, 9 Sep 2025 15:22:50 -0400 Subject: [PATCH 45/81] Add sqlalchemy to dependencies in rocprofiler-compute --- .azuredevops/components/rocprofiler-compute.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index d15414469..bccb51f67 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -55,6 +55,7 @@ parameters: - pymongo - pyyaml - setuptools + - sqlalchemy - tabulate - textual - textual_plotext From 3c37ae88f077849bc3d16e55c15250cde6516b82 Mon Sep 17 00:00:00 2001 From: Ibrahim Wani <113864060+ibrahimw1@users.noreply.github.com> Date: Tue, 9 Sep 2025 15:13:54 -0600 Subject: [PATCH 46/81] Add origami CI pipelines (#5256) * Add origami yaml pipeline. * Unindent lines. * Add cmake dependency step to origami yml. * Add pybind dep * Fix pipeline failures. * Quick fix * Fix pybind11 dep for almalinux * Fix pybind11 dep for almalinux again * Test * [Ex CI] don't create symlink if more than one sparse checkout dir * hipBLASLt multi sparse * Replace pybind with nanobind. * Quick fix * Testing nanobind install in pipelines * Run origami binding tests * Change build path for tests * Change build path for tests again * Add missing dep for CI * Add archs to buildJobs * Fix CI error. * Test * Test job target * Adding job target to hipblaslt dependant builds * Check devices on machine * Add gpu to pipeline * Add more gpu targets * test * Add test job to origami * Update test jobs * Finding test dir * Fix sparse checkout * Find build dir * Try to find build dir * Clean up * Test * Change test dir * Build origami in test job * Try removing job.target from params * Package bindings in build artifacts * Download build as artifact. * Comment out block * Fix checkout in test job * Test1 * Echo to list dir * Sparse checkout origami/python * Download python bindings as artifact * Try ctest instead of running test files directly * Only download artifacts for ubuntu * Add missing cd * Run individual tests not ctest. 
* Fix hipblaslt build failures * Resolve more ci failures in hipblaslt * Add old changes back in * Fix hipblaslt ci errors * Clean up * Add nanobind to array * Add nanobind to array correctly * Remove nanobind install script * Quick fix * Add pip module installs to test job --------- Co-authored-by: Daniel Su --- .azuredevops/components/hipBLASLt.yml | 4 +- .azuredevops/components/origami.yml | 236 ++++++++++++++++++++++++++ 2 files changed, 239 insertions(+), 1 deletion(-) create mode 100644 .azuredevops/components/origami.yml diff --git a/.azuredevops/components/hipBLASLt.yml b/.azuredevops/components/hipBLASLt.yml index b2633e84d..6364380a5 100644 --- a/.azuredevops/components/hipBLASLt.yml +++ b/.azuredevops/components/hipBLASLt.yml @@ -178,7 +178,7 @@ jobs: mkdir -p $(Agent.BuildDirectory)/temp-deps cd $(Agent.BuildDirectory)/temp-deps # position-independent LAPACK is required for almalinux8 builds - cmake -DBUILD_GTEST=OFF -DBUILD_LAPACK=ON -DCMAKE_POSITION_INDEPENDENT_CODE=ON $(Agent.BuildDirectory)/s/deps + cmake -DBUILD_GTEST=OFF -DBUILD_LAPACK=ON -DCMAKE_POSITION_INDEPENDENT_CODE=ON $(Agent.BuildDirectory)/sparse/projects/hipblaslt/deps make -j sudo make install - script: | @@ -197,6 +197,8 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} + cmakeSourceDir: $(Agent.BuildDirectory)/sparse/projects/hipblaslt + cmakeBuildDir: $(Agent.BuildDirectory)/sparse/projects/hipblaslt/build extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/vendor -DCMAKE_INCLUDE_PATH=$(Agent.BuildDirectory)/rocm/llvm/include diff --git a/.azuredevops/components/origami.yml b/.azuredevops/components/origami.yml new file mode 100644 index 000000000..b55cd67aa --- /dev/null +++ b/.azuredevops/components/origami.yml @@ -0,0 +1,236 @@ +parameters: +- name: componentName + type: string + default: origami +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false +# set to true if doing full build of ROCm stack +# and dependencies are pulled from same pipeline +- name: aggregatePipeline + type: boolean + default: false +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + - wget + - python3 + - python3-dev + - python3-pip +- name: pipModules + type: object + default: + - nanobind>=2.0.0 +- name: rocmDependencies + type: object + default: + - clr + - llvm-project + - rocm-cmake + - rocminfo + - ROCR-Runtime + - rocprofiler-register +- name: rocmTestDependencies + type: object + default: + - clr + - llvm-project + - rocm-cmake + - rocminfo + - ROCR-Runtime + - rocprofiler-register + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt } + - { os: almalinux8, packageManager: dnf } + testJobs: + - { os: ubuntu2204, packageManager: apt, target: gfx942 } + - { os: ubuntu2204, packageManager: apt, target: gfx90a } +- name: downstreamComponentMatrix + type: object + default: + - hipBLASLt: + name: hipBLASLt + sparseCheckoutDir: projects/hipblaslt + skipUnifiedBuild: 'false' + buildDependsOn: + - origami_build + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: 
origami_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: ROCM_PATH + value: $(Agent.BuildDirectory)/rocm + pool: + vmImage: ${{ variables.BASE_BUILD_POOL }} + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmDependencies }} + os: ${{ job.os }} + aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + extraBuildFlags: >- + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++ + -DORIGAMI_BUILD_SHARED_LIBS=ON + -DORIGAMI_ENABLE_PYTHON=ON + -DORIGAMI_BUILD_TESTING=ON + -GNinja + - ${{ if ne(job.os, 'almalinux8') }}: + - task: PublishPipelineArtifact@1 + displayName: 'Publish Build Directory Artifact' + inputs: + targetPath: '$(Agent.BuildDirectory)/s/build' + artifact: '${{ parameters.componentName }}_${{ job.os }}_build_dir' + publishLocation: 'pipeline' + - task: PublishPipelineArtifact@1 + displayName: 'Publish Python Source Artifact' + inputs: + targetPath: '$(Agent.BuildDirectory)/s/python' + artifact: '${{ parameters.componentName }}_${{ job.os }}_python_src' + publishLocation: 'pipeline' + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml + parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} + componentName: ${{ parameters.componentName }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml + +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: origami_test_${{ job.os }}_${{ job.target }} + timeoutInMinutes: 120 + dependsOn: origami_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ 
parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + preTargetFilter: ${{ parameters.componentName }} + os: ${{ job.os }} + - task: DownloadPipelineArtifact@2 + displayName: 'Download Build Directory Artifact' + inputs: + artifact: '${{ parameters.componentName }}_${{ job.os }}_build_dir' + path: '$(Agent.BuildDirectory)/s/build' + - task: DownloadPipelineArtifact@2 + displayName: 'Download Python Source Artifact' + inputs: + artifact: '${{ parameters.componentName }}_${{ job.os }}_python_src' + path: '$(Agent.BuildDirectory)/s/python' + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + os: ${{ job.os }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - script: | + export PYTHONPATH=$(Agent.BuildDirectory)/s/build/python:$PYTHONPATH + + echo "--- Running origami_test.py ---" + python3 $(Agent.BuildDirectory)/s/python/origami_test.py + + echo "--- Running origami_grid_test.py ---" + python3 $(Agent.BuildDirectory)/s/python/origami_grid_test.py + displayName: 'Run Python Binding Tests' + condition: succeeded() + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} + +- ${{ if parameters.triggerDownstreamJobs }}: + - ${{ each component in parameters.downstreamComponentMatrix }}: + - ${{ if not(and(parameters.unifiedBuild, eq(component.skipUnifiedBuild, 'true'))) }}: + - template: /.azuredevops/components/${{ component.name }}.yml@pipelines_repo + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ component.sparseCheckoutDir }} + buildDependsOn: ${{ component.buildDependsOn }} + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }}+${{ parameters.componentName }} + triggerDownstreamJobs: true + unifiedBuild: ${{ parameters.unifiedBuild }} From 05a66f75fea71fe19ba29f694c7c22854187e334 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Tue, 9 Sep 2025 17:41:11 -0400 Subject: [PATCH 47/81] add qwen3 30b a3b to vllm-benchmark-models (#5280) --- .../how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml index 714534ef1..a522e61a6 100644 --- a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml @@ -78,7 +78,11 @@ vllm_benchmark: model_repo: Qwen/QwQ-32B url: https://huggingface.co/Qwen/QwQ-32B precision: float16 - tunableop: true + - model: Qwen3 30B A3B + mad_tag: pyt_vllm_qwen3-30b-a3b + model_repo: Qwen/Qwen3-30B-A3B + url: https://huggingface.co/Qwen/Qwen3-30B-A3B + 
precision: float16 - group: Microsoft Phi tag: phi models: From 68f505e375e9d0a7500aad927bcd77c6aea1b972 Mon Sep 17 00:00:00 2001 From: Pratik Basyal Date: Wed, 10 Sep 2025 10:07:55 -0400 Subject: [PATCH 48/81] Taichi removed (#5283) --- RELEASE.md | 1 - 1 file changed, 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index be1527030..327a74c0a 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -59,7 +59,6 @@ ROCm documentation continues to be updated to provide clearer and more comprehen * ROCm provides a comprehensive ecosystem for deep learning development. For more details, see [Deep learning frameworks for ROCm](https://rocm.docs.amd.com/en/docs-6.4.3/how-to/deep-learning-rocm.html). AMD ROCm adds support for the following deep learning frameworks: - * Taichi is an open-source, imperative, and parallel programming language designed for high-performance numerical computation. Embedded in Python, it leverages just-in-time (JIT) compilation frameworks such as LLVM to accelerate compute-intensive Python code by compiling it to native GPU or CPU instructions. It is currently supported on ROCm 6.3.2. For more information, see [Taichi compatibility](https://rocm.docs.amd.com/en/docs-6.4.3/compatibility/ml-compatibility/taichi-compatibility.html). * Megablocks is a light-weight library for mixture-of-experts (MoE) training. The core of the system is efficient "dropless-MoE" and standard MoE layers. Megablocks is integrated with Megatron-LM, where data and pipeline parallel training of MoEs is supported. It is currently supported on ROCm 6.3.0. For more information, see [Megablocks compatibility](https://rocm.docs.amd.com/en/docs-6.4.3/compatibility/ml-compatibility/megablocks-compatibility.html). * The [Data types and precision support](https://rocm.docs.amd.com/en/latest/reference/precision-support.html) topic now includes new hardware and library support information. From 3b5019e03f0bb35d41061d2061df0307e4107b2a Mon Sep 17 00:00:00 2001 From: Pratik Basyal Date: Wed, 10 Sep 2025 10:53:25 -0400 Subject: [PATCH 49/81] Minor correction (#5285) --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 327a74c0a..9d8835de8 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -57,7 +57,7 @@ ROCm documentation continues to be updated to provide clearer and more comprehen For more information about the changes, see [Changelog for the AI Developer Hub](https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/changelog.html). -* ROCm provides a comprehensive ecosystem for deep learning development. For more details, see [Deep learning frameworks for ROCm](https://rocm.docs.amd.com/en/docs-6.4.3/how-to/deep-learning-rocm.html). AMD ROCm adds support for the following deep learning frameworks: +* ROCm provides a comprehensive ecosystem for deep learning development. For more details, see [Deep learning frameworks for ROCm](https://rocm.docs.amd.com/en/docs-6.4.3/how-to/deep-learning-rocm.html). AMD ROCm adds support for the following deep learning framework: * Megablocks is a light-weight library for mixture-of-experts (MoE) training. The core of the system is efficient "dropless-MoE" and standard MoE layers. Megablocks is integrated with Megatron-LM, where data and pipeline parallel training of MoEs is supported. It is currently supported on ROCm 6.3.0. For more information, see [Megablocks compatibility](https://rocm.docs.amd.com/en/docs-6.4.3/compatibility/ml-compatibility/megablocks-compatibility.html). 
From daa0184d2e768a5e9e618b6e9ff1cc82b544e7ad Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Tue, 9 Sep 2025 16:21:53 -0400 Subject: [PATCH 50/81] [Ex CI] enable rocm-core monorepo --- .azuredevops/components/rocm-core.yml | 28 +++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.azuredevops/components/rocm-core.yml b/.azuredevops/components/rocm-core.yml index f36252320..714518781 100644 --- a/.azuredevops/components/rocm-core.yml +++ b/.azuredevops/components/rocm-core.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocm-core - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -27,6 +46,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocm_core_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: ${{ if eq(job.os, 'ubuntu2404') }}: vmImage: 'ubuntu-24.04' @@ -50,8 +73,10 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -65,9 +90,12 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml From 0840c14b6dce9a337e350d4debd929a1deeb1b56 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 11:56:06 -0400 Subject: [PATCH 51/81] [Ex CI] update rocm-core pipeline ID to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index f62b973df..cd68fe411 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -171,8 +171,8 @@ parameters: developBranch: develop hasGpuTarget: false rocm-core: - pipelineId: 103 - developBranch: master + pipelineId: 349 + developBranch: develop hasGpuTarget: false rocm-examples: pipelineId: 216 From 3ca9cb1fcc0914edddd153ee6a50bc11dd6ed247 Mon Sep 17 00:00:00 2001 From: anisha-amd Date: Wed, 10 Sep 2025 15:02:03 -0400 Subject: [PATCH 52/81] Docs: adding ray and llama.cpp live blog links (#5290) --- .wordlist.txt | 1 + .../ml-compatibility/llama-cpp-compatibility.rst | 7 ++++++- .../ml-compatibility/ray-compatibility.rst | 10 ++++++++-- 3 files changed, 15 
insertions(+), 3 deletions(-) diff --git a/.wordlist.txt b/.wordlist.txt index 5370f4752..8cc6399b6 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -156,6 +156,7 @@ GEMMs GFLOPS GFortran GFXIP +GGUF Gemma GiB GIM diff --git a/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst b/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst index fd1356d32..1ae246931 100644 --- a/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst +++ b/docs/compatibility/ml-compatibility/llama-cpp-compatibility.rst @@ -67,9 +67,14 @@ llama.cpp is also used in a range of real-world applications, including: - Various other AI applications use llama.cpp as their inference engine; for a detailed list, see the `user interfaces (UIs) section `__. -Refer to the `AMD ROCm blog `_, +For more use cases and recommendations, refer to the `AMD ROCm blog `__, where you can search for llama.cpp examples and best practices to optimize your workloads on AMD GPUs. +- The `Llama.cpp Meets Instinct: A New Era of Open-Source AI Acceleration `__, + blog post outlines how the open-source llama.cpp framework enables efficient LLM inference—including interactive inference with ``llama-cli``, + server deployment with ``llama-server``, GGUF model preparation and quantization, performance benchmarking, and optimizations tailored for + AMD Instinct GPUs within the ROCm ecosystem. + .. _llama-cpp-docker-compat: Docker image compatibility diff --git a/docs/compatibility/ml-compatibility/ray-compatibility.rst b/docs/compatibility/ml-compatibility/ray-compatibility.rst index c5a2ed39f..2f5c83589 100644 --- a/docs/compatibility/ml-compatibility/ray-compatibility.rst +++ b/docs/compatibility/ml-compatibility/ray-compatibility.rst @@ -66,9 +66,15 @@ Use cases and recommendations GPUs. Follow this guide to get started with verl on AMD Instinct GPUs and accelerate your RLHF training with ROCm-optimized performance. +* The `Exploring Use Cases for Scalable AI: Implementing Ray with ROCm Support for Efficient ML Workflows + `__ + blog post describes key use cases such as training and inference for large language models (LLMs), + model serving, hyperparameter tuning, reinforcement learning, and the orchestration of large-scale + workloads using Ray in the ROCm environment. + For more use cases and recommendations, see the AMD GPU tabs in the `Accelerator Support -topic `_ -of the Ray core documentation and refer to the `AMD ROCm blog `_, +topic `__ +of the Ray core documentation and refer to the `AMD ROCm blog `__, where you can search for Ray examples and best practices to optimize your workloads on AMD GPUs. .. 
_ray-docker-compat: From 88f1493b687905cc252c24f9b32641c4f25a540f Mon Sep 17 00:00:00 2001 From: Haresh Sivasuntharampillai Date: Wed, 10 Sep 2025 19:16:35 +0000 Subject: [PATCH 53/81] [Ex CI] enable rocminfo monorepo --- .azuredevops/components/rocminfo.yml | 147 ++++++++++++++++----------- 1 file changed, 90 insertions(+), 57 deletions(-) diff --git a/.azuredevops/components/rocminfo.yml b/.azuredevops/components/rocminfo.yml index aada773ca..f3e87bf57 100644 --- a/.azuredevops/components/rocminfo.yml +++ b/.azuredevops/components/rocminfo.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocminfo - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -40,7 +59,11 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: rocminfo_build_${{ job.os }} + - job: ${{ parameters.componentName }}_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: vmImage: 'ubuntu-22.04' ${{ if eq(job.os, 'almalinux8') }}: @@ -62,14 +85,18 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -78,65 +105,71 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocminfo_test_${{ job.target }} - dependsOn: rocminfo_build_${{ job.os }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - 
registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - parameters: - runRocminfo: false - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocminfo - testDir: '$(Agent.BuildDirectory)' - testExecutable: './rocm/bin/rocminfo' - testParameters: '' - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocm_agent_enumerator - testDir: '$(Agent.BuildDirectory)' - testExecutable: './rocm/bin/rocm_agent_enumerator' - testParameters: '' - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - registerROCmPackages: true - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: rocminfo_test_${{ job.target }} + dependsOn: rocminfo_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + parameters: + runRocminfo: false + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: '$(Agent.BuildDirectory)' + testExecutable: './rocm/bin/rocminfo' + testParameters: '' + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: rocm_agent_enumerator + testDir: '$(Agent.BuildDirectory)' + testExecutable: './rocm/bin/rocm_agent_enumerator' + testParameters: '' + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + registerROCmPackages: true + environment: test + gpuTarget: ${{ job.target }} From 
56f566c1dca6c3d934fb106d004d0b18f829fb86 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 16:45:35 -0400 Subject: [PATCH 54/81] [Ex CI] update rocminfo pipeline ID to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index cd68fe411..efd5a81ae 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -179,8 +179,8 @@ parameters: developBranch: amd-staging hasGpuTarget: true rocminfo: - pipelineId: 91 - developBranch: amd-staging + pipelineId: 356 + developBranch: develop hasGpuTarget: false rocMLIR: pipelineId: 229 From d3fe7439cff7b1f2b1ab69c57cb334e68906d6af Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 20:37:18 +0000 Subject: [PATCH 55/81] [Ex CI] enable rocm-smi-lib monorepo --- .azuredevops/components/rocm_smi_lib.yml | 110 ++++++++++++++--------- 1 file changed, 70 insertions(+), 40 deletions(-) diff --git a/.azuredevops/components/rocm_smi_lib.yml b/.azuredevops/components/rocm_smi_lib.yml index 31459a868..101ed0bd1 100644 --- a/.azuredevops/components/rocm_smi_lib.yml +++ b/.azuredevops/components/rocm_smi_lib.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocm_smi_lib - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -31,7 +50,11 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: rocm_smi_lib_build_${{ job.os }} + - job: ${{ parameters.componentName }}_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: ${{ if eq(job.os, 'ubuntu2404') }}: vmImage: 'ubuntu-24.04' @@ -55,8 +78,10 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -65,51 +90,56 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml # parameters: # aptPackages: ${{ parameters.aptPackages }} -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocm_smi_lib_test_${{ job.os }}_${{ job.target }} - dependsOn: rocm_smi_lib_build_${{ job.os }} - condition: - and(succeeded(), - 
eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - parameters: - runRocminfo: false - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocm_smi_lib - testDir: '$(Agent.BuildDirectory)' - testExecutable: 'sudo ./rocm/share/rocm_smi/rsmitst_tests/rsmitst' - testParameters: '--gtest_output=xml:./test_output.xml --gtest_color=yes' - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} + dependsOn: ${{ parameters.componentName }}_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + parameters: + runRocminfo: false + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: '$(Agent.BuildDirectory)' + testExecutable: 'sudo ./rocm/share/rocm_smi/rsmitst_tests/rsmitst' + testParameters: '--gtest_output=xml:./test_output.xml --gtest_color=yes' + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + environment: test + gpuTarget: ${{ job.target }} \ No newline at end of file From 964a7cd0b51dc12ef16048cc2277fdefaf82b1ab Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 20:43:28 +0000 Subject: [PATCH 56/81] fixed component name --- .azuredevops/components/rocm_smi_lib.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.azuredevops/components/rocm_smi_lib.yml b/.azuredevops/components/rocm_smi_lib.yml index 101ed0bd1..138bc559e 100644 --- a/.azuredevops/components/rocm_smi_lib.yml +++ 
b/.azuredevops/components/rocm_smi_lib.yml @@ -1,7 +1,7 @@ parameters: - name: componentName type: string - default: rocm_smi_lib + default: rocm-smi-lib - name: checkoutRepo type: string default: 'self' @@ -50,7 +50,7 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: ${{ parameters.componentName }}_build_${{ job.os }} + - job: rocm_smi_lib_build_${{ job.os }} ${{ if parameters.buildDependsOn }}: dependsOn: - ${{ each build in parameters.buildDependsOn }}: @@ -104,8 +104,8 @@ jobs: - ${{ if eq(parameters.unifiedBuild, False) }}: - ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} - dependsOn: ${{ parameters.componentName }}_build_${{ job.os }} + - job: rocm_smi_lib_test_${{ job.os }}_${{ job.target }} + dependsOn: rocm_smi_lib_build_${{ job.os }} condition: and(succeeded(), eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), From 10f60868197a4d13591ac6949a077c05121c28b6 Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Thu, 11 Sep 2025 12:53:11 -0400 Subject: [PATCH 57/81] [External CI] Updates to rocm-libraries pipelines (#5300) - Add msgpack python module dependency for hipsparselt pipeline. - Change CMake dirs for rocblas pipeline to allow relative-path access to shared/tensile directory. --- .azuredevops/components/hipSPARSELt.yml | 1 + .azuredevops/components/rocBLAS.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.azuredevops/components/hipSPARSELt.yml b/.azuredevops/components/hipSPARSELt.yml index 104e0ee6c..02e258f78 100644 --- a/.azuredevops/components/hipSPARSELt.yml +++ b/.azuredevops/components/hipSPARSELt.yml @@ -44,6 +44,7 @@ parameters: type: object default: - joblib + - msgpack - name: rocmDependencies type: object default: diff --git a/.azuredevops/components/rocBLAS.yml b/.azuredevops/components/rocBLAS.yml index 6aab7ebb3..ca3577b5b 100644 --- a/.azuredevops/components/rocBLAS.yml +++ b/.azuredevops/components/rocBLAS.yml @@ -179,6 +179,8 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} + cmakeSourceDir: $(Agent.BuildDirectory)/sparse/projects/rocblas + cmakeBuildDir: $(Agent.BuildDirectory)/sparse/projects/rocblas/build extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm/llvm;$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/vendor -DCMAKE_BUILD_TYPE=Release From 7098bdc03bf51bb79a483dc1b4fd5f0d6aa98813 Mon Sep 17 00:00:00 2001 From: Peter Park Date: Thu, 11 Sep 2025 15:01:17 -0400 Subject: [PATCH 58/81] Update vLLM inference benchmark doc for 0909 release (and Sphinx fixes) (#5289) --- .wordlist.txt | 3 + ...vllm_0.10.0_20250812-benchmark-models.yaml | 91 ++++ ...vllm_0.9.1_20250715-benchmark-models.yaml} | 0 .../inference/sglang-benchmark-models.yaml | 33 +- .../inference/vllm-benchmark-models.yaml | 280 +++++++---- .../vllm-0.10.0-20250812.rst | 445 ++++++++++++++++++ .../previous-versions/vllm-0.9.1-20250715.rst | 6 +- .../previous-versions/vllm-history.rst | 2 +- .../benchmark-docker/pytorch-inference.rst | 42 +- .../inference/benchmark-docker/sglang.rst | 26 +- .../inference/benchmark-docker/vllm.rst | 363 +++++++------- .../training/benchmark-docker/jax-maxtext.rst | 30 +- .../training/benchmark-docker/megatron-lm.rst | 28 +- .../previous-versions/jax-maxtext-v25.5.rst | 6 +- .../benchmark-docker/primus-megatron.rst | 28 +- .../benchmark-docker/pytorch-training.rst | 46 +- 
docs/sphinx/static/css/vllm-benchmark.css | 46 +- 17 files changed, 1041 insertions(+), 434 deletions(-) create mode 100644 docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml rename docs/data/how-to/rocm-for-ai/inference/previous-versions/{vllm_0.9.1_20250715-benchmark_models.yaml => vllm_0.9.1_20250715-benchmark-models.yaml} (100%) create mode 100644 docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst diff --git a/.wordlist.txt b/.wordlist.txt index 8cc6399b6..cf9f990d4 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -673,6 +673,7 @@ github globals gnupg grayscale +gx gzip heterogenous hipBLAS @@ -783,6 +784,7 @@ parallelizing param parameterization passthrough +pe perfcounter performant perl @@ -812,6 +814,7 @@ profiler profilers protobuf pseudorandom +px py pytorch recommender diff --git a/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml new file mode 100644 index 000000000..418415319 --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml @@ -0,0 +1,91 @@ +vllm_benchmark: + unified_docker: + latest: + pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812 + docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.0_20250812/images/sha256-4c277ad39af3a8c9feac9b30bf78d439c74d9b4728e788a419d3f1d0c30cacaa + rocm_version: 6.4.1 + vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641) + pytorch_version: 2.7.0+gitf717b2a + hipblaslt_version: 0.15 + model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.1 8B + mad_tag: pyt_vllm_llama-3.1-8b + model_repo: meta-llama/Llama-3.1-8B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: float16 + - model: Llama 3.1 70B + mad_tag: pyt_vllm_llama-3.1-70b + model_repo: meta-llama/Llama-3.1-70B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: float16 + - model: Llama 3.1 405B + mad_tag: pyt_vllm_llama-3.1-405b + model_repo: meta-llama/Llama-3.1-405B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct + precision: float16 + - model: Llama 2 70B + mad_tag: pyt_vllm_llama-2-70b + model_repo: meta-llama/Llama-2-70b-chat-hf + url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf + precision: float16 + - model: Llama 3.1 8B FP8 + mad_tag: pyt_vllm_llama-3.1-8b_fp8 + model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV + precision: float8 + - model: Llama 3.1 70B FP8 + mad_tag: pyt_vllm_llama-3.1-70b_fp8 + model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV + precision: float8 + - model: Llama 3.1 405B FP8 + mad_tag: pyt_vllm_llama-3.1-405b_fp8 + model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV + precision: float8 + - group: Mistral AI + tag: mistral + models: + - model: Mixtral MoE 8x7B + mad_tag: pyt_vllm_mixtral-8x7b + model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + precision: float16 + - model: Mixtral MoE 8x22B + mad_tag: pyt_vllm_mixtral-8x22b + model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 + precision: float16 + - 
model: Mixtral MoE 8x7B FP8 + mad_tag: pyt_vllm_mixtral-8x7b_fp8 + model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + precision: float8 + - model: Mixtral MoE 8x22B FP8 + mad_tag: pyt_vllm_mixtral-8x22b_fp8 + model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + precision: float8 + - group: Qwen + tag: qwen + models: + - model: QwQ-32B + mad_tag: pyt_vllm_qwq-32b + model_repo: Qwen/QwQ-32B + url: https://huggingface.co/Qwen/QwQ-32B + precision: float16 + - model: Qwen3 30B A3B + mad_tag: pyt_vllm_qwen3-30b-a3b + model_repo: Qwen/Qwen3-30B-A3B + url: https://huggingface.co/Qwen/Qwen3-30B-A3B + precision: float16 + - group: Microsoft Phi + tag: phi + models: + - model: Phi-4 + mad_tag: pyt_vllm_phi-4 + model_repo: microsoft/phi-4 + url: https://huggingface.co/microsoft/phi-4 diff --git a/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml similarity index 100% rename from docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml rename to docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml diff --git a/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml index cc832dffb..8f80424d3 100644 --- a/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml @@ -1,17 +1,16 @@ -sglang_benchmark: - unified_docker: - latest: - pull_tag: lmsysorg/sglang:v0.4.5-rocm630 - docker_hub_url: https://hub.docker.com/layers/lmsysorg/sglang/v0.4.5-rocm630/images/sha256-63d2cb760a237125daf6612464cfe2f395c0784e21e8b0ea37d551cd10d3c951 - rocm_version: 6.3.0 - sglang_version: 0.4.5 (0.4.5-rocm) - pytorch_version: 2.6.0a0+git8d4926e - model_groups: - - group: DeepSeek - tag: deepseek - models: - - model: DeepSeek-R1-Distill-Qwen-32B - mad_tag: pyt_sglang_deepseek-r1-distill-qwen-32b - model_repo: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B - url: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B - precision: bfloat16 +dockers: + - pull_tag: lmsysorg/sglang:v0.4.5-rocm630 + docker_hub_url: https://hub.docker.com/layers/lmsysorg/sglang/v0.4.5-rocm630/images/sha256-63d2cb760a237125daf6612464cfe2f395c0784e21e8b0ea37d551cd10d3c951 + components: + ROCm: 6.3.0 + SGLang: 0.4.5 (0.4.5-rocm) + PyTorch: 2.6.0a0+git8d4926e +model_groups: + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek-R1-Distill-Qwen-32B + mad_tag: pyt_sglang_deepseek-r1-distill-qwen-32b + model_repo: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B + url: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B + precision: bfloat16 diff --git a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml index a522e61a6..99d9b773b 100644 --- a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml @@ -1,92 +1,188 @@ -vllm_benchmark: - unified_docker: - latest: - # TODO: update me - pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812 - docker_hub_url: 
https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.0_20250812/images/sha256-4c277ad39af3a8c9feac9b30bf78d439c74d9b4728e788a419d3f1d0c30cacaa - rocm_version: 6.4.1 - vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641) - pytorch_version: 2.7.0+gitf717b2a (2.7.0+gitf717b2a) - hipblaslt_version: 0.15 - model_groups: - - group: Meta Llama - tag: llama - models: - - model: Llama 3.1 8B - mad_tag: pyt_vllm_llama-3.1-8b - model_repo: meta-llama/Llama-3.1-8B-Instruct - url: https://huggingface.co/meta-llama/Llama-3.1-8B - precision: float16 - - model: Llama 3.1 70B - mad_tag: pyt_vllm_llama-3.1-70b - model_repo: meta-llama/Llama-3.1-70B-Instruct - url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct - precision: float16 - - model: Llama 3.1 405B - mad_tag: pyt_vllm_llama-3.1-405b - model_repo: meta-llama/Llama-3.1-405B-Instruct - url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct - precision: float16 - - model: Llama 2 70B - mad_tag: pyt_vllm_llama-2-70b - model_repo: meta-llama/Llama-2-70b-chat-hf - url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf - precision: float16 - - model: Llama 3.1 8B FP8 - mad_tag: pyt_vllm_llama-3.1-8b_fp8 - model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV - url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV - precision: float8 - - model: Llama 3.1 70B FP8 - mad_tag: pyt_vllm_llama-3.1-70b_fp8 - model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV - url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV - precision: float8 - - model: Llama 3.1 405B FP8 - mad_tag: pyt_vllm_llama-3.1-405b_fp8 - model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV - url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV - precision: float8 - - group: Mistral AI - tag: mistral - models: - - model: Mixtral MoE 8x7B - mad_tag: pyt_vllm_mixtral-8x7b - model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 - url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 - precision: float16 - - model: Mixtral MoE 8x22B - mad_tag: pyt_vllm_mixtral-8x22b - model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 - url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 - precision: float16 - - model: Mixtral MoE 8x7B FP8 - mad_tag: pyt_vllm_mixtral-8x7b_fp8 - model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV - url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV - precision: float8 - - model: Mixtral MoE 8x22B FP8 - mad_tag: pyt_vllm_mixtral-8x22b_fp8 - model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV - url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV - precision: float8 - - group: Qwen - tag: qwen - models: - - model: QwQ-32B - mad_tag: pyt_vllm_qwq-32b - model_repo: Qwen/QwQ-32B - url: https://huggingface.co/Qwen/QwQ-32B - precision: float16 - - model: Qwen3 30B A3B - mad_tag: pyt_vllm_qwen3-30b-a3b - model_repo: Qwen/Qwen3-30B-A3B - url: https://huggingface.co/Qwen/Qwen3-30B-A3B - precision: float16 - - group: Microsoft Phi - tag: phi - models: - - model: Phi-4 - mad_tag: pyt_vllm_phi-4 - model_repo: microsoft/phi-4 - url: https://huggingface.co/microsoft/phi-4 +dockers: + - pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.1_20250909 + docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.1_20250909/images/sha256-1113268572e26d59b205792047bea0e61e018e79aeadceba118b7bf23cb3715c + components: + ROCm: 6.4.1 + vLLM: 0.10.1 (0.10.1rc2.dev409+g0b6bf6691.rocm641) + PyTorch: 2.7.0+gitf717b2a + hipBLASLt: 0.15 +model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.1 8B + mad_tag: 
pyt_vllm_llama-3.1-8b + model_repo: meta-llama/Llama-3.1-8B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: float16 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 70B + mad_tag: pyt_vllm_llama-3.1-70b + model_repo: meta-llama/Llama-3.1-70B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 405B + mad_tag: pyt_vllm_llama-3.1-405b + model_repo: meta-llama/Llama-3.1-405B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 2 70B + mad_tag: pyt_vllm_llama-2-70b + model_repo: meta-llama/Llama-2-70b-chat-hf + url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 4096 + max_num_batched_tokens: 4096 + max_model_len: 4096 + - model: Llama 3.1 8B FP8 + mad_tag: pyt_vllm_llama-3.1-8b_fp8 + model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV + precision: float8 + config: + tp: 1 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 70B FP8 + mad_tag: pyt_vllm_llama-3.1-70b_fp8 + model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 405B FP8 + mad_tag: pyt_vllm_llama-3.1-405b_fp8 + model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - group: Mistral AI + tag: mistral + models: + - model: Mixtral MoE 8x7B + mad_tag: pyt_vllm_mixtral-8x7b + model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 32768 + max_num_batched_tokens: 32768 + max_model_len: 8192 + - model: Mixtral MoE 8x22B + mad_tag: pyt_vllm_mixtral-8x22b + model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 65536 + max_num_batched_tokens: 65536 + max_model_len: 8192 + - model: Mixtral MoE 8x7B FP8 + mad_tag: pyt_vllm_mixtral-8x7b_fp8 + model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 32768 + max_num_batched_tokens: 32768 + max_model_len: 8192 + - model: Mixtral MoE 8x22B FP8 + mad_tag: pyt_vllm_mixtral-8x22b_fp8 + model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + url: 
https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 65536 + max_num_batched_tokens: 65536 + max_model_len: 8192 + - group: Qwen + tag: qwen + models: + - model: QwQ-32B + mad_tag: pyt_vllm_qwq-32b + model_repo: Qwen/QwQ-32B + url: https://huggingface.co/Qwen/QwQ-32B + precision: float16 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Qwen3 30B A3B + mad_tag: pyt_vllm_qwen3-30b-a3b + model_repo: Qwen/Qwen3-30B-A3B + url: https://huggingface.co/Qwen/Qwen3-30B-A3B + precision: float16 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 32768 + max_num_batched_tokens: 32768 + max_model_len: 8192 + - group: Microsoft Phi + tag: phi + models: + - model: Phi-4 + mad_tag: pyt_vllm_phi-4 + model_repo: microsoft/phi-4 + url: https://huggingface.co/microsoft/phi-4 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 16384 + max_num_batched_tokens: 16384 + max_model_len: 8192 diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst new file mode 100644 index 000000000..68d7f66e7 --- /dev/null +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst @@ -0,0 +1,445 @@ +:orphan: + +.. meta:: + :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and the + ROCm vLLM Docker image. + :keywords: model, MAD, automation, dashboarding, validate + +********************************** +vLLM inference performance testing +********************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm vLLM + inference performance documentation. See :doc:`../vllm` for the latest version. + +.. _vllm-benchmark-unified-docker-812: + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + The `ROCm vLLM Docker <{{ unified_docker.docker_hub_url }}>`_ image offers + a prebuilt, optimized environment for validating large language model (LLM) + inference performance on AMD Instinct™ MI300X series accelerators. This ROCm vLLM + Docker image integrates vLLM and PyTorch tailored specifically for MI300X series + accelerators and includes the following components: + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + * - `ROCm `__ + - {{ unified_docker.rocm_version }} + + * - `vLLM `__ + - {{ unified_docker.vllm_version }} + + * - `PyTorch `__ + - {{ unified_docker.pytorch_version }} + + * - `hipBLASLt `__ + - {{ unified_docker.hipblaslt_version }} + +With this Docker image, you can quickly test the :ref:`expected +inference performance numbers ` for +MI300X series accelerators. + +What's new +========== + +The following is summary of notable changes since the :doc:`previous ROCm/vLLM Docker release `. + +* Upgraded to vLLM v0.10. + +* FP8 KV cache support via AITER. + +* Full graph capture support via AITER. + +Supported models +================ + +.. 
datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + .. _vllm-benchmark-available-models-812: + + The following models are supported for inference performance benchmarking + with vLLM and ROCm. Some instructions, commands, and recommendations in this + documentation might vary by model -- select one to get started. + + .. raw:: html + +
+
+
+             <!-- Model selector buttons: a "Model group" row (one button per
+                  {{ model_group.group }}) and a "Model" row (one button per
+                  {{ model.model }}, laid out three per row when the model count
+                  divides evenly by three). -->
+
+ + .. _vllm-benchmark-vllm-812: + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{model.mad_tag}} + + .. note:: + + See the `{{ model.model }} model card on Hugging Face <{{ model.url }}>`_ to learn more about your selected model. + Some models require access authorization prior to use via an external license agreement through a third party. + + {% endfor %} + {% endfor %} + +.. note:: + + vLLM is a toolkit and library for LLM inference and serving. AMD implements + high-performance custom kernels and modules in vLLM to enhance performance. + See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for + more information. + +.. _vllm-benchmark-performance-measurements-812: + +Performance measurements +======================== + +To evaluate performance, the +`Performance results with AMD ROCm software `_ +page provides reference throughput and serving measurements for inferencing popular AI models. + +.. important:: + + The performance data presented in + `Performance results with AMD ROCm software `_ + only reflects the latest version of this inference benchmarking environment. + The listed measurements should not be interpreted as the peak performance achievable by AMD Instinct MI325X and MI300X accelerators or ROCm software. + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. + +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + Pull the Docker image + ===================== + + Download the `ROCm vLLM Docker image <{{ unified_docker.docker_hub_url }}>`_. + Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + + Benchmarking + ============ + + Once the setup is complete, choose between two options to reproduce the + benchmark results: + + .. _vllm-benchmark-mad-812: + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{model.mad_tag}} + + .. tab-set:: + + .. tab-item:: MAD-integrated benchmarking + + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + 2. Use this command to run the performance benchmark test on the `{{model.model}} <{{ model.url }}>`_ model + using one GPU with the :literal:`{{model.precision}}` data type on the host machine. + + .. 
code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{model.mad_tag}} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{model.mad_tag}}``. The throughput and serving reports of the + model are collected in the following paths: ``{{ model.mad_tag }}_throughput.csv`` + and ``{{ model.mad_tag }}_serving.csv``. + + Although the :ref:`available models + ` are preconfigured to collect + offline throughput and online serving performance data, you can + also change the benchmarking parameters. See the standalone + benchmarking tab for more information. + + {% if model.tunableop %} + + .. note:: + + For improved performance, consider enabling :ref:`PyTorch TunableOp `. + TunableOp automatically explores different implementations and configurations of certain PyTorch + operators to find the fastest one for your hardware. + + By default, ``{{model.mad_tag}}`` runs with TunableOp disabled (see + ``__). To enable it, include + the ``--tunableop on`` argument in your run. + + Enabling TunableOp triggers a two-pass run -- a warm-up followed by the + performance-collection run. + + {% endif %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required scripts + + 1. Run the vLLM benchmark tool independently by starting the + `Docker container <{{ unified_docker.docker_hub_url }}>`_ + as shown in the following snippet. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + docker run -it \ + --device=/dev/kfd \ + --device=/dev/dri \ + --group-add video \ + --shm-size 16G \ + --security-opt seccomp=unconfined \ + --security-opt apparmor=unconfined \ + --cap-add=SYS_PTRACE \ + -v $(pwd):/workspace \ + --env HUGGINGFACE_HUB_CACHE=/workspace \ + --name test \ + {{ unified_docker.pull_tag }} + + 2. In the Docker container, clone the ROCm MAD repository and navigate to the + benchmark scripts directory at ``~/MAD/scripts/vllm``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/vllm + + 3. To start the benchmark, use the following command with the appropriate options. + + .. code-block:: + + ./run.sh \ + --config $CONFIG_CSV \ + --model_repo {{ model.model_repo }} \ + + + .. dropdown:: Benchmark options + :open: + + .. list-table:: + :header-rows: 1 + :align: center + + * - Name + - Options + - Description + + * - ``--config`` + - ``configs/default.csv`` + - Run configs from the CSV for the chosen model repo and benchmark. + + * - + - ``configs/extended.csv`` + - + + * - + - ``configs/performance.csv`` + - + + * - ``--benchmark`` + - ``throughput`` + - Measure offline end-to-end throughput. + + * - + - ``serving`` + - Measure online serving performance. + + * - + - ``all`` + - Measure both throughput and serving. + + * - `` + - See `run.sh `__ for more info. + - Additional overrides to the config CSV. + + The input sequence length, output sequence length, and tensor parallel (TP) are + already configured. You don't need to specify them with this script. + + .. note:: + + For best performance, it's recommended to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. + + If you encounter the following error, pass your access-authorized Hugging + Face token to the gated models. + + .. code-block:: + + OSError: You are trying to access a gated repo. + + # pass your HF_TOKEN + export HF_TOKEN=$your_personal_hf_token + + .. 
rubric:: Benchmarking examples + + Here are some examples of running the benchmark with various options: + + * Throughput benchmark + + Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: shell + + export MAD_MODEL_NAME={{ model.mad_tag }} + ./run.sh \ + --config configs/default.csv \ + --model_repo {{model.model_repo}} \ + --benchmark throughput + + Find the throughput benchmark report at ``./{{ model.mad_tag }}_throughput.csv``. + + * Serving benchmark + + Use this command to benchmark the serving performance of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: + + export MAD_MODEL_NAME={{ model.mad_tag }} + ./run.sh \ + --config configs/default.csv \ + --model_repo {{model.model_repo}} \ + --benchmark serving + + Find the serving benchmark report at ``./{{ model.mad_tag }}_serving.csv``. + + .. raw:: html + + + + .. note:: + + Throughput is calculated as: + + - .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time + + - .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time + {% endfor %} + {% endfor %} + +Advanced usage +============== + +For information on experimental features and known issues related to ROCm optimization efforts on vLLM, +see the developer's guide at ``__. + +Reproducing the Docker image +---------------------------- + +To reproduce this ROCm/vLLM Docker image release, follow these steps: + +1. Clone the `vLLM repository `__. + + .. code-block:: shell + + git clone https://github.com/ROCm/vllm.git + +2. Checkout the specific release commit. + + .. code-block:: shell + + cd vllm + git checkout 340ea86dfe5955d6f9a9e767d6abab5aacf2c978 + +3. Build the Docker image. Replace ``vllm-rocm`` with your desired image tag. + + .. code-block:: shell + + docker build -f docker/Dockerfile.rocm -t vllm-rocm . + +Further reading +=============== + +- To learn more about the options for latency and throughput benchmark scripts, + see ``_. + +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. + +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. + +- For application performance optimization strategies for HPC and AI workloads, + including inference with vLLM, see :doc:`/how-to/rocm-for-ai/inference-optimization/workload`. + +- To learn how to run community models from Hugging Face on AMD GPUs, see + :doc:`Running models from Hugging Face `. + +- To learn how to fine-tune LLMs and optimize inference, see + :doc:`Fine-tuning LLMs and inference optimization `. + +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. + +Previous versions +================= + +See :doc:`vllm-history` to find documentation for previous releases +of the ``ROCm/vllm`` Docker image. 
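
To make the throughput definitions in the note above concrete, here is a small worked sketch using assumed example values (640 requests, 2048 input and 2048 output tokens each, 120 seconds elapsed); the figures are illustrative only, not measured results:

```sh
# Assumed example values -- substitute the request count, sequence lengths,
# and elapsed time reported by your own benchmark run.
requests=640; input_len=2048; output_len=2048; elapsed=120
echo "throughput_tot: $(( requests * (input_len + output_len) / elapsed )) tokens/s"  # ~21845
echo "throughput_gen: $(( requests * output_len / elapsed )) tokens/s"                # ~10922
```
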
diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst index 34df0359d..9f6d001ad 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst @@ -16,7 +16,7 @@ vLLM inference performance testing .. _vllm-benchmark-unified-docker-715: -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} @@ -69,7 +69,7 @@ The following is summary of notable changes since the :doc:`previous ROCm/vLLM D Supported models ================ -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} @@ -162,7 +162,7 @@ To test for optimal performance, consult the recommended :ref:`System health ben `. This suite of tests will help you verify and fine-tune your system's configuration. -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst index 6f87670ec..857a1ee0b 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst @@ -7,7 +7,7 @@ vLLM inference performance testing version history This table lists previous versions of the ROCm vLLM inference Docker image for inference performance testing. For detailed information about available models for benchmarking, see the version-specific documentation. You can find tagged -previous releases of the ``ROCm/vllm`` Docker image on `Docker Hub `__. +previous releases of the ``ROCm/vllm`` Docker image on `Docker Hub `__. .. list-table:: :header-rows: 1 diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst index b9e22bf33..ad8db53c4 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst @@ -31,26 +31,30 @@ PyTorch inference performance testing .. raw:: html
-             <!-- previous model selector markup (a "Model" row, one button per model group) -->
+             <!-- rebuilt model selector markup; the buttons are generated by the loop below -->
+
{% for model_group in model_groups %} diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst index 340ef975e..1722b2018 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst @@ -2,19 +2,19 @@ :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and SGLang :keywords: model, MAD, automation, dashboarding, validate -************************************ -SGLang inference performance testing -************************************ +***************************************************************** +SGLang inference performance testing DeepSeek-R1-Distill-Qwen-32B +***************************************************************** .. _sglang-benchmark-unified-docker: .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml - {% set unified_docker = data.sglang_benchmark.unified_docker.latest %} + {% set docker = data.dockers[0] %} `SGLang `__ is a high-performance inference and serving engine for large language models (LLMs) and vision models. The - ROCm-enabled `SGLang Docker image <{{ unified_docker.docker_hub_url }}>`__ + ROCm-enabled `SGLang Docker image <{{ docker.docker_hub_url }}>`__ bundles SGLang with PyTorch, optimized for AMD Instinct MI300X series accelerators. It includes the following software components: @@ -24,14 +24,10 @@ SGLang inference performance testing * - Software component - Version - * - `ROCm `__ - - {{ unified_docker.rocm_version }} - - * - `SGLang `__ - - {{ unified_docker.sglang_version }} - - * - `PyTorch `__ - - {{ unified_docker.pytorch_version }} + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} System validation ================= @@ -50,8 +46,8 @@ system's configuration. .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml - {% set unified_docker = data.sglang_benchmark.unified_docker.latest %} - {% set model_groups = data.sglang_benchmark.model_groups %} + {% set unified_docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} Pull the Docker image ===================== diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst index 9f3bd608d..f2b060ebd 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst @@ -7,14 +7,13 @@ vLLM inference performance testing ********************************** -.. _vllm-benchmark-unified-docker-812: +.. _vllm-benchmark-unified-docker-909: .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml - {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} - {% set model_groups = data.vllm_benchmark.model_groups %} + {% set docker = data.dockers[0] %} - The `ROCm vLLM Docker <{{ unified_docker.docker_hub_url }}>`_ image offers + The `ROCm vLLM Docker <{{ docker.docker_hub_url }}>`_ image offers a prebuilt, optimized environment for validating large language model (LLM) inference performance on AMD Instinctâ„¢ MI300X series accelerators. 
This ROCm vLLM Docker image integrates vLLM and PyTorch tailored specifically for MI300X series @@ -26,20 +25,13 @@ vLLM inference performance testing * - Software component - Version - * - `ROCm `__ - - {{ unified_docker.rocm_version }} - - * - `vLLM `__ - - {{ unified_docker.vllm_version }} - - * - `PyTorch `__ - - {{ unified_docker.pytorch_version }} - - * - `hipBLASLt `__ - - {{ unified_docker.hipblaslt_version }} + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} With this Docker image, you can quickly test the :ref:`expected -inference performance numbers ` for +inference performance numbers ` for MI300X series accelerators. What's new @@ -47,21 +39,23 @@ What's new The following is summary of notable changes since the :doc:`previous ROCm/vLLM Docker release `. -* Upgraded to vLLM v0.10. +* Upgraded to vLLM v0.10.1. -* FP8 KV cache support via AITER. +* Set ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1`` by default for better performance. -* Full graph capture support via AITER. +* Set ``VLLM_ROCM_USE_AITER_RMSNORM=0`` by default to avoid various issues with torch compile. + +.. _vllm-benchmark-supported-models-909: Supported models ================ .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml - {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} - {% set model_groups = data.vllm_benchmark.model_groups %} + {% set docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} - .. _vllm-benchmark-available-models-812: + .. _vllm-benchmark-available-models-909: The following models are supported for inference performance benchmarking with vLLM and ROCm. Some instructions, commands, and recommendations in this @@ -70,55 +64,51 @@ Supported models .. raw:: html
[model-picker raw HTML hunk; div markup not preserved: the selector headings change from "Model group" / "Model" to "Model" / "Variant", and the Jinja loops over model_groups and their models are re-indented]
- .. _vllm-benchmark-vllm-812: + .. _vllm-benchmark-vllm-909: {% for model_group in model_groups %} {% for model in model_group.models %} - .. container:: model-doc {{model.mad_tag}} + .. container:: model-doc {{ model.mad_tag }} .. note:: See the `{{ model.model }} model card on Hugging Face <{{ model.url }}>`_ to learn more about your selected model. Some models require access authorization prior to use via an external license agreement through a third party. + {% if model.precision == "float8" and model.model_repo.startswith("amd") %} + This model uses FP8 quantization via `AMD Quark `__ for efficient inference on AMD accelerators. + {% endif %} {% endfor %} {% endfor %} -.. note:: - - vLLM is a toolkit and library for LLM inference and serving. AMD implements - high-performance custom kernels and modules in vLLM to enhance performance. - See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for - more information. - -.. _vllm-benchmark-performance-measurements-812: +.. _vllm-benchmark-performance-measurements-909: Performance measurements ======================== @@ -151,18 +141,18 @@ system's configuration. .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml - {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} - {% set model_groups = data.vllm_benchmark.model_groups %} + {% set docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} Pull the Docker image ===================== - Download the `ROCm vLLM Docker image <{{ unified_docker.docker_hub_url }}>`_. + Download the `ROCm vLLM Docker image <{{ docker.docker_hub_url }}>`_. Use the following command to pull the Docker image from Docker Hub. .. code-block:: shell - docker pull {{ unified_docker.pull_tag }} + docker pull {{ docker.pull_tag }} Benchmarking ============ @@ -170,7 +160,7 @@ system's configuration. Once the setup is complete, choose between two options to reproduce the benchmark results: - .. _vllm-benchmark-mad-812: + .. _vllm-benchmark-mad-909: {% for model_group in model_groups %} {% for model in model_group.models %} @@ -181,6 +171,9 @@ system's configuration. .. tab-item:: MAD-integrated benchmarking + The following run command is tailored to {{ model.model }}. + See :ref:`vllm-benchmark-supported-models-909` to switch to another available model. + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local directory and install the required packages on the host machine. @@ -208,7 +201,7 @@ system's configuration. and ``{{ model.mad_tag }}_serving.csv``. Although the :ref:`available models - ` are preconfigured to collect + ` are preconfigured to collect offline throughput and online serving performance data, you can also change the benchmarking parameters. See the standalone benchmarking tab for more information. @@ -232,132 +225,143 @@ system's configuration. .. tab-item:: Standalone benchmarking - .. rubric:: Download the Docker image and required scripts + The following commands are optimized for {{ model.model }}. + See :ref:`vllm-benchmark-supported-models-909` to switch to another available model. - 1. Run the vLLM benchmark tool independently by starting the - `Docker container <{{ unified_docker.docker_hub_url }}>`_ - as shown in the following snippet. + .. seealso:: + + For more information on configuration, see the `config files + `__ + in the MAD repository. 
Refer to the `vLLM engine `__ + for descriptions of available configuration options + and `Benchmarking vLLM `__ for + additional benchmarking information. + + .. rubric:: Launch the container + + You can run the vLLM benchmark tool independently by starting the + `Docker container <{{ docker.docker_hub_url }}>`_ as shown + in the following snippet. + + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + docker run -it \ + --device=/dev/kfd \ + --device=/dev/dri \ + --group-add video \ + --shm-size 16G \ + --security-opt seccomp=unconfined \ + --security-opt apparmor=unconfined \ + --cap-add=SYS_PTRACE \ + -v $(pwd):/workspace \ + --env HUGGINGFACE_HUB_CACHE=/workspace \ + --name test \ + {{ docker.pull_tag }} + + .. rubric:: Throughput command + + Use the following command to start the throughput benchmark. + + .. code-block:: shell + + model={{ model.model_repo }} + tp={{ model.config.tp }} + num_prompts=1024 + in=128 + out=128 + dtype={{ model.config.dtype }} + kv_cache_dtype={{ model.config.kv_cache_dtype }} + max_num_seqs=1024 + max_seq_len_to_capture={{ model.config.max_seq_len_to_capture }} + max_num_batched_tokens={{ model.config.max_num_batched_tokens }} + max_model_len={{ model.config.max_model_len }} + + vllm bench throughput --model $model \ + -tp $tp \ + --num-prompts $num_prompts \ + --input-len $in \ + --output-len $out \ + --dtype $dtype \ + --kv-cache-dtype $kv_cache_dtype \ + --max-num-seqs $max_num_seqs \ + --max-seq-len-to-capture $max_seq_len_to_capture \ + --max-num-batched-tokens $max_num_batched_tokens \ + --max-model-len $max_model_len \ + --trust-remote-code \ + --output-json ${model}_throughput.json \ + --gpu-memory-utilization 0.9 + + .. rubric:: Serving command + + 1. Start the server using the following command: .. code-block:: shell - docker pull {{ unified_docker.pull_tag }} - docker run -it \ - --device=/dev/kfd \ - --device=/dev/dri \ - --group-add video \ - --shm-size 16G \ - --security-opt seccomp=unconfined \ - --security-opt apparmor=unconfined \ - --cap-add=SYS_PTRACE \ - -v $(pwd):/workspace \ - --env HUGGINGFACE_HUB_CACHE=/workspace \ - --name test \ - {{ unified_docker.pull_tag }} + model={{ model.model_repo }} + tp={{ model.config.tp }} + dtype={{ model.config.dtype }} + kv_cache_dtype={{ model.config.kv_cache_dtype }} + max_num_seqs=256 + max_seq_len_to_capture={{ model.config.max_seq_len_to_capture }} + max_num_batched_tokens={{ model.config.max_num_batched_tokens }} + max_model_len={{ model.config.max_model_len }} - 2. In the Docker container, clone the ROCm MAD repository and navigate to the - benchmark scripts directory at ``~/MAD/scripts/vllm``. + vllm serve $model \ + -tp $tp \ + --dtype $dtype \ + --kv-cache-dtype $kv_cache_dtype \ + --max-num-seqs $max_num_seqs \ + --max-seq-len-to-capture $max_seq_len_to_capture \ + --max-num-batched-tokens $max_num_batched_tokens \ + --max-model-len $max_model_len \ + --no-enable-prefix-caching \ + --swap-space 16 \ + --disable-log-requests \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 + + Wait until the model has loaded and the server is ready to accept requests. + + 2. On another terminal on the same machine, run the benchmark: .. code-block:: shell - git clone https://github.com/ROCm/MAD - cd MAD/scripts/vllm + # Connect to the container + docker exec -it test bash - 3. To start the benchmark, use the following command with the appropriate options. 
+ # Wait for the server to start + until curl -s http://localhost:8000/v1/models; do sleep 30; done + + # Run the benchmark + model={{ model.model_repo }} + max_concurrency=1 + num_prompts=10 + in=128 + out=128 + vllm bench serve --model $model \ + --percentile-metrics "ttft,tpot,itl,e2el" \ + --dataset-name random \ + --ignore-eos \ + --max-concurrency $max_concurrency \ + --num-prompts $num_prompts \ + --random-input-len $in \ + --random-output-len $out \ + --trust-remote-code \ + --save-result \ + --result-filename ${model}_serving.json + + .. note:: + + If you encounter the following error, pass your access-authorized Hugging + Face token to the gated models. .. code-block:: - ./run.sh \ - --config $CONFIG_CSV \ - --model_repo {{ model.model_repo }} \ - + OSError: You are trying to access a gated repo. - .. dropdown:: Benchmark options - :open: - - .. list-table:: - :header-rows: 1 - :align: center - - * - Name - - Options - - Description - - * - ``--config`` - - ``configs/default.csv`` - - Run configs from the CSV for the chosen model repo and benchmark. - - * - - - ``configs/extended.csv`` - - - - * - - - ``configs/performance.csv`` - - - - * - ``--benchmark`` - - ``throughput`` - - Measure offline end-to-end throughput. - - * - - - ``serving`` - - Measure online serving performance. - - * - - - ``all`` - - Measure both throughput and serving. - - * - `` - - See `run.sh `__ for more info. - - Additional overrides to the config CSV. - - The input sequence length, output sequence length, and tensor parallel (TP) are - already configured. You don't need to specify them with this script. - - .. note:: - - For best performance, it's recommended to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. - - If you encounter the following error, pass your access-authorized Hugging - Face token to the gated models. - - .. code-block:: - - OSError: You are trying to access a gated repo. - - # pass your HF_TOKEN - export HF_TOKEN=$your_personal_hf_token - - .. rubric:: Benchmarking examples - - Here are some examples of running the benchmark with various options: - - * Throughput benchmark - - Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. - - .. code-block:: shell - - export MAD_MODEL_NAME={{ model.mad_tag }} - ./run.sh \ - --config configs/default.csv \ - --model_repo {{model.model_repo}} \ - --benchmark throughput - - Find the throughput benchmark report at ``./{{ model.mad_tag }}_throughput.csv``. - - * Serving benchmark - - Use this command to benchmark the serving performance of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. - - .. code-block:: - - export MAD_MODEL_NAME={{ model.mad_tag }} - ./run.sh \ - --config configs/default.csv \ - --model_repo {{model.model_repo}} \ - --benchmark serving - - Find the serving benchmark report at ``./{{ model.mad_tag }}_serving.csv``. + # pass your HF_TOKEN + export HF_TOKEN=$your_personal_hf_token .. raw:: html @@ -382,7 +386,7 @@ Advanced usage ============== For information on experimental features and known issues related to ROCm optimization efforts on vLLM, -see the developer's guide at ``__. +see the developer's guide at ``__. Reproducing the Docker image ---------------------------- @@ -400,7 +404,7 @@ To reproduce this ROCm/vLLM Docker image release, follow these steps: .. code-block:: shell cd vllm - git checkout 340ea86dfe5955d6f9a9e767d6abab5aacf2c978 + git checkout 6663000a391911eba96d7864a26ac42b07f6ef29 3. 
Build the Docker image. Replace ``vllm-rocm`` with your desired image tag. @@ -419,15 +423,12 @@ Further reading - To learn more about system settings and management practices to configure your system for AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. +- See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for + a brief introduction to vLLM and optimization strategies. + - For application performance optimization strategies for HPC and AI workloads, including inference with vLLM, see :doc:`/how-to/rocm-for-ai/inference-optimization/workload`. -- To learn how to run community models from Hugging Face on AMD GPUs, see - :doc:`Running models from Hugging Face `. - -- To learn how to fine-tune LLMs and optimize inference, see - :doc:`Fine-tuning LLMs and inference optimization `. - - For a list of other ready-made Docker images for AI with ROCm, see `AMD Infinity Hub `_. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst index a85f5af56..76c3582e7 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst @@ -82,32 +82,32 @@ started. {% set model_groups = data.model_groups %} .. raw:: html -
[model-picker raw HTML hunk; div markup not preserved: the "Model variant" heading is renamed to "Variant" and the selector markup with its Jinja loops is re-indented]
.. note:: @@ -208,7 +208,7 @@ with RDMA, skip ahead to :ref:`amd-maxtext-get-started-v257`. e. RDMA interface - Ensure the :ref:`required packages ` are installed on all nodes. + Ensure the :ref:`required packages ` are installed on all nodes. Then, set the RDMA interfaces to use for communication. .. code-block:: bash diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst index 687cc514f..4df1da960 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst @@ -70,32 +70,32 @@ workloads: {% set model_groups = data.model_groups %} .. raw:: html -
[model-picker raw HTML hunk; div markup not preserved: the "Model variant" heading is renamed to "Variant" and the selector markup with its Jinja loops is re-indented]
.. note:: diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst index d5051d28c..9bd7081d2 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst @@ -112,7 +112,7 @@ Multi-node setup For multi-node environments, ensure you have all the necessary packages for your network device, such as, RDMA. If you're not using a multi-node setup -with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. +with RDMA, skip ahead to :ref:`amd-maxtext-download-docker-v255`. 1. Install the following packages to build and install the RDMA driver. @@ -177,7 +177,7 @@ with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. e. RDMA interface - Ensure the :ref:`required packages ` are installed on all nodes. + Ensure the :ref:`required packages ` are installed on all nodes. Then, set the RDMA interfaces to use for communication. .. code-block:: bash @@ -199,7 +199,7 @@ Pull the Docker image docker pull rocm/jax-training:maxtext-v25.5 2. Use the following command to launch the Docker container. Note that the benchmarking scripts - used in the :ref:`following section ` automatically launch the Docker container + used in the :ref:`following section ` automatically launch the Docker container and execute the benchmark. .. code-block:: shell diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst index 0a80c7c9b..81ec4ed50 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst @@ -55,32 +55,32 @@ vary by model -- select one to get started. {% set model_groups = data.model_groups %} .. raw:: html -
[model-picker raw HTML hunk; div markup not preserved: the "Model variant" heading is renamed to "Variant" and the selector markup with its Jinja loops is re-indented]
.. note:: diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst index e7258e07b..d8ab01318 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst @@ -45,30 +45,30 @@ vary by model -- select one to get started. .. raw:: html
[model-picker raw HTML hunk; div markup not preserved: the headings change from "Model group" / "Model variant" to "Model" / "Variant" and the selector markup with its Jinja loops is re-indented]
diff --git a/docs/sphinx/static/css/vllm-benchmark.css b/docs/sphinx/static/css/vllm-benchmark.css index 4c10b1ffb..231bb2cac 100644 --- a/docs/sphinx/static/css/vllm-benchmark.css +++ b/docs/sphinx/static/css/vllm-benchmark.css @@ -7,15 +7,14 @@ html { --compat-head-color: var(--pst-color-surface); --compat-param-hover-color: var(--pst-color-link-hover); --compat-param-selected-color: var(--pst-color-primary); + --compat-border-color: var(--pst-color-border); } html[data-theme="light"] { - --compat-border-color: var(--pst-gray-500); --compat-param-disabled-color: var(--pst-gray-300); } html[data-theme="dark"] { - --compat-border-color: var(--pst-gray-600); --compat-param-disabled-color: var(--pst-gray-600); } @@ -23,6 +22,7 @@ div#vllm-benchmark-ud-params-picker.container-fluid { padding: 0 0 1rem 0; } +div[data-param-k="model-group"], div[data-param-k="model"] { background-color: var(--compat-bg-color); padding: 2px; @@ -31,40 +31,19 @@ div[data-param-k="model"] { cursor: pointer; } +div[data-param-k="model-group"][data-param-state="selected"], div[data-param-k="model"][data-param-state="selected"] { background-color: var(--compat-param-selected-color); color: var(--compat-fg-color); } -div[data-param-k="model"][data-param-state="latest-version"] { - background-color: var(--compat-param-selected-color); - color: var(--compat-fg-color); -} - -div[data-param-k="model"][data-param-state="disabled"] { - background-color: var(--compat-param-disabled-color); - text-decoration: line-through; - /* text-decoration-color: var(--pst-color-danger); */ - cursor: auto; -} - -div[data-param-k="model"]:not([data-param-state]):hover { +div[data-param-k="model-group"]:hover, +div[data-param-k="model"]:hover { background-color: var(--compat-param-hover-color); -} - -div[data-param-k="model-group"] { - background-color: var(--compat-bg-color); - padding: 2px; - border: solid 1px var(--compat-border-color); - font-weight: 500; - cursor: pointer; -} - -div[data-param-k="model-group"][data-param-state="selected"] { - background-color: var(--compat-param-selected-color); color: var(--compat-fg-color); } +/* div[data-param-k="model-group"][data-param-state="latest-version"] { background-color: var(--compat-param-selected-color); color: var(--compat-fg-color); @@ -73,26 +52,19 @@ div[data-param-k="model-group"][data-param-state="latest-version"] { div[data-param-k="model-group"][data-param-state="disabled"] { background-color: var(--compat-param-disabled-color); text-decoration: line-through; - /* text-decoration-color: var(--pst-color-danger); */ + text-decoration-color: var(--pst-color-danger); cursor: auto; } - -div[data-param-k="model-group"]:not([data-param-state]):hover { - background-color: var(--compat-param-hover-color); -} +*/ .model-param-head { background-color: var(--compat-head-color); padding: 0.15rem 0.15rem 0.15rem 0.67rem; - /* margin: 2px; */ - border-right: solid 2px var(--compat-accent-color); + border-right: solid 4px var(--compat-accent-color); font-weight: 600; } .model-param { - /* padding: 2px; */ - /* margin: 0 2px 0 2px; */ - /* margin: 2px; */ border: solid 1px var(--compat-border-color); font-weight: 500; } From 0d790615efd8311f6a10fcd2f40fcc15dabb5cc9 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 3 Sep 2025 12:03:02 -0400 Subject: [PATCH 59/81] [Ex CI] Update pipeline Id for rocprofiler-compute to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index efd5a81ae..4090d07be 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -207,7 +207,7 @@ parameters: developBranch: develop hasGpuTarget: true rocprofiler-compute: - pipelineId: 257 + pipelineId: 334 developBranch: develop hasGpuTarget: true rocprofiler-register: From 61f09e2ab935c91b6bf02c51552c6e0b045064ac Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Tue, 9 Sep 2025 15:27:00 -0400 Subject: [PATCH 60/81] Update pipelineId for rocprofiler-compute --- .azuredevops/templates/steps/dependencies-rocm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 4090d07be..e6e6db966 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -207,7 +207,7 @@ parameters: developBranch: develop hasGpuTarget: true rocprofiler-compute: - pipelineId: 334 + pipelineId: 344 developBranch: develop hasGpuTarget: true rocprofiler-register: From a6fbf60594ed67a1a54d1a4733928210a33b1a9f Mon Sep 17 00:00:00 2001 From: Haresh Sivasuntharampillai Date: Wed, 10 Sep 2025 17:54:37 +0000 Subject: [PATCH 61/81] [Ex CI] enable rocr-runtime monorepo --- .azuredevops/components/ROCR-Runtime.yml | 219 +++++++++++++---------- 1 file changed, 126 insertions(+), 93 deletions(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index 0358dd335..7d5d07eba 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocr-runtime - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -45,6 +64,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ROCR_Runtime_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: vmImage: 'ubuntu-22.04' ${{ if eq(job.os, 'almalinux8') }}: @@ -65,14 +88,18 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -82,105 +109,111 @@ jobs: -GNinja - template: ${{ 
variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml # parameters: # aptPackages: ${{ parameters.aptPackages }} -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ROCR_Runtime_test_${{ job.os }}_${{ job.target }} - dependsOn: ROCR_Runtime_build_${{ job.os }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - parameters: - runRocminfo: false - - task: Bash@3 - displayName: Build kfdtest - inputs: - targetType: 'inline' - workingDirectory: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest - script: | - if [ -e /opt/rh/gcc-toolset-14/enable ]; then - source /opt/rh/gcc-toolset-14/enable - fi - mkdir build && cd build - cmake -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm .. - make - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: kfdtest - testExecutable: BIN_DIR=$(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/build ./run_kfdtest.sh - testParameters: '-p core --gtest_output=xml:./test_output.xml --gtest_color=yes' - testDir: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/scripts - os: ${{ job.os }} - - task: Bash@3 - displayName: Build rocrtst - inputs: - targetType: 'inline' - workingDirectory: $(Build.SourcesDirectory)/rocrtst/suites/test_common - script: | - echo $(Build.SourcesDirectory)/rocrtst/thirdparty/lib | sudo tee -a /etc/ld.so.conf.d/rocm-ci.conf - sudo cat /etc/ld.so.conf.d/rocm-ci.conf - sudo ldconfig -v - ldconfig -p - if [ -e /opt/rh/gcc-toolset-14/enable ]; then - source /opt/rh/gcc-toolset-14/enable - fi - BASE_CLANG_DIR=$(Agent.BuildDirectory)/rocm/llvm/lib/clang - export NEWEST_CLANG_VER=$(ls -1 $BASE_CLANG_DIR | sort -V | tail -n 1) - mkdir build && cd build - cmake .. 
\ - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm \ - -DTARGET_DEVICES=${{ job.target }} \ - -DROCM_DIR=$(Agent.BuildDirectory)/rocm \ - -DLLVM_DIR=$(Agent.BuildDirectory)/rocm/llvm/bin \ - -DOPENCL_INC_DIR=$BASE_CLANG_DIR/$NEWEST_CLANG_VER/include - make - make rocrtst_kernels - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocrtst - testExecutable: ./rocrtst64 - testParameters: '--gtest_filter="-rocrtstNeg.Memory_Negative_Tests:rocrtstFunc.Memory_Max_Mem" --gtest_output=xml:./test_output.xml --gtest_color=yes' - testDir: $(Build.SourcesDirectory)/rocrtst/suites/test_common/build/${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - environment: test - gpuTarget: ${{ job.target }} - # docker image will be missing libhwloc5 +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ROCR_Runtime_test_${{ job.os }}_${{ job.target }} + dependsOn: ROCR_Runtime_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + parameters: + runRocminfo: false + - task: Bash@3 + displayName: Build kfdtest + inputs: + targetType: 'inline' + workingDirectory: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest + script: | + if [ -e /opt/rh/gcc-toolset-14/enable ]; then + source /opt/rh/gcc-toolset-14/enable + fi + mkdir build && cd build + cmake -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm .. 
+ make + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: kfdtest + testExecutable: BIN_DIR=$(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/build ./run_kfdtest.sh + testParameters: '-p core --gtest_output=xml:./test_output.xml --gtest_color=yes' + testDir: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/scripts + os: ${{ job.os }} + - task: Bash@3 + displayName: Build rocrtst + inputs: + targetType: 'inline' + workingDirectory: $(Build.SourcesDirectory)/rocrtst/suites/test_common + script: | + echo $(Build.SourcesDirectory)/rocrtst/thirdparty/lib | sudo tee -a /etc/ld.so.conf.d/rocm-ci.conf + sudo cat /etc/ld.so.conf.d/rocm-ci.conf + sudo ldconfig -v + ldconfig -p + if [ -e /opt/rh/gcc-toolset-14/enable ]; then + source /opt/rh/gcc-toolset-14/enable + fi + BASE_CLANG_DIR=$(Agent.BuildDirectory)/rocm/llvm/lib/clang + export NEWEST_CLANG_VER=$(ls -1 $BASE_CLANG_DIR | sort -V | tail -n 1) + mkdir build && cd build + cmake .. \ + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm \ + -DTARGET_DEVICES=${{ job.target }} \ + -DROCM_DIR=$(Agent.BuildDirectory)/rocm \ + -DLLVM_DIR=$(Agent.BuildDirectory)/rocm/llvm/bin \ + -DOPENCL_INC_DIR=$BASE_CLANG_DIR/$NEWEST_CLANG_VER/include + make + make rocrtst_kernels + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: rocrtst + testExecutable: ./rocrtst64 + testParameters: '--gtest_filter="-rocrtstNeg.Memory_Negative_Tests:rocrtstFunc.Memory_Max_Mem" --gtest_output=xml:./test_output.xml --gtest_color=yes' + testDir: $(Build.SourcesDirectory)/rocrtst/suites/test_common/build/${{ job.target }} + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + environment: test + gpuTarget: ${{ job.target }} + # docker image will be missing libhwloc5 From 91f21d890fdf7a5b2bb02541aad0b8d111b1d539 Mon Sep 17 00:00:00 2001 From: Haresh Sivasuntharampillai Date: Wed, 10 Sep 2025 18:44:18 +0000 Subject: [PATCH 62/81] Fixed SparseCheckout --- .azuredevops/components/ROCR-Runtime.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index 7d5d07eba..cdf935e2a 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -158,6 +158,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml parameters: runRocminfo: false From 26ddf7e6ac1914241426b6da61683263c8306105 Mon Sep 17 00:00:00 2001 From: Haresh Sivasuntharampillai Date: Wed, 10 Sep 2025 19:48:53 +0000 Subject: [PATCH 63/81] test commit --- .azuredevops/components/ROCR-Runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index cdf935e2a..3482c0a18 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -8,7 +8,7 @@ parameters: - name: checkoutRef type: string default: '' -# monorepo related parameters +# monorepo related parameters test - name: sparseCheckoutDir type: string default: '' From 8617b653f8a2ca00ed7f4556dc5321161f36a90e Mon Sep 17 00:00:00 2001 From: Haresh Sivasuntharampillai Date: Wed, 10 Sep 2025 19:53:30 +0000 Subject: [PATCH 64/81] test commit 
--- .azuredevops/components/ROCR-Runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index 3482c0a18..cdf935e2a 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -8,7 +8,7 @@ parameters: - name: checkoutRef type: string default: '' -# monorepo related parameters test +# monorepo related parameters - name: sparseCheckoutDir type: string default: '' From 9b2b1d3a661de72fa5d29a6606c79441e17fa87f Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 19:56:04 +0000 Subject: [PATCH 65/81] User test --- .azuredevops/components/ROCR-Runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index cdf935e2a..3482c0a18 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -8,7 +8,7 @@ parameters: - name: checkoutRef type: string default: '' -# monorepo related parameters +# monorepo related parameters test - name: sparseCheckoutDir type: string default: '' From c4b4abe3543ab36e23777d1a9bb9f6d6144a2c31 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 19:58:20 +0000 Subject: [PATCH 66/81] User Test Commit --- .azuredevops/components/ROCR-Runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index 3482c0a18..cdf935e2a 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -8,7 +8,7 @@ parameters: - name: checkoutRef type: string default: '' -# monorepo related parameters test +# monorepo related parameters - name: sparseCheckoutDir type: string default: '' From 2383edc1fee1faa80bf94bd60f6151d68ce32e97 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 11 Sep 2025 18:12:55 +0000 Subject: [PATCH 67/81] Fixed WorkingDir in TestJobs --- .azuredevops/components/ROCR-Runtime.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index cdf935e2a..098efb5aa 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -166,7 +166,7 @@ jobs: displayName: Build kfdtest inputs: targetType: 'inline' - workingDirectory: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest + workingDirectory: $(Agent.BuildDirectory)/s/libhsakmt/tests/kfdtest script: | if [ -e /opt/rh/gcc-toolset-14/enable ]; then source /opt/rh/gcc-toolset-14/enable @@ -177,17 +177,17 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml parameters: componentName: kfdtest - testExecutable: BIN_DIR=$(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/build ./run_kfdtest.sh + testExecutable: BIN_DIR=$(Agent.BuildDirectory)/s/libhsakmt/tests/kfdtest/build ./run_kfdtest.sh testParameters: '-p core --gtest_output=xml:./test_output.xml --gtest_color=yes' - testDir: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/scripts + testDir: $(Agent.BuildDirectory)/s/libhsakmt/tests/kfdtest/scripts os: ${{ job.os }} - task: Bash@3 displayName: Build rocrtst inputs: targetType: 'inline' - workingDirectory: $(Build.SourcesDirectory)/rocrtst/suites/test_common + workingDirectory: $(Agent.BuildDirectory)/s/rocrtst/suites/test_common script: | - echo $(Build.SourcesDirectory)/rocrtst/thirdparty/lib | sudo tee 
-a /etc/ld.so.conf.d/rocm-ci.conf + echo $(Agent.BuildDirectory)/s/rocrtst/thirdparty/lib | sudo tee -a /etc/ld.so.conf.d/rocm-ci.conf sudo cat /etc/ld.so.conf.d/rocm-ci.conf sudo ldconfig -v ldconfig -p From 957005f596cc55e99597e53f3900801cf56102f4 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 11 Sep 2025 18:56:55 +0000 Subject: [PATCH 68/81] Updated rocrtst testDir --- .azuredevops/components/ROCR-Runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index 098efb5aa..fea5ae3d0 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -210,7 +210,7 @@ jobs: componentName: rocrtst testExecutable: ./rocrtst64 testParameters: '--gtest_filter="-rocrtstNeg.Memory_Negative_Tests:rocrtstFunc.Memory_Max_Mem" --gtest_output=xml:./test_output.xml --gtest_color=yes' - testDir: $(Build.SourcesDirectory)/rocrtst/suites/test_common/build/${{ job.target }} + testDir: $(Agent.BuildDirectory)/s//rocrtst/suites/test_common/build/${{ job.target }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml parameters: From 8c1df97e34836783d27d6910e39af3263252aa28 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 22:40:35 +0000 Subject: [PATCH 69/81] [Ex CI] Enable rocprofiler-sdk monorepo --- .azuredevops/components/rocprofiler-sdk.yml | 39 +++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/.azuredevops/components/rocprofiler-sdk.yml b/.azuredevops/components/rocprofiler-sdk.yml index 7dea99f0e..7ccfd39db 100644 --- a/.azuredevops/components/rocprofiler-sdk.yml +++ b/.azuredevops/components/rocprofiler-sdk.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocprofiler-sdk - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -73,6 +92,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocprofiler_sdk_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -89,6 +112,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: @@ -96,6 +120,8 @@ jobs: dependencyList: ${{ parameters.rocmDependencies }} gpuTarget: ${{ job.target }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: Add Python site-packages binaries to path inputs: @@ -105,6 +131,7 @@ jobs: echo "##vso[task.prependpath]$USER_BASE/bin" - 
template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm -DROCPROFILER_BUILD_TESTS=ON @@ -114,9 +141,12 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml @@ -126,13 +156,14 @@ jobs: # gpuTarget: ${{ job.target }} # registerROCmPackages: true +- ${{ if eq(parameters.unifiedBuild, False) }}: - ${{ each job in parameters.jobMatrix.testJobs }}: - job: rocprofiler_sdk_test_${{ job.target }} dependsOn: rocprofiler_sdk_build_${{ job.target }} condition: and(succeeded(), eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), eq(${{ parameters.aggregatePipeline }}, False) ) variables: @@ -150,6 +181,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} checkoutRepo: ${{ parameters.checkoutRepo }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml @@ -157,6 +189,8 @@ jobs: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: Add Python and ROCm binaries to path inputs: @@ -167,6 +201,7 @@ jobs: echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm -DROCPROFILER_BUILD_TESTS=ON @@ -177,7 +212,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH}}/steps/gpu-diagnostics.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml parameters: - componentName: rocprofiler-sdk + componentName: ${{ parameters.componentName }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml parameters: aptPackages: ${{ parameters.aptPackages }} From e71b8212f9046f1ba096b80a32aaacbdb91fa30e Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 22:42:02 +0000 Subject: [PATCH 70/81] Fixed Indentation --- .azuredevops/components/rocprofiler-sdk.yml | 126 ++++++++++---------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/.azuredevops/components/rocprofiler-sdk.yml b/.azuredevops/components/rocprofiler-sdk.yml index 7ccfd39db..3f1656040 100644 --- a/.azuredevops/components/rocprofiler-sdk.yml +++ b/.azuredevops/components/rocprofiler-sdk.yml @@ -157,66 +157,66 @@ jobs: # registerROCmPackages: true - ${{ if eq(parameters.unifiedBuild, False) 
}}: -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocprofiler_sdk_test_${{ job.target }} - dependsOn: rocprofiler_sdk_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmDependencies }} - gpuTarget: ${{ job.target }} - ${{ if parameters.triggerDownstreamJobs }}: - downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - - task: Bash@3 - displayName: Add Python and ROCm binaries to path - inputs: - targetType: inline - script: | - USER_BASE=$(python3 -m site --user-base) - echo "##vso[task.prependpath]$USER_BASE/bin" - echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml - parameters: - componentName: ${{ parameters.componentName }} - extraBuildFlags: >- - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm - -DROCPROFILER_BUILD_TESTS=ON - -DROCPROFILER_BUILD_SAMPLES=ON - -DROCPROFILER_BUILD_RELEASE=ON - -DGPU_TARGETS=${{ job.target }} - -GNinja - - template: ${{ variables.CI_TEMPLATE_PATH}}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: ${{ parameters.componentName }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} - registerROCmPackages: true + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: rocprofiler_sdk_test_${{ job.target }} + dependsOn: rocprofiler_sdk_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + checkoutRepo: ${{ 
parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Add Python and ROCm binaries to path + inputs: + targetType: inline + script: | + USER_BASE=$(python3 -m site --user-base) + echo "##vso[task.prependpath]$USER_BASE/bin" + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + componentName: ${{ parameters.componentName }} + extraBuildFlags: >- + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DROCPROFILER_BUILD_TESTS=ON + -DROCPROFILER_BUILD_SAMPLES=ON + -DROCPROFILER_BUILD_RELEASE=ON + -DGPU_TARGETS=${{ job.target }} + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH}}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} + registerROCmPackages: true From c9c41a34c204359b7389a785a20c035ae2c92040 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 11 Sep 2025 20:11:31 +0000 Subject: [PATCH 71/81] [Ex CI] enable hip-tests monorepo --- .azuredevops/components/hip-tests.yml | 133 ++++++++++++++++---------- 1 file changed, 83 insertions(+), 50 deletions(-) diff --git a/.azuredevops/components/hip-tests.yml b/.azuredevops/components/hip-tests.yml index c88465a6d..388ac4170 100644 --- a/.azuredevops/components/hip-tests.yml +++ b/.azuredevops/components/hip-tests.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: hip-tests - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -60,6 +79,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: hip_tests_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -76,15 +99,18 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ 
parameters.downstreamAggregateNames }} # compile hip-tests - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: - componentName: hip-tests + componentName: ${{ parameters.componentName }} cmakeSourceDir: '../catch' customBuildTarget: build_tests extraBuildFlags: >- @@ -96,9 +122,12 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml @@ -108,52 +137,56 @@ jobs: extraEnvVars: - HIP_ROCCLR_HOME:::/home/user/workspace/rocm -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: hip_tests_test_${{ job.target }} - timeoutInMinutes: 240 - dependsOn: hip_tests_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - - task: Bash@3 - displayName: Symlink rocm_agent_enumerator - inputs: - targetType: inline - script: | - # Assuming that /opt is no longer persistent across runs, test environments are fully ephemeral - sudo mkdir -p /opt/rocm/bin - sudo ln -s $(Agent.BuildDirectory)/rocm/bin/rocm_agent_enumerator /opt/rocm/bin/rocm_agent_enumerator - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: hip_tests - testDir: $(Agent.BuildDirectory)/rocm/share/hip - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - environment: test - gpuTarget: ${{ job.target }} - optSymLink: true +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: hip_tests_test_${{ job.target }} + timeoutInMinutes: 240 + dependsOn: hip_tests_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + 
clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + gpuTarget: ${{ job.target }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Symlink rocm_agent_enumerator + inputs: + targetType: inline + script: | + # Assuming that /opt is no longer persistent across runs, test environments are fully ephemeral + sudo mkdir -p /opt/rocm/bin + sudo ln -s $(Agent.BuildDirectory)/rocm/bin/rocm_agent_enumerator /opt/rocm/bin/rocm_agent_enumerator + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: $(Agent.BuildDirectory)/rocm/share/hip + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + environment: test + gpuTarget: ${{ job.target }} + optSymLink: true From 17be0ce7aadecbbe326bca9871aaa04094715311 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 11 Sep 2025 16:33:27 -0400 Subject: [PATCH 72/81] [Ex CI] Update pipeline Id for rocprofiler-sdk to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index e6e6db966..3db025e8c 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -215,8 +215,8 @@ parameters: developBranch: develop hasGpuTarget: false rocprofiler-sdk: - pipelineId: 246 - developBranch: amd-staging + pipelineId: 347 + developBranch: develop hasGpuTarget: true rocprofiler-systems: pipelineId: 255 From 9a3fc8c773a0b7249d596c77a8970c1e3f8cd924 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 10 Sep 2025 17:55:19 -0400 Subject: [PATCH 73/81] [Ex CI] Update pipeline Id for rocm-smi-lib to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 3db025e8c..8f846b2ac 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -195,8 +195,8 @@ parameters: developBranch: master hasGpuTarget: false rocm_smi_lib: - pipelineId: 96 - developBranch: amd-staging + pipelineId: 358 + developBranch: develop hasGpuTarget: false rocPRIM: pipelineId: 273 From b3c566f6b984ba3ba1c254fffde51c893d857665 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 11 Sep 2025 16:50:49 -0400 Subject: [PATCH 74/81] [Ex CI] Update pipeline Id for hip-tests to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 8f846b2ac..0b46ed37f 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -63,8 +63,8 @@ parameters: developBranch: develop hasGpuTarget: false hip-tests: - pipelineId: 233 - developBranch: amd-staging + pipelineId: 362 + developBranch: develop hasGpuTarget: false hipBLAS: pipelineId: 317 From 355feae2e2f9da98e2c139432c224d7d837db281 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Thu, 11 Sep 2025 16:37:00 -0400 Subject: [PATCH 75/81] [Ex CI] Update pipeline Id for rocr-runtime to monorepo --- .azuredevops/templates/steps/dependencies-rocm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 0b46ed37f..5fbb57bb5 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -227,8 +227,8 @@ parameters: developBranch: develop hasGpuTarget: true ROCR-Runtime: - pipelineId: 10 - developBranch: amd-staging + pipelineId: 354 + developBranch: develop hasGpuTarget: false rocRAND: pipelineId: 274 From 33bc3c5e2be176a4a12cd8859a4866b0bf3da365 Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Thu, 11 Sep 2025 22:08:43 -0400 Subject: [PATCH 76/81] [External CI] Match component name for ROCR to match expected downstream (#5306) --- .azuredevops/components/ROCR-Runtime.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index fea5ae3d0..af3e87fce 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -1,7 +1,7 @@ parameters: - name: componentName type: string - default: rocr-runtime + default: ROCR-Runtime - name: checkoutRepo type: string default: 'self' From 0af430d1cb97cf2816e27d501a61b343808778b7 Mon Sep 17 00:00:00 2001 From: Joseph Macaranas <145489236+jayhawk-commits@users.noreply.github.com> Date: Thu, 11 Sep 2025 22:50:56 -0400 Subject: [PATCH 77/81] [External CI] Another fix for downstream jobs (#5307) --- .azuredevops/components/rocm_smi_lib.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azuredevops/components/rocm_smi_lib.yml b/.azuredevops/components/rocm_smi_lib.yml index 138bc559e..6d605888c 100644 --- a/.azuredevops/components/rocm_smi_lib.yml +++ b/.azuredevops/components/rocm_smi_lib.yml @@ -1,7 +1,7 @@ parameters: - name: componentName type: string - default: rocm-smi-lib + default: rocm_smi_lib - name: checkoutRepo type: string default: 'self' From 8882410560e489c7b6f326cff651b1dcf5d0cfa8 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 3 Sep 2025 14:08:14 -0400 Subject: [PATCH 78/81] Enabled rocprofiler-systems monorepo --- .../components/rocprofiler-systems.yml | 197 ++++++++++-------- 1 file changed, 114 insertions(+), 83 deletions(-) diff --git a/.azuredevops/components/rocprofiler-systems.yml b/.azuredevops/components/rocprofiler-systems.yml index 0c90f53f2..ba6c2e035 100644 --- a/.azuredevops/components/rocprofiler-systems.yml +++ b/.azuredevops/components/rocprofiler-systems.yml @@ -6,6 +6,25 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: componentName + type: string + default: rocprofiler-systems +- name: 
sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -86,7 +105,11 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: rocprofiler_systems_build_${{ job.target }} + - job: ${{ parameters.componentName }}_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -105,6 +128,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: @@ -136,12 +160,16 @@ jobs: -DCMAKE_CXX_FLAGS=-I$(Agent.BuildDirectory)/rocm/include/rocjpeg -DGPU_TARGETS=${{ job.target }} -GNinja + componentName: ${{ parameters.componentName }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: gpuTarget: ${{ job.target }} + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: gpuTarget: ${{ job.target }} + componentName: ${{ parameters.componentName }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml parameters: @@ -151,85 +179,88 @@ jobs: registerROCmPackages: true extraPaths: /home/user/workspace/rocm/bin:/home/user/workspace/rocm/llvm/bin -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocprofiler_systems_test_${{ job.target }} - dependsOn: rocprofiler_systems_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - timeoutInMinutes: 180 - variables: - - group: common - - template: /.azuredevops/variables-global.yml - - name: ROCM_PATH - value: $(Agent.BuildDirectory)/rocm - pool: - name: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmDependencies }} - gpuTarget: ${{ job.target }} - - task: Bash@3 - displayName: Add ROCm binaries to PATH - inputs: - targetType: inline - script: | - echo 
"##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" - echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml - parameters: - # build flags reference: https://rocm.docs.amd.com/projects/omnitrace/en/latest/install/install.html - extraBuildFlags: >- - -DROCPROFSYS_BUILD_TESTING=ON - -DROCPROFSYS_BUILD_DYNINST=ON - -DROCPROFSYS_BUILD_LIBUNWIND=ON - -DROCPROFSYS_DISABLE_EXAMPLES="openmp-target" - -DDYNINST_BUILD_TBB=ON - -DDYNINST_BUILD_ELFUTILS=ON - -DDYNINST_BUILD_LIBIBERTY=ON - -DDYNINST_BUILD_BOOST=ON - -DROCPROFSYS_USE_PAPI=ON - -DROCPROFSYS_USE_MPI=ON - -DCMAKE_CXX_FLAGS=-I$(Agent.BuildDirectory)/rocm/include/rocjpeg - -DGPU_TARGETS=${{ job.target }} - -GNinja - - task: Bash@3 - displayName: Set up rocprofiler-systems env - inputs: - targetType: inline - script: source share/rocprofiler-systems/setup-env.sh - workingDirectory: build - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocprofiler-systems - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml - parameters: - gpuTarget: ${{ job.target }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml - parameters: - gpuTarget: ${{ job.target }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - registerROCmPackages: true - gpuTarget: ${{ job.target }} - extraPaths: /home/user/workspace/rocm/bin:/home/user/workspace/rocm/llvm/bin +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ${{ parameters.componentName }}_test_${{ job.target }} + dependsOn: ${{ parameters.componentName }}_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + timeoutInMinutes: 180 + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: ROCM_PATH + value: $(Agent.BuildDirectory)/rocm + pool: + name: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Add ROCm binaries to PATH + inputs: + targetType: inline + script: | + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + # build flags reference: 
https://rocm.docs.amd.com/projects/omnitrace/en/latest/install/install.html + extraBuildFlags: >- + -DROCPROFSYS_BUILD_TESTING=ON + -DROCPROFSYS_BUILD_DYNINST=ON + -DROCPROFSYS_BUILD_LIBUNWIND=ON + -DROCPROFSYS_DISABLE_EXAMPLES="openmp-target" + -DDYNINST_BUILD_TBB=ON + -DDYNINST_BUILD_ELFUTILS=ON + -DDYNINST_BUILD_LIBIBERTY=ON + -DDYNINST_BUILD_BOOST=ON + -DROCPROFSYS_USE_PAPI=ON + -DROCPROFSYS_USE_MPI=ON + -DCMAKE_CXX_FLAGS=-I$(Agent.BuildDirectory)/rocm/include/rocjpeg + -DGPU_TARGETS=${{ job.target }} + -GNinja + - task: Bash@3 + displayName: Set up rocprofiler-systems env + inputs: + targetType: inline + script: source share/rocprofiler-systems/setup-env.sh + workingDirectory: build + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml + parameters: + gpuTarget: ${{ job.target }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + gpuTarget: ${{ job.target }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + registerROCmPackages: true + gpuTarget: ${{ job.target }} + extraPaths: /home/user/workspace/rocm/bin:/home/user/workspace/rocm/llvm/bin From a2785d2b5a416bb420e913cce6da810ea2b82dd7 Mon Sep 17 00:00:00 2001 From: amd-hsivasun Date: Wed, 3 Sep 2025 14:14:52 -0400 Subject: [PATCH 79/81] Fixed componentName calls for test and build jobs --- .azuredevops/components/rocprofiler-systems.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.azuredevops/components/rocprofiler-systems.yml b/.azuredevops/components/rocprofiler-systems.yml index ba6c2e035..0840da028 100644 --- a/.azuredevops/components/rocprofiler-systems.yml +++ b/.azuredevops/components/rocprofiler-systems.yml @@ -105,7 +105,7 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: ${{ parameters.componentName }}_build_${{ job.target }} + - job: rocprofiler_systems_build_${{ job.target }} ${{ if parameters.buildDependsOn }}: dependsOn: - ${{ each build in parameters.buildDependsOn }}: @@ -181,8 +181,8 @@ jobs: - ${{ if eq(parameters.unifiedBuild, False) }}: - ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ${{ parameters.componentName }}_test_${{ job.target }} - dependsOn: ${{ parameters.componentName }}_build_${{ job.target }} + - job: rocprofiler_systems_test_${{ job.target }} + dependsOn: rocprofiler_systems_build_${{ job.target }} condition: and(succeeded(), eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), From 2fe270beb37a26ab690a2a2e51068e65df2a79d8 Mon Sep 17 00:00:00 2001 From: Jeffrey Novotny Date: Mon, 15 Sep 2025 15:16:17 -0400 Subject: [PATCH 80/81] Fix links to MIT licenses (#5311) --- docs/about/license.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/about/license.md b/docs/about/license.md index 91dbca114..93c031249 100644 --- a/docs/about/license.md +++ b/docs/about/license.md @@ -50,7 +50,7 @@ additional licenses. Please review individual repositories for more information. 
| [llvm-project](https://github.com/ROCm/llvm-project/) | [Apache](https://github.com/ROCm/llvm-project/blob/amd-staging/LICENSE.TXT) | | [llvm-project/flang](https://github.com/ROCm/llvm-project/tree/amd-staging/flang) | [Apache 2.0](https://github.com/ROCm/llvm-project/blob/amd-staging/flang/LICENSE.TXT) | | [MIGraphX](https://github.com/ROCm/AMDMIGraphX/) | [MIT](https://github.com/ROCm/AMDMIGraphX/blob/develop/LICENSE) | -| [MIOpen](https://github.com/ROCm/MIOpen/) | [MIT](https://github.com/ROCm/MIOpen/blob/develop/LICENSE.txt) | +| [MIOpen](https://github.com/ROCm/MIOpen/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/miopen/LICENSE.md) | | [MIVisionX](https://github.com/ROCm/MIVisionX/) | [MIT](https://github.com/ROCm/MIVisionX/blob/develop/LICENSE.txt) | | [rocAL](https://github.com/ROCm/rocAL) | [MIT](https://github.com/ROCm/rocAL/blob/develop/LICENSE.txt) | | [rocALUTION](https://github.com/ROCm/rocALUTION/) | [MIT](https://github.com/ROCm/rocALUTION/blob/develop/LICENSE.md) | @@ -67,15 +67,15 @@ additional licenses. Please review individual repositories for more information. | [ROCm Communication Collectives Library (RCCL)](https://github.com/ROCm/rccl/) | [Custom](https://github.com/ROCm/rccl/blob/develop/LICENSE.txt) | | [ROCm-Core](https://github.com/ROCm/rocm-core) | [MIT](https://github.com/ROCm/rocm-core/blob/master/copyright) | | [ROCm Compute Profiler](https://github.com/ROCm/rocprofiler-compute) | [MIT](https://github.com/ROCm/rocprofiler-compute/blob/amd-staging/LICENSE) | -| [ROCm Data Center (RDC)](https://github.com/ROCm/rdc/) | [MIT](https://github.com/ROCm/rdc/blob/amd-staging/LICENSE) | +| [ROCm Data Center (RDC)](https://github.com/ROCm/rdc/) | [MIT](https://github.com/ROCm/rdc/blob/amd-staging/LICENSE.md) | | [ROCm-Device-Libs](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/device-libs) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/device-libs/LICENSE.TXT) | | [ROCm-OpenCL-Runtime](https://github.com/ROCm/clr/tree/amd-staging/opencl) | [MIT](https://github.com/ROCm/clr/blob/amd-staging/opencl/LICENSE.txt) | | [ROCm Performance Primitives (RPP)](https://github.com/ROCm/rpp) | [MIT](https://github.com/ROCm/rpp/blob/develop/LICENSE) | -| [ROCm SMI Lib](https://github.com/ROCm/rocm_smi_lib/) | [MIT](https://github.com/ROCm/rocm_smi_lib/blob/amd-staging/License.txt) | -| [ROCm Systems Profiler](https://github.com/ROCm/rocprofiler-systems) | [MIT](https://github.com/ROCm/rocprofiler-systems/blob/amd-staging/LICENSE) | +| [ROCm SMI Lib](https://github.com/ROCm/rocm_smi_lib/) | [MIT](https://github.com/ROCm/rocm_smi_lib/blob/amd-staging/LICENSE.md) | +| [ROCm Systems Profiler](https://github.com/ROCm/rocprofiler-systems) | [MIT](https://github.com/ROCm/rocprofiler-systems/blob/amd-staging/LICENSE.md) | | [ROCm Validation Suite](https://github.com/ROCm/ROCmValidationSuite/) | [MIT](https://github.com/ROCm/ROCmValidationSuite/blob/master/LICENSE) | | [rocPRIM](https://github.com/ROCm/rocPRIM/) | [MIT](https://github.com/ROCm/rocPRIM/blob/develop/LICENSE.txt) | -| [ROCProfiler](https://github.com/ROCm/rocprofiler/) | [MIT](https://github.com/ROCm/rocprofiler/blob/amd-staging/LICENSE) | +| [ROCProfiler](https://github.com/ROCm/rocprofiler/) | [MIT](https://github.com/ROCm/rocprofiler/blob/amd-staging/LICENSE.md) | | [ROCprofiler-SDK](https://github.com/ROCm/rocprofiler-sdk) | [MIT](https://github.com/ROCm/rocprofiler-sdk/blob/amd-mainline/LICENSE) | | 
[rocPyDecode](https://github.com/ROCm/rocPyDecode) | [MIT](https://github.com/ROCm/rocPyDecode/blob/develop/LICENSE.txt) | | [rocRAND](https://github.com/ROCm/rocRAND/) | [MIT](https://github.com/ROCm/rocRAND/blob/develop/LICENSE.txt) | From b07ae4ba6ce94d7bd0c8f8d364fe1165945fdf49 Mon Sep 17 00:00:00 2001 From: Jeffrey Novotny Date: Mon, 15 Sep 2025 15:53:29 -0400 Subject: [PATCH 81/81] Fix links to MIT license for AQLprofile (#5312) --- docs/about/license.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/about/license.md b/docs/about/license.md index 93c031249..f4e4b8776 100644 --- a/docs/about/license.md +++ b/docs/about/license.md @@ -29,7 +29,7 @@ additional licenses. Please review individual repositories for more information. | [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/amd-staging/LICENSE) | | [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) | | [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) | -| [AQLprofile] | [MIT](https://github.com/ROCm/aqlprofile/blob/amd-staging/LICENSE) | +| [AQLprofile] | [MIT](https://github.com/ROCm/aqlprofile/blob/amd-staging/LICENSE.md) | | [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) | | [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) | | [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) |