Compare commits

18 Commits

Author SHA1 Message Date
Peter Park
744eff47a0 improve title 2024-12-16 12:57:17 -05:00
Peter Park
8c3181295b wording
words

words
2024-12-16 12:45:53 -05:00
Peter Park
267eda26ea add megatron training doc
update toc

add images

update formatting and wording

formatting

update formatting

update conf.py

update formatting

update docker img

tweak formatting

Fix stuff

fix mock-data/data-path

add specific commit hash to checkout

update docker pull tag

fix docker run cmd and examples path

fix docker cmd
2024-12-13 22:19:52 -05:00
Joseph Macaranas
bacb49681e External CI: Typo in rocPyDecode Pipeline Parameter (#4157) 2024-12-13 14:35:35 -05:00
Jeffrey Novotny
04fdc08328 Change reference to kernel-mode GPU compute driver in ROCm (#4147)
* Change reference to kernel-mode GPU compute driver in ROCm

* More changes for kernel-mode terminology

* Fix linting
2024-12-13 11:46:02 -05:00
darren-amd
1b33f1d7da External CI: llvm project comgr disable spirv (#4154)
External CI: add flag to disable SPIRV from comgr build in llvm-project
2024-12-13 10:51:36 -05:00
Joseph Macaranas
fd067f7b3b External CI: HIP shared library symlinks for rocPyDecode (#4153)
- Modifying the HIP shared libraries installed to follow the Linux symbolic link convention resolves test failures in rocPyDecode.
2024-12-13 09:58:25 -05:00
spolifroni-amd
2a7520f08a Added MIGraphX changes (#4150)
* Added MIGraphX changes

* removed gfx support

* Update RELEASE.md (×11)

---------

Co-authored-by: Jeffrey Novotny <jnovotny@amd.com>
2024-12-12 11:19:28 -05:00
Joseph Macaranas
5271c2c82d External CI: MIOpen Test Parameters (#4148)
- Exclude lone, consistently failing MIOpen test.
- test_rnn_seq_api is the only ctest failure, so let's filter it out for now to easily identify new failures.
2024-12-11 13:31:12 -05:00
JeniferC99
59a928f3a7 Merge pull request #4137 from ROCm/JeniferC99-patch-1
Update default.xml
2024-12-10 11:47:38 -06:00
David Galiffi
22572a9857 Add TransferBench and hipBLAS-common 2024-12-09 18:37:20 -05:00
randyh62
49e50b93c6 Update index.md (#4144) (#4146)
Remove Programming Guide topic from "How to"
2024-12-09 12:17:54 -08:00
Istvan Kiss
3354099b9c Remove GPU memory page 2024-12-09 17:23:57 +01:00
David Galiffi
794b34f40e Update default.xml
Fixed merge conflicts
2024-12-09 11:18:39 -05:00
David Galiffi
25ef417b31 Merge branch 'develop' into JeniferC99-patch-1 2024-12-09 11:17:17 -05:00
Peter Park
78f9adc6ec fix rccl hip streams section in workload tuning guide (#4140) 2024-12-09 11:06:12 -05:00
David Galiffi
4abcae54a8 Update default.xml (#4136)
Add rocJPEG
Rename omniperf to rocprofiler-compute
Rename omnitrace to rocprofiler-compute
2024-12-09 10:56:07 -05:00
JeniferC99
2690506e64 Update default.xml
SWDEV-502858
Rename Omnitrace and Omniperf
2024-12-06 23:01:56 -06:00
90 changed files with 747 additions and 1075 deletions

View File

@@ -197,3 +197,4 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
parameters:
componentName: MIOpen
testParameters: '-VV --output-on-failure --force-new-ctest-process --output-junit test_output.xml --exclude-regex test_rnn_seq_api'

View File

@@ -126,6 +126,7 @@ jobs:
componentName: comgr
extraBuildFlags: >-
-DCMAKE_PREFIX_PATH="$(Build.SourcesDirectory)/llvm/build;$(Build.SourcesDirectory)/amd/device-libs/build"
-DCOMGR_DISABLE_SPIRV=1
-DCMAKE_BUILD_TYPE=Release
cmakeBuildDir: 'amd/comgr/build'
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml

View File

@@ -181,6 +181,7 @@ jobs:
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
gpuTarget: $(JOB_GPU_TARGET)
setupHIPLibrarySymlinks: true
${{ if eq(parameters.checkoutRef, '') }}:
dependencySource: staging
${{ elseif ne(parameters.checkoutRef, '') }}:

View File

@@ -165,6 +165,11 @@ parameters:
- name: skipLlvmSymlink
type: boolean
default: false
# set to true if dlopen calls for HIP libraries are causing failures
# because they do not follow shared library symlink convention
- name: setupHIPLibrarySymlinks
type: boolean
default: false
# some ROCm components can specify GPU target and this will affect downloads
- name: gpuTarget
type: string
@@ -280,6 +285,37 @@ steps:
for file in amdclang amdclang++ amdclang-cl amdclang-cpp amdflang amdlld aompcc mygpu mycpu offload-arch; do
sudo ln -s $(Agent.BuildDirectory)/rocm/llvm/bin/$file $(Agent.BuildDirectory)/rocm/bin/$file
done
# dlopen calls within a ctest or pytest sequence run into issues when the shared library symlink convention is not followed
# the convention is as follows:
# unversioned .so is a symlink to major version .so
# major version .so is a symlink to detailed version .so
# HIP libraries do not follow this convention, and each .so is a copy of each other
# changing the library structure to follow the symlink convention resolves some test failures
- ${{ if eq(parameters.setupHIPLibrarySymlinks, true) }}:
- task: Bash@3
displayName: Setup symlinks for hip libraries
inputs:
targetType: inline
workingDirectory: $(Agent.BuildDirectory)/rocm/lib
script: |
LIBRARIES=("libamdhip64" "libhiprtc-builtins" "libhiprtc")
for LIB_NAME in "${LIBRARIES[@]}"; do
VERSIONED_SO=$(ls ${LIB_NAME}.so.* 2>/dev/null | grep -E "${LIB_NAME}\.so\.[0-9]+\.[0-9]+\.[0-9]+(-.*)?" | sort -V | tail -n 1)
if [[ -z "$VERSIONED_SO" ]]; then
continue
fi
MAJOR_VERSION=$(echo "$VERSIONED_SO" | grep -oP "${LIB_NAME}\.so\.\K[0-9]+")
if [[ -e "${LIB_NAME}.so.${MAJOR_VERSION}" && ! -L "${LIB_NAME}.so.${MAJOR_VERSION}" ]]; then
rm -f "${LIB_NAME}.so.${MAJOR_VERSION}"
fi
if [[ -e "${LIB_NAME}.so" && ! -L "${LIB_NAME}.so" ]]; then
rm -f "${LIB_NAME}.so"
fi
ln -sf "$VERSIONED_SO" "${LIB_NAME}.so.${MAJOR_VERSION}"
ln -sf "${LIB_NAME}.so.${MAJOR_VERSION}" "${LIB_NAME}.so"
echo "Symlinks created for $LIB_NAME:"
ls -l ${LIB_NAME}.so*
done
- task: Bash@3
displayName: 'List downloaded ROCm files'
inputs:

.gitmodules (vendored)
View File

@@ -1,183 +0,0 @@
[submodule "libs/ROCK-Kernel-Driver"]
path = libs/ROCK-Kernel-Driver
url = ../ROCK-Kernel-Driver
[submodule "libs/ROCT-Thunk-Interface"]
path = libs/ROCT-Thunk-Interface
url = ../ROCT-Thunk-Interface
[submodule "libs/ROCR-Runtime"]
path = libs/ROCR-Runtime
url = ../ROCR-Runtime
[submodule "libs/amdsmi"]
path = libs/amdsmi
url = ../amdsmi
[submodule "libs/rocm_smi_lib"]
path = libs/rocm_smi_lib
url = ../rocm_smi_lib
[submodule "libs/rocm-core"]
path = libs/rocm-core
url = ../rocm-core
[submodule "libs/rocm-cmake"]
path = libs/rocm-cmake
url = ../rocm-cmake
[submodule "libs/rocminfo"]
path = libs/rocminfo
url = ../rocminfo
[submodule "libs/rocm_bandwidth_test"]
path = libs/rocm_bandwidth_test
url = ../rocm_bandwidth_test
[submodule "libs/rocprofiler"]
path = libs/rocprofiler
url = ../rocprofiler
[submodule "libs/roctracer"]
path = libs/roctracer
url = ../roctracer
[submodule "libs/rdc"]
path = libs/rdc
url = ../rdc
[submodule "libs/HIP"]
path = libs/HIP
url = ../HIP
[submodule "libs/clr"]
path = libs/clr
url = ../clr
[submodule "libs/hipother"]
path = libs/hipother
url = ../hipother
[submodule "libs/HIPIFY"]
path = libs/HIPIFY
url = ../HIPIFY
[submodule "libs/llvm-project"]
path = libs/llvm-project
url = ../llvm-project
[submodule "libs/half"]
path = libs/half
url = ../half
[submodule "libs/ROCgdb"]
path = libs/ROCgdb
url = ../ROCgdb
[submodule "libs/ROCdbgapi"]
path = libs/ROCdbgapi
url = ../ROCdbgapi
[submodule "libs/rocr_debug_agent"]
path = libs/rocr_debug_agent
url = ../rocr_debug_agent
[submodule "libs/rocBLAS"]
path = libs/rocBLAS
url = ../rocBLAS
[submodule "libs/Tensile"]
path = libs/Tensile
url = ../Tensile
[submodule "libs/hipTensor"]
path = libs/hipTensor
url = ../hipTensor
[submodule "libs/hipBLAS"]
path = libs/hipBLAS
url = ../hipBLAS
[submodule "libs/hipBLASLt"]
path = libs/hipBLASLt
url = ../hipBLASLt
[submodule "libs/rocFFT"]
path = libs/rocFFT
url = ../rocFFT
[submodule "libs/hipFFT"]
path = libs/hipFFT
url = ../hipFFT
[submodule "libs/rocRAND"]
path = libs/rocRAND
url = ../rocRAND
[submodule "libs/hipRAND"]
path = libs/hipRAND
url = ../hipRAND
[submodule "libs/rocSPARSE"]
path = libs/rocSPARSE
url = ../rocSPARSE
[submodule "libs/hipSPARSELt"]
path = libs/hipSPARSELt
url = ../hipSPARSELt
[submodule "libs/rocSOLVER"]
path = libs/rocSOLVER
url = ../rocSOLVER
[submodule "libs/hipSOLVER"]
path = libs/hipSOLVER
url = ../hipSOLVER
[submodule "libs/hipSPARSE"]
path = libs/hipSPARSE
url = ../hipSPARSE
[submodule "libs/rocALUTION"]
path = libs/rocALUTION
url = ../rocALUTION
[submodule "libs/rocThrust"]
path = libs/rocThrust
url = ../rocThrust
[submodule "libs/hipCUB"]
path = libs/hipCUB
url = ../hipCUB
[submodule "libs/rocPRIM"]
path = libs/rocPRIM
url = ../rocPRIM
[submodule "libs/rocWMMA"]
path = libs/rocWMMA
url = ../rocWMMA
[submodule "libs/rccl"]
path = libs/rccl
url = ../rccl
[submodule "libs/MIOpen"]
path = libs/MIOpen
url = ../MIOpen
[submodule "libs/composable_kernel"]
path = libs/composable_kernel
url = ../composable_kernel
[submodule "libs/MIVisionX"]
path = libs/MIVisionX
url = ../MIVisionX
[submodule "libs/rpp"]
path = libs/rpp
url = ../rpp
[submodule "libs/hipfort"]
path = libs/hipfort
url = ../hipfort
[submodule "libs/AMDMIGraphX"]
path = libs/AMDMIGraphX
url = ../AMDMIGraphX
[submodule "libs/ROCmValidationSuite"]
path = libs/ROCmValidationSuite
url = ../ROCmValidationSuite
[submodule "libs/openmp-extras/aomp"]
path = libs/openmp-extras/aomp
url = ../aomp
[submodule "libs/openmp-extras/aomp-extras"]
path = libs/openmp-extras/aomp-extras
url = ../aomp-extras
[submodule "libs/openmp-extras/flang"]
path = libs/openmp-extras/flang
url = ../flang
[submodule "libs/rocDecode"]
path = libs/rocDecode
url = ../rocDecode
[submodule "libs/rocprofiler-sdk"]
path = libs/rocprofiler-sdk
url = ../rocprofiler-sdk
[submodule "libs/rocm-examples"]
path = libs/rocm-examples
url = ../rocm-examples
[submodule "libs/rocPyDecode"]
path = libs/rocPyDecode
url = ../rocPyDecode
[submodule "libs/rocAL"]
path = libs/rocAL
url = ../rocAL
[submodule "libs/rocprofiler-register"]
path = libs/rocprofiler-register
url = ../rocprofiler-register
[submodule "libs/rocprofiler-compute"]
path = libs/rocprofiler-compute
url = ../rocprofiler-compute
[submodule "libs/rocprofiler-systems"]
path = libs/rocprofiler-systems
url = ../rocprofiler-systems
[submodule "libs/hip-tests"]
path = libs/hip-tests
url = ../hip-tests
[submodule "libs/rocJPEG"]
path = libs/rocJPEG
url = ../rocJPEG

View File

@@ -189,6 +189,7 @@ Jupyter
KFD
KFDTest
KiB
KMD
KV
KVM
Keras
@@ -591,6 +592,7 @@ hpp
hsa
hsakmt
hyperparameter
hyperparameters
iDRAC
ib_core
inband

View File

@@ -21,7 +21,19 @@ source software compilers, debuggers, and libraries. ROCm is fully integrated in
## Getting the ROCm Source Code
AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. The source code for ROCm components can be cloned from each of the GitHub repositories using git. For easy access to download the correct versions of each of these tools, the ROCm repository contains submodules that point to the correct versions of each of the ROCm components. They can be found in the `/libs` directory of the ROCm repository.
AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. The source code for ROCm components can be cloned from each of the GitHub repositories using git. For easy access to download the correct versions of each of these tools, the ROCm repository contains a repo manifest file called [default.xml](./default.xml). You can use this manifest file to download the source code for ROCm software.
### Installing the repo tool
The repo tool from Google allows you to manage multiple git repositories simultaneously. Run the following commands to install the repo tool:
```bash
mkdir -p ~/bin/
curl https://storage.googleapis.com/git-repo-downloads/repo > ~/bin/repo
chmod a+x ~/bin/repo
```
**Note:** The `~/bin/` folder is used as an example. You can install the repo tool into a different folder if you prefer.
### Installing git-lfs
@@ -33,12 +45,17 @@ sudo apt-get install git-lfs
### Downloading the ROCm source code
The following example shows how to download the ROCm source from this repository.
The following example shows how to use the repo tool to download the ROCm source code. If you installed the repo tool into a directory other than `~/bin/`, substitute that directory in the commands below:
```bash
git clone https://github.com/ROCm/ROCm -b submodules-6-3-0 --recurse-submodules
mkdir -p ~/ROCm/
cd ~/ROCm/
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x
~/bin/repo sync
```
**Note:** Using this sample code will cause the repo tool to download the open source code associated with the specified ROCm release. Ensure that you have ssh-keys configured on your machine for your GitHub ID prior to the download as explained at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh).
## Building the ROCm source code
Each ROCm component repository contains directions for building that component, such as the rocSPARSE documentation [Installation and Building for Linux](https://rocm.docs.amd.com/projects/rocSPARSE/en/latest/install/Linux_Install_Guide.html). Refer to the specific component documentation for instructions on building the repository.
@@ -59,8 +76,9 @@ The Build time will reduce significantly if we limit the GPU Architecture/s agai
mkdir -p ~/WORKSPACE/ # Or any folder name other than WORKSPACE
cd ~/WORKSPACE/
export ROCM_VERSION=6.3.0 # or 6.2.0 6.1.0
git clone https://github.com/ROCm/ROCm -b submodules-${ROCM_VERSION} --recurse-submodules
export ROCM_VERSION=6.3.0
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync
# --------------------------------------
# Step 2: Prepare build environment
@@ -149,6 +167,12 @@ Note: [Overview for ROCm.mk](tools/rocm-build/README.md)
## ROCm documentation
This repository contains the [manifest file](https://gerrit.googlesource.com/git-repo/+/HEAD/docs/manifest-format.md)
for ROCm releases, changelogs, and release information.
The `default.xml` file contains information for all repositories and the associated commit used to build
the current ROCm release; `default.xml` follows the [repo manifest format](https://gerrit.googlesource.com/git-repo/).
Source code for our documentation is located in the `/docs` folder of most ROCm repositories. The
`develop` branch of our repositories contains content for the next ROCm release.

View File

@@ -232,7 +232,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/AMDMIGraphX/en/docs-6.3.0/index.html">MIGraphX</a></td>
<td>2.11.0</td>
<td>2.10.0&nbsp;&Rightarrow;&nbsp;<a href="#migraphx-2-11-0">2.11.0</a></td>
<td><a href="https://github.com/ROCm/AMDMIGraphX"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
@@ -897,6 +897,78 @@ See the full [AMD SMI changelog](https://github.com/ROCm/amdsmi/blob/6.3.x/CHANG
srcLane, width)` function when one of the parameters to the function is undefined along some path
to the function. See [issue #3499](https://github.com/ROCm/ROCm/issues/3499) on GitHub.
### **MIGraphX** (2.11.0)
#### Added
* Initial code to run on Windows
* Support for `FP8` and `INT4`
* Support for the Log2 internal operator
* Support for the GCC 14 compiler
* The `BitwiseAnd`, `Scan`, `SoftmaxCrossEntropyLoss`, `GridSample`, and `NegativeLogLikelihoodLoss` ONNX operators
* The `MatMulNBits`, `QuantizeLinear`/`DequantizeLinear`, `GroupQueryAttention`, `SkipSimplifiedLayerNormalization`, and `SimpliedLayerNormalizationMicrosoft` Contrib operators
* Dynamic batch parameter support to `OneHot` operator
* Split-K as an optional performance improvement
* Scripts to validate ONNX models from the ONNX Model Zoo
* GPU Pooling Kernel
* `--mlir` flag for the `migraphx-driver` program to offload an entire module to MLIR
* Fusing split-reduce with MLIR
* Multiple outputs for the MLIR + Pointwise fusions
* Pointwise fusions with MLIR across reshape operations
* `MIGRAPHX_MLIR_DUMP` environment variable to dump MLIR modules to MXRs
* The `3` option to `MIGRAPHX_TRACE_BENCHMARKING` to print the MLIR program for improved debug output
* `MIGRAPHX_ENABLE_HIPBLASLT_GEMM` environment variable to call hipBLASLt libraries
* `MIGRAPHX_VERIFY_DUMP_DIFF` to improve the debugging of accuracy issues
* `reduce_any` and `reduce_all` options to the `Reduce` operation via Torch MIGraphX
* Examples for RNNT and ControlNet
#### Changed
* Switched to MLIR's 3D Convolution operator.
* MLIR is now used for Attention operations by default on gfx942 and newer ASICs.
* Names and locations for VRM specific libraries have changed.
* Use random mode for benchmarking GEMMs and convolutions.
* Python version is now printed with an actual version number.
#### Removed
* Disabled requirements for MIOpen and rocBLAS when running on Windows.
* Removed inaccurate warning messages when using exhaustive-tune.
* Removed the hard-coded path in `MIGRAPHX_CXX_COMPILER`, allowing the compiler to be installed in different locations.
#### Optimized
* Improved:
* Infrastructure code to enable better Kernel fusions with all supported data types
* Subsequent model compile time by creating a cache for already performant kernels
* Use of Attention fusion with models
* Performance of the Softmax JIT kernel and of the Pooling operator
* Tuning operations through a new 50ms delay before running the next kernel
* Performance of several convolution-based models through an optimized NHWC layout
* Performance for the `FP8` datatype
* GPU utilization
* Verification tools
* Debug prints
* Documentation, including gpu-driver utility documentation
* Summary section of the `migraphx-driver perf` command
* Reduced model compilation time
* Reordered some compiler passes to allow for more fusions
* Preloaded tiles into LDS to improve performance of pointwise transposes
* Exposed the `external_data_path` property in `onnx_options` to set the path from `onnxruntime`
#### Resolved issues
* Fixed a bug with gfx1030 that overwrote `dpp_reduce`.
* Fixed a bug in 1-arg dynamic reshape that created a failure.
* Fixed a bug with `dot_broadcast` and `inner_broadcast` that caused compile failures.
* Fixed a bug where some configs were failing when using exhaustive-tune.
* Fixed the ROCm Install Guide URL.
* Fixed an issue while building a whl package due to an apostrophe.
* Fixed the BERT Squad example requirements file to support different versions of Python.
* Fixed a bug that stopped the Vicuna model from compiling.
* Fixed failures with the verify option of migraphx-driver that would cause the application to exit early.
### **MIOpen** (3.3.0)
#### Added

View File

@@ -8,7 +8,6 @@
<!--list of projects for ROCm-->
<project name="ROCK-Kernel-Driver" />
<project name="ROCR-Runtime" />
<project name="ROCT-Thunk-Interface" />
<project name="amdsmi" />
<project name="rdc" />
<project name="rocm_bandwidth_test" />
@@ -42,6 +41,7 @@
<project groups="mathlibs" name="ROCmValidationSuite" />
<project groups="mathlibs" name="Tensile" />
<project groups="mathlibs" name="composable_kernel" />
<project groups="mathlibs" name="hipBLAS-common" />
<project groups="mathlibs" name="hipBLAS" />
<project groups="mathlibs" name="hipBLASLt" />
<project groups="mathlibs" name="hipCUB" />
@@ -68,6 +68,7 @@
<project groups="mathlibs" name="rocWMMA" />
<project groups="mathlibs" name="rocm-cmake" />
<project groups="mathlibs" name="rpp" />
<project groups="mathlibs" name="TransferBench" />
<!-- Projects for OpenMP-Extras -->
<project name="aomp" path="openmp-extras/aomp" />
<project name="aomp-extras" path="openmp-extras/aomp-extras" />

View File

@@ -34,7 +34,7 @@ ROCm Version,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
Thrust,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
CUB,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
,,,,,,,,,,
KFD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,
KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
,,,,,,,,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,

View File

@@ -61,7 +61,7 @@ compatibility and system requirements.
Thrust,2.3.2,2.2.0,2.1.0
CUB,2.3.2,2.2.0,2.1.0
,,,
KFD & USER SPACE [#kfd_support]_,.. _kfd-userspace-support-compatibility-matrix:,,
KMD & USER SPACE [#kfd_support]_,.. _kfd-userspace-support-compatibility-matrix:,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x"
,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix:,,
@@ -150,7 +150,7 @@ compatibility and system requirements.
.. [#oracle89] Oracle Linux is supported only on AMD Instinct MI300X.
.. [#mi300_624] **For ROCm 6.2.4** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
.. [#mi300_610] **For ROCm 6.1.0** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.4, RHEL 9.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4.
.. [#kfd_support] ROCm provides forward and backward compatibility between the Kernel Fusion Driver (KFD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
.. [#kfd_support] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
.. [#ROCT-rocr] As of ROCm 6.3.0, the ROCT Thunk Interface is now included as part of the ROCr runtime package.
.. _OS-kernel-versions:
@@ -232,5 +232,5 @@ Expand for full historical view of:
.. [#mi300_610-past-60] **For ROCm 6.1.0** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.4, RHEL 9.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4.
.. [#mi300_602-past-60] **For ROCm 6.0.2** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3.
.. [#mi300_600-past-60] **For ROCm 6.0.0** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3.
.. [#kfd_support-past-60] ROCm provides forward and backward compatibility between the Kernel Fusion Driver (KFD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
.. [#kfd_support-past-60] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
.. [#ROCT-rocr-past-60] As of ROCm 6.3.0, the ROCT Thunk Interface is now included as part of the ROCr runtime package.

View File

@@ -1,241 +0,0 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="GPU memory">
<meta name="keywords" content="GPU memory, VRAM, video random access memory, pageable
memory, pinned memory, managed memory, AMD, ROCm">
</head>
# GPU memory
For the HIP reference documentation, see:
* {doc}`hip:doxygen/html/group___memory`
* {doc}`hip:doxygen/html/group___memory_m`
Host memory exists on the host (e.g. CPU) of the machine in random access memory (RAM).
Device memory exists on the device (e.g. GPU) of the machine in video random access memory (VRAM).
Recent architectures use graphics double data rate (GDDR) synchronous dynamic random-access memory (SDRAM), such as GDDR6, or high-bandwidth memory (HBM), such as HBM2e.
## Memory allocation
Host memory can be allocated as either pageable or pinned memory.
The following API calls will result in these allocations:
| API | Data location | Allocation |
|--------------------|---------------|------------|
| System allocated | Host | Pageable |
| `hipMallocManaged` | Host | Managed |
| `hipHostMalloc` | Host | Pinned |
| `hipMalloc` | Device | Pinned |
:::{tip}
`hipMalloc` and `hipFree` are blocking calls. However, HIP recently added the non-blocking versions `hipMallocAsync` and `hipFreeAsync`, which take a stream as an additional argument.
:::
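The following is a minimal HIP C++ sketch of the allocation paths from the table above, including the stream-ordered variants (which require a recent ROCm release); the buffer size is arbitrary and error handling is reduced to a single macro.

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(expr)                                                \
  do {                                                                 \
    hipError_t status = (expr);                                        \
    if (status != hipSuccess) {                                        \
      std::fprintf(stderr, "HIP error: %s\n", hipGetErrorString(status)); \
      std::exit(EXIT_FAILURE);                                         \
    }                                                                  \
  } while (0)

int main() {
  const size_t size = 1 << 20; // 1 MiB, arbitrary

  // Pageable host memory: a plain system allocation.
  void* pageable = std::malloc(size);

  // Pinned (page-locked) host memory.
  void* pinned = nullptr;
  HIP_CHECK(hipHostMalloc(&pinned, size, hipHostMallocDefault));

  // Managed memory: host-resident until migrated.
  void* managed = nullptr;
  HIP_CHECK(hipMallocManaged(&managed, size));

  // Device memory: hipMalloc is a blocking call.
  void* device = nullptr;
  HIP_CHECK(hipMalloc(&device, size));

  // Non-blocking, stream-ordered allocation and free.
  hipStream_t stream;
  HIP_CHECK(hipStreamCreate(&stream));
  void* async_buf = nullptr;
  HIP_CHECK(hipMallocAsync(&async_buf, size, stream));
  HIP_CHECK(hipFreeAsync(async_buf, stream));
  HIP_CHECK(hipStreamSynchronize(stream));
  HIP_CHECK(hipStreamDestroy(stream));

  HIP_CHECK(hipFree(device));
  HIP_CHECK(hipFree(managed));
  HIP_CHECK(hipHostFree(pinned));
  std::free(pageable);
  return 0;
}
```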
### Pageable memory
Pageable memory is what you usually get when calling `malloc` or `new` in a C++ application.
It is unique in that it exists on "pages" (blocks of memory) that can be migrated to other memory storage.
For example, pages can migrate between CPU sockets on a motherboard, or a system that runs out of RAM can move pages into the swap partition on disk.
### Pinned memory
Pinned memory (or page-locked memory, or non-pageable memory) is host memory that is mapped into the address space of all GPUs, meaning that the pointer can be used on both host and device.
Accessing host-resident pinned memory in device kernels is generally not recommended for performance, as it can force the data to traverse the host-device interconnect (e.g. PCIe), which is much slower than the on-device bandwidth (>40x on MI200).
Pinned host memory can be allocated with one of two types of coherence support, described in the Coherence section below.
:::{note}
In HIP, pinned memory allocations are coherent by default (`hipHostMallocDefault`).
There are additional pinned memory flags (e.g. `hipHostMallocMapped` and `hipHostMallocPortable`).
On MI200 these options do not impact performance.
<!-- TODO: link to programming_manual#memory-allocation-flags -->
For more information, see the section *memory allocation flags* in the HIP Programming Guide: {doc}`hip:how-to/programming_manual`.
:::
Much like a process can be locked to a CPU core by setting affinity, pinned memory is locked into its physical memory pages and cannot be migrated.
On multi-socket systems it is important to ensure that pinned memory is located on the same socket as the owning process, or else each cache line will be moved through the CPU-CPU interconnect, thereby increasing latency and potentially decreasing bandwidth.
In practice, pinned memory is used to improve transfer times between host and device.
For transfer operations, such as `hipMemcpy` or `hipMemcpyAsync`, using pinned memory instead of pageable memory on host can lead to a ~3x improvement in bandwidth.
:::{tip}
If the application needs to move data back and forth between device and host (separate allocations), use pinned memory on the host side.
:::
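For example, a minimal sketch of a pinned-buffer round trip with `hipMemcpyAsync` might look like this (the size and single stream are arbitrary choices):

```cpp
#include <hip/hip_runtime.h>

int main() {
  const size_t bytes = 64 << 20; // 64 MiB, arbitrary

  // Page-locked host staging buffer: enables asynchronous DMA transfers.
  float* h_staging = nullptr;
  hipHostMalloc(reinterpret_cast<void**>(&h_staging), bytes, hipHostMallocDefault);

  float* d_buf = nullptr;
  hipMalloc(reinterpret_cast<void**>(&d_buf), bytes);

  hipStream_t stream;
  hipStreamCreate(&stream);

  // With pinned memory these copies are truly asynchronous and can
  // overlap with kernels running on other streams.
  hipMemcpyAsync(d_buf, h_staging, bytes, hipMemcpyHostToDevice, stream);
  hipMemcpyAsync(h_staging, d_buf, bytes, hipMemcpyDeviceToHost, stream);
  hipStreamSynchronize(stream);

  hipStreamDestroy(stream);
  hipFree(d_buf);
  hipHostFree(h_staging);
  return 0;
}
```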
### Managed memory
Managed memory refers to universally addressable, or unified memory available on the MI200 series of GPUs.
Much like pinned memory, managed memory shares a pointer between host and device and (by default) supports fine-grained coherence; however, managed memory can also automatically migrate pages between host and device.
The allocation is managed by the AMD GPU driver using the Linux heterogeneous memory management (HMM) mechanism.
If HMM is not available, then `hipMallocManaged` will default back to using system memory and will act like pinned host memory.
Other managed memory API calls will have undefined behavior.
It is therefore recommended to check for managed memory capability using `hipDeviceGetAttribute` with the `hipDeviceAttributeManagedMemory` attribute.
HIP supports additional calls that work with page migration:
* `hipMemAdvise`
* `hipMemPrefetchAsync`
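A minimal sketch of the recommended capability check followed by the page-migration hints above (device `0`, the advice flag, and the buffer size are illustrative):

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
  const int device = 0;
  int has_managed = 0;
  hipDeviceGetAttribute(&has_managed, hipDeviceAttributeManagedMemory, device);
  if (!has_managed) {
    std::printf("Managed memory is not supported on device %d\n", device);
    return 0;
  }

  const size_t bytes = 1 << 20;
  float* data = nullptr;
  hipMallocManaged(reinterpret_cast<void**>(&data), bytes);

  // Hint that the device will mostly read this range, then migrate the
  // pages to the device before the first kernel touches them.
  hipMemAdvise(data, bytes, hipMemAdviseSetReadMostly, device);
  hipMemPrefetchAsync(data, bytes, device, /*stream=*/nullptr);
  hipDeviceSynchronize();

  hipFree(data);
  return 0;
}
```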
:::{tip}
If the application needs to use data on both host and device regularly, does not want to deal with separate allocations, and is not worried about maxing out the VRAM on MI200 GPUs (64 GB per GCD), use managed memory.
:::
:::{tip}
If managed memory performance is poor, check to see if managed memory is supported on your system and if page migration (XNACK) is enabled.
:::
## Access behavior
Memory allocations for GPUs behave as follows:
| API | Data location | Host access | Device access |
|--------------------|---------------|--------------|----------------------|
| System allocated | Host | Local access | Unhandled page fault |
| `hipMallocManaged` | Host | Local access | Zero-copy |
| `hipHostMalloc` | Host | Local access | Zero-copy* |
| `hipMalloc` | Device | Zero-copy | Local access |
Zero-copy accesses happen over the Infinity Fabric interconnect or PCIe lanes on discrete GPUs.
:::{note}
While `hipHostMalloc` allocated memory is accessible by a device, the host pointer must be converted to a device pointer with `hipHostGetDevicePointer`.
Memory allocated through standard system allocators, such as `malloc`, can be accessed by a device after registering the memory via `hipHostRegister`.
The device pointer to be used in kernels can be retrieved with `hipHostGetDevicePointer`.
Registered memory is treated like `hipHostMalloc` and will have similar performance.
On devices that support and have [](#xnack) enabled, such as the MI250X, `hipHostRegister` is not required as memory accesses are handled via automatic page migration.
:::
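A minimal sketch of that registration flow (buffer size arbitrary; the kernel launch is elided):

```cpp
#include <hip/hip_runtime.h>
#include <cstdlib>

int main() {
  const size_t bytes = 1 << 20;

  // An ordinary pageable system allocation.
  float* h_buf = static_cast<float*>(std::malloc(bytes));

  // Pin the existing allocation so the device can access it.
  hipHostRegister(h_buf, bytes, hipHostRegisterDefault);

  // Retrieve the device-side view of the same memory for use in kernels.
  float* d_view = nullptr;
  hipHostGetDevicePointer(reinterpret_cast<void**>(&d_view), h_buf, 0);

  // ... launch kernels that read and write through d_view ...

  hipHostUnregister(h_buf);
  std::free(h_buf);
  return 0;
}
```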
### XNACK
Normally, host and device memory are separate and data has to be transferred manually via `hipMemcpy`.
On a subset of GPUs, such as the MI200, there is an option to automatically migrate pages of memory between host and device.
This is important for managed memory, where the locality of the data is important for performance.
Depending on the system, page migration may be disabled by default, in which case managed memory will act like pinned host memory and suffer degraded performance.
*XNACK* describes the GPU's ability to retry memory accesses that fail due to a page fault (which would normally lead to a memory access error) and retrieve the missing page instead.
This also affects memory allocated by the system as indicated by the following table:
| API | Data location | Host after device access | Device after host access |
|--------------------|---------------|--------------------------|--------------------------|
| System allocated | Host | Migrate page to host | Migrate page to device |
| `hipMallocManaged` | Host | Migrate page to host | Migrate page to device |
| `hipHostMalloc` | Host | Local access | Zero-copy |
| `hipMalloc` | Device | Zero-copy | Local access |
To check if page migration is available on a platform, use `rocminfo`:
```sh
$ rocminfo | grep xnack
Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-
```
Here, `xnack-` means that XNACK is available but is disabled by default.
Turning on XNACK by setting the environment variable `HSA_XNACK=1` gives the expected result, `xnack+`:
```sh
$ HSA_XNACK=1 rocminfo | grep xnack
Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack+
```
By default, `hipcc` generates code that runs correctly whether XNACK is enabled or disabled.
Setting the `--offload-arch=` option with `xnack+` or `xnack-` forces the code to run only with XNACK enabled or disabled, respectively.
```sh
# Compiled kernels will run regardless if XNACK is enabled or is disabled.
hipcc --offload-arch=gfx90a
# Compiled kernels will only run if XNACK is enabled with HSA_XNACK=1.
hipcc --offload-arch=gfx90a:xnack+
# Compiled kernels will only run if XNACK is disabled with HSA_XNACK=0.
hipcc --offload-arch=gfx90a:xnack-
```
:::{tip}
If you want to make use of page migration, use managed memory. While pageable memory will migrate correctly, it is not a portable solution and can have performance issues if the accessed data isn't page aligned.
:::
### Coherence
* *Coarse-grained coherence* means that memory is only considered up to date at kernel boundaries, which can be enforced through `hipDeviceSynchronize`, `hipStreamSynchronize`, or any blocking operation that acts on the null stream (e.g. `hipMemcpy`).
For example, cacheable memory is a type of coarse-grained memory where an up-to-date copy of the data can be stored elsewhere (e.g. in an L2 cache).
* *Fine-grained coherence* means the coherence is supported while a CPU/GPU kernel is running.
This can be useful if both host and device are operating on the same dataspace using system-scope atomic operations (e.g. updating an error code or flag to a buffer).
Fine-grained memory implies that up-to-date data may be made visible to others regardless of kernel boundaries as discussed above.
| API | Flag | Coherence |
|-------------------------|------------------------------|----------------|
| `hipHostMalloc` | `hipHostMallocDefault` | Fine-grained |
| `hipHostMalloc` | `hipHostMallocNonCoherent` | Coarse-grained |
| API | Flag | Coherence |
|-------------------------|------------------------------|----------------|
| `hipExtMallocWithFlags` | `hipDeviceMallocDefault` | Coarse-grained |
| `hipExtMallocWithFlags` | `hipDeviceMallocFinegrained` | Fine-grained |
| API | `hipMemAdvise` argument | Coherence |
|-------------------------|------------------------------|----------------|
| `hipMallocManaged` | | Fine-grained |
| `hipMallocManaged` | `hipMemAdviseSetCoarseGrain` | Coarse-grained |
| `malloc` | | Fine-grained |
| `malloc` | `hipMemAdviseSetCoarseGrain` | Coarse-grained |
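To make the tables concrete, a minimal sketch of requesting each coherence mode explicitly (sizes arbitrary; `hipExtMallocWithFlags` is the AMD extension named above):

```cpp
#include <hip/hip_runtime.h>

int main() {
  const size_t bytes = 4096;

  // Pinned host memory: fine-grained by default, coarse-grained on request.
  void* fine_host = nullptr;
  hipHostMalloc(&fine_host, bytes, hipHostMallocDefault);
  void* coarse_host = nullptr;
  hipHostMalloc(&coarse_host, bytes, hipHostMallocNonCoherent);

  // Device memory: request fine-grained coherence explicitly.
  void* fine_dev = nullptr;
  hipExtMallocWithFlags(&fine_dev, bytes, hipDeviceMallocFinegrained);

  // Managed memory: fine-grained by default, demoted with hipMemAdvise.
  void* managed = nullptr;
  hipMallocManaged(&managed, bytes);
  hipMemAdvise(managed, bytes, hipMemAdviseSetCoarseGrain, /*device=*/0);

  hipFree(managed);
  hipFree(fine_dev);
  hipHostFree(coarse_host);
  hipHostFree(fine_host);
  return 0;
}
```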
:::{tip}
Try to design your algorithms to avoid host-device memory coherence (e.g. system scope atomics). While it can be a useful feature in very specific cases, it is not supported on all systems, and can negatively impact performance by introducing the host-device interconnect bottleneck.
:::
The availability of fine- and coarse-grained memory pools can be checked with `rocminfo`:
```sh
$ rocminfo
...
*******
Agent 1
*******
Name: AMD EPYC 7742 64-Core Processor
...
Pool Info:
Pool 1
Segment: GLOBAL; FLAGS: FINE GRAINED
...
Pool 3
Segment: GLOBAL; FLAGS: COARSE GRAINED
...
*******
Agent 9
*******
Name: gfx90a
...
Pool Info:
Pool 1
Segment: GLOBAL; FLAGS: COARSE GRAINED
...
```
## System direct memory access
In most cases, HIP's default behavior when transferring data from a pinned host allocation to the device runs at the limit of the interconnect.
However, there are certain cases where the interconnect is not the bottleneck.
The primary way to transfer data onto and off of a GPU, such as the MI200, is to use the onboard system direct memory access (SDMA) engine, which feeds blocks of memory to the off-device interconnect (either GPU-CPU or GPU-GPU).
Each GCD has a separate SDMA engine for host-to-device and device-to-host memory transfers.
Importantly, SDMA engines are separate from the computing infrastructure, meaning that memory transfers to and from a device will not impact kernel compute performance, though they do impact memory bandwidth to a limited extent.
The SDMA engines are mainly tuned for PCIe-4.0 x16, which means they are designed to operate at bandwidths up to 32 GB/s.
:::{note}
An important feature of the MI250X platform is the Infinity Fabric™ interconnect between host and device.
The Infinity Fabric interconnect supports improved performance over standard PCIe-4.0 (usually ~50% more bandwidth); however, since the SDMA engine does not run at this speed, it will not max out the bandwidth of the faster interconnect.
:::
The bandwidth limitation can be countered by bypassing the SDMA engine and replacing it with a type of copy kernel known as a "blit" kernel.
Blit kernels will use the compute units on the GPU, thereby consuming compute resources, which may not always be beneficial.
The easiest way to enable blit kernels is to set the environment variable `HSA_ENABLE_SDMA=0`, which disables the SDMA engine.
On systems where the GPU uses a PCIe interconnect instead of an Infinity Fabric interconnect, blit kernels will not impact bandwidth, but will still consume compute resources.
The use of SDMA vs blit kernels also applies to MPI data transfers and GPU-GPU transfers.

View File

@@ -43,6 +43,7 @@ article_pages = [
{"file": "how-to/rocm-for-ai/index", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/install", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/train-a-model", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/accelerate-training", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/deploy-your-model", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/hugging-face-models", "os": ["linux"]},
{"file": "how-to/rocm-for-hpc/index", "os": ["linux"]},

Binary files not shown: seven images added (139 KiB, 30 KiB, 36 KiB, 35 KiB, 242 KiB, 155 KiB, 242 KiB).

View File

@@ -16,6 +16,8 @@ In this guide, you'll learn about:
- :doc:`Installing ROCm and machine learning frameworks <install>`
- :doc:`Scaling model training <scale-model-training>`
- :doc:`Training a model <train-a-model>`
- :doc:`Running models from Hugging Face <hugging-face-models>`

View File

@@ -0,0 +1,135 @@
.. meta::
:description: How to scale and accelerate model training
:keywords: ROCm, AI, LLM, train, fine-tune, deploy, FSDP, DeepSpeed, LLaMA, tutorial
**********************
Scaling model training
**********************
Training a large-scale model like OpenAI GPT-2 or Meta Llama 2 70B presents a fundamental challenge: no single GPU or
accelerator can simultaneously store and process all of the model's parameters during training. PyTorch
provides an answer to this computational constraint through its distributed training frameworks.
.. _rocm-for-ai-pytorch-distributed:
PyTorch distributed
===================
Features in ``torch.distributed`` are categorized into three main components:
- `Distributed data-parallel training
<https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html>`_ (DDP)
- `RPC-Based distributed training <https://pytorch.org/docs/stable/rpc.html>`_ (RPC)
- `Collective communication <https://pytorch.org/docs/stable/distributed.html>`_
In this topic, the focus is on the distributed data-parallelism strategy, as it's the most popular. To get started with DDP,
you need to first understand how to coordinate the model and its training data across multiple accelerators or GPUs.
The DDP workflow on multiple accelerators or GPUs is as follows:
#. Split the current global training batch into small local batches on each GPU. For instance, if you have 8 GPUs and
the global batch is set at 32 samples, each of the 8 GPUs will have a local batch size of 4 samples.
#. Copy the model to every device so each can process its local batches independently.
#. Run a forward pass, then a backward pass, and output the gradient of the weights with respect to the loss of the
model for that local batch. This happens in parallel on multiple devices.
#. Synchronize the local gradients computed by each device and combine them to update the model weights. The updated
weights are then redistributed to each device.
In DDP training, each process or worker owns a replica of the model and processes a batch of data, and then the reducer uses
``allreduce`` to sum up gradients over different workers.
See the following developer blogs for more in-depth explanations and examples.
* `Multi GPU training with DDP — PyTorch Tutorials <https://pytorch.org/tutorials/beginner/ddp_series_multigpu.html>`_
* `Building a decoder transformer model on AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/decoder-transformer/README.html#distributed-training-on-multiple-gpus>`_
.. _rocm-for-ai-pytorch-fsdp:
PyTorch FSDP
------------
As noted in :ref:`PyTorch distributed <rocm-for-ai-pytorch-distributed>`, DDP model weights and optimizer states
are evenly replicated across all workers. Fully Sharded Data Parallel (FSDP) is a type of data parallelism that shards
model parameters, optimizer states, and gradients across DDP ranks.
When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all workers. This makes
training some very large models feasible by allowing larger models or batch sizes to fit on-device. However, this
comes with the cost of increased communication volume. The communication overhead is reduced by internal optimizations
like overlapping communication and computation.
For a high-level overview of how FSDP works, review `Getting started with Fully Sharded Data Parallel
<https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html#how-fsdp-works>`_.
For detailed training steps, see `PyTorch FSDP examples
<https://github.com/pytorch/examples/tree/main/distributed/FSDP>`_.
.. _rocm-for-ai-deepspeed:
DeepSpeed
---------
`DeepSpeed <https://deepspeed.ai>`_ offers system innovations that make large-scale deep learning training effective,
efficient, and easy to use. Innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, ZeRO-Infinity, and so on fall under
the training pillar.
See `Pre-training a large language model with Megatron-DeepSpeed on multiple AMD GPUs
<https://rocm.blogs.amd.com/artificial-intelligence/megatron-deepspeed-pretrain/README.html>`_ for a detailed example of
training with DeepSpeed on an AMD accelerator or GPU.
.. _rocm-for-ai-automatic-mixed-precision:
Automatic mixed precision (AMP)
-------------------------------
As models increase in size, so do the time and memory needed to train them; their cost also increases. Any measure we
can take to reduce training time and memory usage through `automatic mixed precision
<https://pytorch.org/docs/stable/amp.html>`_ (AMP) is highly beneficial for most use cases.
See `Automatic mixed precision in PyTorch using AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/automatic-mixed-precision/README.html#automatic-mixed-precision-in-pytorch-using-amd-gpus>`_
for more information about running AMP on an AMD accelerator.
.. _rocm-for-ai-fine-tune:
Fine-tuning your model
======================
ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
example, LoRA, QLoRA, PEFT, and FSDP.
Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.
The following developer blogs showcase examples of fine-tuning a model on an AMD accelerator or GPU.
* Fine-tuning Llama2 with LoRA
* `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering
<https://rocm.blogs.amd.com/artificial-intelligence/llama2-lora/README.html>`_
* Fine-tuning Llama2 with QLoRA
* `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU
<https://rocm.blogs.amd.com/artificial-intelligence/llama2-Qlora/README.html>`_
* Fine-tuning a BERT-based LLM for a text classification task using JAX
* `LLM distributed supervised fine-tuning with JAX
<https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
* Fine-tuning StarCoder using PEFT
* `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs
<https://rocm.blogs.amd.com/artificial-intelligence/starcoder-fine-tune/README.html>`_
* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``
* `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
single/multi-node GPUs <https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart/finetuning>`_

View File

@@ -1,140 +1,503 @@
.. meta::
:description: How to use ROCm for AI
:keywords: ROCm, AI, LLM, train, fine-tune, FSDP, DeepSpeed, LLaMA, tutorial
:description: How to train a model using ROCm Megatron-LM
:keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch
****************
Training a model
****************
**************************************
Training a model with ROCm Megatron-LM
**************************************
The following is a brief overview of popular component paths per AI development use-case, such as training, LLMs,
and inferencing.
.. _amd-megatron-lm:
Accelerating model training
===========================
The ROCm Megatron-LM framework is a specialized fork of the robust Megatron-LM, designed to
enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD Instinct™ MI300X
accelerators, AMD Megatron-LM delivers enhanced scalability, performance, and resource utilization for AI
workloads. It is purpose-built to :ref:`support models <amd-megatron-lm-model-support>`
like Meta's Llama 2, Llama 3, and Llama 3.1, enabling developers to train next-generation AI models with greater
efficiency. See the GitHub repository at `<https://github.com/ROCm/Megatron-LM>`__.
To train a large model like GPT2 or Llama 2 70B, a single accelerator or GPU cannot store all the model parameters
required for training. What if you could convert the single-GPU training code to run on multiple accelerators or GPUs?
PyTorch offers distributed training solutions to facilitate this.
For ease of use, AMD provides a ready-to-use Docker image for MI300X accelerators containing essential
components, including PyTorch, PyTorch Lightning, ROCm libraries, and Megatron-LM utilities. It contains the
following software to accelerate training workloads:
.. _rocm-for-ai-pytorch-distributed:
+--------------------------+--------------------------------+
| Software component | Version |
+==========================+================================+
| ROCm | 6.1 |
+--------------------------+--------------------------------+
| PyTorch | 2.4.0 |
+--------------------------+--------------------------------+
| PyTorch Lightning | 2.4.0 |
+--------------------------+--------------------------------+
| Megatron Core | 0.9.0 |
+--------------------------+--------------------------------+
| Transformer Engine | 1.5.0 |
+--------------------------+--------------------------------+
| Flash Attention | v2.6 |
+--------------------------+--------------------------------+
| Transformers | 4.44.0 |
+--------------------------+--------------------------------+
PyTorch distributed
-------------------
Supported features and models
=============================
As of PyTorch 1.6.0, features in ``torch.distributed`` are categorized into three main components:
Megatron-LM provides the following key features to train large language models efficiently:
- `Distributed data-parallel training
<https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html>`_ (DDP)
- Transformer Engine (TE)
- `RPC-Based distributed training <https://pytorch.org/docs/stable/rpc.html>`_ (RPC)
- APEX
- `Collective communication <https://pytorch.org/docs/stable/distributed.html>`_
- GEMM tuning
In this guide, the focus is on the distributed data-parallelism strategy, as it's the most popular. To get started with DDP,
let's first understand how to coordinate the model and its training data across multiple accelerators or GPUs.
- Torch.compile
The DDP workflow on multiple accelerators or GPUs is as follows:
- 3D parallelism: TP + SP + CP
#. Split the current global training batch into small local batches on each GPU. For instance, if you have 8 GPUs and
the global batch is set at 32 samples, each of the 8 GPUs will have a local batch size of 4 samples.
- Distributed optimizer
#. Copy the model to every device so each device can process its local batches independently.
- Flash Attention (FA) 2
#. Run a forward pass, then a backward pass, and output the gradient of the weights with respect to the loss of the
model for that local batch. This happens in parallel on multiple devices.
- Fused kernels
#. Synchronize the local gradients computed by each device and combine them to update the model weights. The updated
weights are then redistributed to each device.
- Pre-training
In DDP training, each process or worker owns a replica of the model and processes a batch of data, then the reducer uses
``allreduce`` to sum up gradients over different workers.
.. _amd-megatron-lm-model-support:
See the following developer blogs for more in-depth explanations and examples.
The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator.
* `Multi GPU training with DDP — PyTorch Tutorials <https://pytorch.org/tutorials/beginner/ddp_series_multigpu.html>`_
* Llama 2 7B
* `Building a decoder transformer model on AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/decoder-transformer/README.html#distributed-training-on-multiple-gpus>`_
* Llama 2 70B
.. _rocm-for-ai-pytorch-fsdp:
* Llama 3 8B
PyTorch FSDP
------------
* Llama 3 70B
As noted in :ref:`PyTorch distributed <rocm-for-ai-pytorch-distributed>`, in DDP model weights and optimizer states
are evenly replicated across all workers. Fully Sharded Data Parallel (FSDP) is a type of data parallelism that shards
model parameters, optimizer states, and gradients across DDP ranks.
* Llama 3.1 8B
When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all workers. This makes
the training of some very large models feasible by allowing larger models or batch sizes to fit on-device. However, this
comes with the cost of increased communication volume. The communication overhead is reduced by internal optimizations
like overlapping communication and computation.
* Llama 3.1 70B
For a high-level overview of how FSDP works, review `Getting started with Fully Sharded Data Parallel
<https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html#how-fsdp-works>`_.
Prerequisite system validation steps
====================================
For detailed training steps, refer to the `PyTorch FSDP examples
<https://github.com/pytorch/examples/tree/main/distributed/FSDP>`_.
Complete the following system validation and optimization steps to set up your system before starting training.
.. _rocm-for-ai-deepspeed:
Disable NUMA auto-balancing
---------------------------
DeepSpeed
---------
Generally, application performance can benefit from disabling NUMA auto-balancing. However,
it might be detrimental to performance with certain types of workloads.
`DeepSpeed <https://deepspeed.ai>`_ offers system innovations that make large-scale deep learning training effective,
efficient, and easy to use. Innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, ZeRO-Infinity, and so on fall under
the training pillar.
Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
the output is ``1``, run the following command to disable NUMA auto-balancing.
See `Pre-training a large language model with Megatron-DeepSpeed on multiple AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/megatron-deepspeed-pretrain/README.html>`_ for a detailed example of
training with DeepSpeed on an AMD accelerator or GPU.
.. code-block:: shell
.. _rocm-for-ai-automatic-mixed-precision:
sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
Automatic mixed precision (AMP)
See :ref:`mi300x-disable-numa` for more information.
Hardware verification with ROCm
-------------------------------
As models increase in size, so do the time and memory needed to train them; their cost also increases. Any measure we
can take to reduce training time and memory usage through `automatic mixed precision
<https://pytorch.org/docs/stable/amp.html>`_ (AMP) is highly beneficial for most use cases.
Use the command ``rocm-smi --setperfdeterminism 1900`` to set the max clock speed up to 1900 MHz
instead of the default 2100 MHz. This can reduce the chance of a PCC event lowering the attainable
GPU clocks. This setting will not be required for new IFWI releases with the production PRC feature.
You can restore this setting to its default value with the ``rocm-smi -r`` command.
See `Automatic mixed precision in PyTorch using AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/automatic-mixed-precision/README.html#automatic-mixed-precision-in-pytorch-using-amd-gpus>`_
for more information about running AMP on an AMD accelerator.
Run the command:
.. _rocm-for-ai-fine-tune:
.. code-block:: shell
Fine-tuning your model
======================
rocm-smi --setperfdeterminism 1900
ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
example, LoRA, QLoRA, PEFT, and FSDP.
See :ref:`mi300x-hardware-verification-with-rocm` for more information.
Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.
RCCL Bandwidth Test
-------------------
The following developer blogs showcase examples of how to fine-tune a model on an AMD accelerator or GPU.
ROCm Collective Communications Library (RCCL) is a standalone library of standard collective communication
routines for GPUs. See the :doc:`RCCL documentation <rccl:index>` for more information. Before starting
pre-training, running a RCCL bandwidth test helps ensure that the multi-GPU or multi-node setup is optimized
for efficient distributed training.
* Fine-tuning Llama2 with LoRA
Running the RCCL bandwidth test helps verify that:
* `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/llama2-lora/README.html>`_
- The GPUs can communicate across nodes or within a single node.
* Fine-tuning Llama2 with QLoRA
- The interconnect (such as InfiniBand, Ethernet, or Infinity Fabric) is functioning as expected and
  provides adequate bandwidth for communication.
* `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/llama2-Qlora/README.html>`_
- There are no hardware setup or cabling issues that could affect communication between GPUs.
* Fine-tuning a BERT-based LLM for a text classification task using JAX
Tuning and optimizing hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `LLM distributed supervised fine-tuning with JAX — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
In distributed training, specific hyperparameters related to distributed communication can be tuned based on
the results of the RCCL bandwidth test. These variables are already set in the Docker image:
* Fine-tuning StarCoder using PEFT
.. code-block:: shell
* `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/starcoder-fine-tune/README.html>`_
# force all RCCL streams to be high priority
export TORCH_NCCL_HIGH_PRIORITY=1
* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``
# specify which RDMA interfaces to use for communication
export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
# define the Global ID index used in RoCE mode
export NCCL_IB_GID_INDEX=3
# avoid data corruption/mismatch issue that existed in past releases
export RCCL_MSCCL_ENABLE=0
Running the RCCL Bandwidth Test
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's recommended that you run the RCCL bandwidth test before launching training to confirm that system
performance is sufficient. The RCCL tests are not included in the AMD Megatron-LM Docker image; follow the
instructions in `<https://github.com/ROCm/rccl-tests>`__ to build them.
See :ref:`mi300x-rccl` for more information.
Run on 8 GPUs (``-g 8``), scanning from 8 bytes to 10 GB:
.. code-block:: shell
./build/all_reduce_perf -b 8 -e 10G -f 2 -g 8
.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
:width: 800
For performance-oriented runs, it's recommended to use one MPI process per GPU with ``-g 1`` on both
single-node and multi-node systems. A run on 8 GPUs then looks like this:
.. code-block:: shell
mpirun -np 8 --bind-to numa ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 1
.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
:width: 800
Running with one MPI process per GPU ensures a one-to-one mapping for CPUs and GPUs, which can be beneficial
for smaller message sizes. This better represents the real-world use of RCCL in deep learning frameworks like
PyTorch and TensorFlow.
Use the following script to run the RCCL test on four MI300X GPU nodes. Modify the paths and node addresses as needed.

.. code-block:: shell

   /home/$USER/ompi_for_gpu/ompi/bin/mpirun -np 32 -H tw022:8,tw024:8,tw010:8,tw015:8 \
       --mca pml ucx \
       --mca btl ^openib \
       -x NCCL_SOCKET_IFNAME=ens50f0np0 \
       -x NCCL_IB_HCA=rdma0:1,rdma1:1,rdma2:1,rdma3:1,rdma4:1,rdma5:1,rdma6:1,rdma7:1 \
       -x NCCL_IB_GID_INDEX=3 \
       -x NCCL_MIN_NCHANNELS=40 \
       -x NCCL_DEBUG=version \
       $HOME/rccl-tests/build/all_reduce_perf -b 8 -e 8g -f 2 -g 1
.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
:width: 800
.. _mi300x-amd-megatron-lm-training:
Start training on MI300X accelerators
=====================================
The pre-built ROCm Megatron-LM environment allows users to quickly validate system performance, conduct
training benchmarks, and achieve superior performance for models like Llama 2 and Llama 3.1.
Use the following instructions to set up the environment, configure the script to train models, and
reproduce the benchmark results on the MI300X accelerators with the AMD Megatron-LM Docker
image.
.. _amd-megatron-lm-requirements:
Download the Docker image and required packages
-----------------------------------------------
1. Use the following command to pull the Docker image from Docker Hub.
.. code-block:: shell
docker pull rocm/megatron-lm:24.12-dev
2. Launch the Docker container.
.. code-block:: shell
docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $CACHE_DIR:/root/.cache --name megatron-dev-env rocm/megatron-lm:24.12-dev /bin/bash
3. Clone the ROCm Megatron-LM repository to a local directory and install the required packages on the host machine.
.. code-block:: shell
git clone https://github.com/ROCm/Megatron-LM
cd Megatron-LM
.. note::
This release is validated with ``ROCm/Megatron-LM`` commit `bb93ccb <https://github.com/ROCm/Megatron-LM/tree/bb93ccbfeae6363c67b361a97a27c74ab86e7e92>`_.
Checking out this specific commit is recommended for a stable and reproducible environment.
.. code-block:: shell
git checkout bb93ccbfeae6363c67b361a97a27c74ab86e7e92
Prepare training datasets
-------------------------
If you already have preprocessed data, you can skip this section.

Use the following command to process datasets. This example uses GPT data; adjust options such as the merge
table (``--merge-file``), the end-of-document token (``--append-eod``), sentence splitting, and the tokenizer
type (``--tokenizer-type``) to suit your dataset.
.. code-block:: shell
python tools/preprocess_data.py \
--input my-corpus.json \
--output-prefix my-gpt2 \
--vocab-file gpt2-vocab.json \
--tokenizer-type GPT2BPETokenizer \
--merge-file gpt2-merges.txt \
--append-eod
In this case, the automatically generated output files are named ``my-gpt2_text_document.bin`` and
``my-gpt2_text_document.idx``.
.. image:: ../../data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png
:width: 800
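If you don't yet have a corpus in the expected format, note that ``preprocess_data.py`` consumes loose JSON:
one document per line, with the raw text stored under the ``text`` key by default (configurable with
``--json-keys``). The following sketch generates a toy ``my-corpus.json`` matching the example above; the
document contents are placeholders.

.. code-block:: python

   import json

   docs = [
       "First example document for preprocessing.",
       "Second example document; one JSON object per line.",
   ]

   # loose JSON: each line is a standalone {"text": ...} record, not a JSON array
   with open("my-corpus.json", "w") as f:
       for doc in docs:
           f.write(json.dumps({"text": doc}) + "\n")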
.. _amd-megatron-lm-environment-setup:
Environment setup
-----------------
In the ``examples/llama`` directory of Megatron-LM, if you're working with Llama 2 7B or Llama 2 70B, use the
``train_llama2.sh`` configuration script. Likewise, if you're working with Llama 3 or Llama 3.1, use
``train_llama3.sh``, updating the configuration script accordingly.
Network interface
^^^^^^^^^^^^^^^^^
To avoid connectivity issues, ensure the correct network interface is set in your training scripts.
1. Run the following command to find the active network interface on your system.
.. code-block:: shell
ip a
2. Update the ``NCCL_SOCKET_IFNAME`` and ``GLOO_SOCKET_IFNAME`` variables with your system's network interface. For
   example:
.. code-block:: shell
export NCCL_SOCKET_IFNAME=ens50f0np0
export GLOO_SOCKET_IFNAME=ens50f0np0
Dataset options
^^^^^^^^^^^^^^^
You can use either mock data or real data for training.
* If you're using a real dataset, update the ``DATA_PATH`` variable to point to the location of your dataset.
.. code-block:: shell
DATA_DIR="/root/.cache/data" # Change to where your dataset is stored
DATA_PATH=${DATA_DIR}/bookcorpus_text_sentence
.. code-block:: shell
--data-path $DATA_PATH
Ensure that the files are accessible inside the Docker container.
* Mock data can be useful for testing and validation. If you're using mock data, replace ``--data-path $DATA_PATH`` with the ``--mock-data`` option.
.. code-block:: shell
--mock-data
Tokenizer
^^^^^^^^^
Tokenization is the process of converting raw text into tokens that can be processed by the model. For Llama
models, this typically involves sub-word tokenization, where words are broken down into smaller units based on
a fixed vocabulary. The tokenizer is trained along with the model on a large corpus of text, and it learns a
fixed vocabulary that can represent a wide range of text from different domains. This allows Llama models to
handle a variety of input sequences, including unseen words or domain-specific terms.
To train any of the Llama 2 models that this Docker image supports, use the ``Llama2Tokenizer``.
To train any of the Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``
and set the Hugging Face model name in the ``TOKENIZER_MODEL`` variable.
For example, if you're using the Llama 3.1 8B model:
.. code-block:: shell
TOKENIZER_MODEL=meta-llama/Llama-3.1-8B
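To sanity-check the tokenizer outside of Megatron-LM, you can load it directly with the Hugging Face
``transformers`` library and inspect the sub-word pieces it produces. This is an optional sketch; it assumes
``transformers`` is installed and that your account has access to the gated ``meta-llama/Llama-3.1-8B``
repository (for example, after ``huggingface-cli login``).

.. code-block:: python

   from transformers import AutoTokenizer

   # downloads the tokenizer files on first use
   tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")

   ids = tokenizer("Sub-word tokenization splits rare words.")["input_ids"]
   print(tokenizer.convert_ids_to_tokens(ids))  # inspect the sub-word pieces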
Run benchmark tests
-------------------
.. note::
If you're running **multi-node training**, update the following environment variables. They can
also be passed as command-line arguments.
* Change ``localhost`` to the master node's hostname:
.. code-block:: shell
MASTER_ADDR="${MASTER_ADDR:-localhost}"
* Set the number of nodes you want to train on (for instance, ``2``, ``4``, ``8``):
.. code-block:: shell
NNODES="${NNODES:-1}"
* Set the rank of each node (0 for master, 1 for the first worker node, and so on):
.. code-block:: shell
NODE_RANK="${NODE_RANK:-0}"
* Use this command to run a performance benchmark test of any of the Llama 2 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).
.. code-block:: shell
{variables} bash examples/llama/train_llama2.sh
* Use this command to run a performance benchmark test of any of the Llama 3 and Llama 3.1 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).
.. code-block:: shell
{variables} bash examples/llama/train_llama3.sh
.. _amd-megatron-lm-benchmark-test-vars:
The benchmark tests support the same set of variables:
+--------------------------+-----------------------+-----------------------+
| Name                     | Options               | Description           |
+==========================+=======================+=======================+
| ``TEE_OUTPUT``           | 0 or 1                | 0: disable training   |
|                          |                       | log                   |
|                          |                       |                       |
|                          |                       | 1: enable training    |
|                          |                       | log                   |
+--------------------------+-----------------------+-----------------------+
| ``MBS``                  |                       | Micro batch size      |
+--------------------------+-----------------------+-----------------------+
| ``BS``                   |                       | Batch size            |
+--------------------------+-----------------------+-----------------------+
| ``TP``                   | 1, 2, 4, 8            | Tensor parallel size  |
+--------------------------+-----------------------+-----------------------+
| ``TE_FP8``               | 0 or 1                | Datatype:             |
|                          |                       |                       |
|                          |                       | 1: FP8                |
|                          |                       |                       |
|                          |                       | 0: BF16               |
+--------------------------+-----------------------+-----------------------+
| ``NO_TORCH_COMPILE``     | 0 or 1                | 1: enable             |
|                          |                       | torch.compile         |
|                          |                       |                       |
|                          |                       | 0: disable            |
|                          |                       | torch.compile         |
|                          |                       | (default)             |
+--------------------------+-----------------------+-----------------------+
| ``SEQ_LENGTH``           |                       | Input sequence length |
+--------------------------+-----------------------+-----------------------+
| ``GEMM_TUNING``          | 0 or 1                | 1: enable GEMM tuning |
|                          |                       |                       |
|                          |                       | 0: disable GEMM       |
|                          |                       | tuning                |
+--------------------------+-----------------------+-----------------------+
| ``USE_FLASH_ATTN``       | 0 or 1                | 0: disable Flash      |
|                          |                       | Attention             |
|                          |                       |                       |
|                          |                       | 1: enable Flash       |
|                          |                       | Attention             |
+--------------------------+-----------------------+-----------------------+
| ``ENABLE_PROFILING``     | 0 or 1                | 0: disable torch      |
|                          |                       | profiling             |
|                          |                       |                       |
|                          |                       | 1: enable torch       |
|                          |                       | profiling             |
+--------------------------+-----------------------+-----------------------+
| ``MODEL_SIZE``           |                       | The size of the       |
|                          |                       | model: 7B, 70B, and   |
|                          |                       | so on                 |
+--------------------------+-----------------------+-----------------------+
| ``TOTAL_ITERS``          |                       | Total number of       |
|                          |                       | iterations            |
+--------------------------+-----------------------+-----------------------+
| ``transformer-impl``     | transformer_engine or | Transformer           |
|                          | local                 | implementation.       |
|                          |                       | transformer_engine is |
|                          |                       | enabled by default    |
+--------------------------+-----------------------+-----------------------+
Benchmarking examples
^^^^^^^^^^^^^^^^^^^^^
.. tab-set::
.. tab-item:: Single node training
:sync: single
Use this command to run training with the Llama 2 7B model on a single node. You can specify
variables such as MBS, BS, TP, the datatype, and so on.
.. code-block:: bash

   TEE_OUTPUT=1 MBS=5 BS=120 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
   SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
See the sample output:
.. image:: ../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
:width: 800
.. tab-item:: Multi node training
:sync: multi
Launch the Docker container on each node.
In this example, run training with the Llama 2 7B model on 2 nodes, specifying MBS, BS, TP, the
datatype, and so on.
On the master node:
.. code-block:: bash

   TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
   SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
On the worker node:
.. code-block:: bash

   TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
   SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
Sample output for 2-node training:
Master node:
.. image:: ../../data/how-to/rocm-for-ai/2-node-training-master.png
:width: 800
Worker node:
.. image:: ../../data/how-to/rocm-for-ai/2-node-training-worker.png
:width: 800
.. _rocm-for-ai-fine-tune:

Fine-tuning your model
======================

ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
example, LoRA, QLoRA, PEFT, and FSDP. Learn more about challenges and solutions for model fine-tuning in
:doc:`../llm-fine-tuning-optimization/index`.

The following developer blogs showcase examples of how to fine-tune a model on an AMD accelerator or GPU.

* Fine-tuning Llama2 with LoRA

  * `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/llama2-lora/README.html>`_

* Fine-tuning Llama2 with QLoRA

  * `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/llama2-Qlora/README.html>`_

* Fine-tuning a BERT-based LLM for a text classification task using JAX

  * `LLM distributed supervised fine-tuning with JAX — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_

* Fine-tuning StarCoder using PEFT

  * `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/starcoder-fine-tune/README.html>`_

* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``

  * `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
    single/multi-node GPUs <https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart/finetuning>`_
View File
@@ -537,6 +537,8 @@ installation was successful, refer to the
:doc:`rocm-install-on-linux:install/post-install`.
Should verification fail, consult :doc:`/how-to/system-debugging`.
.. _mi300x-hardware-verification-with-rocm:
Hardware verification with ROCm
-------------------------------
View File
@@ -2062,11 +2062,10 @@ collectives.
Multi-node FSDP and RCCL settings
---------------------------------
It's recommended to use high-priority HIP streams with RCCL.
The simplest way to enable this is by using the nightly PyTorch wheels, as the required changes from
`PR #122830 <https://github.com/pytorch/pytorch/pull/122830>`_ were not included in the PyTorch 2.3
release but are available in the nightly builds.
When using PyTorch's FSDP (Fully Sharded Data Parallel) feature, the HIP
streams used by RCCL and the HIP streams used for compute kernels do not
always overlap well. As a workaround, it's recommended to use
high-priority HIP streams with RCCL.
To configure high-priority streams:
View File
@@ -37,7 +37,6 @@ ROCm documentation is organized into the following categories:
:::{grid-item-card} How to
:class-body: rocm-card-banner rocm-hue-12
* [Programming guide](./how-to/hip_programming_guide.rst)
* [Use ROCm for AI](./how-to/rocm-for-ai/index.rst)
* [Use ROCm for HPC](./how-to/rocm-for-hpc/index.rst)
* [Fine-tune LLMs and inference optimization](./how-to/llm-fine-tuning-optimization/index.rst)
@@ -55,7 +54,6 @@ ROCm documentation is organized into the following categories:
:class-body: rocm-card-banner rocm-hue-8
* [GPU architecture overview](./conceptual/gpu-arch.md)
* [GPU memory](./conceptual/gpu-memory.md)
* [Input-Output Memory Management Unit (IOMMU)](./conceptual/iommu.rst)
* [File structure (Linux FHS)](./conceptual/file-reorg.md)
* [GPU isolation techniques](./conceptual/gpu-isolation.md)
View File
@@ -40,6 +40,8 @@ subtrees:
title: Installation
- file: how-to/rocm-for-ai/train-a-model.rst
title: Train a model
- file: how-to/rocm-for-ai/scale-model-training.rst
title: Scale model training
- file: how-to/rocm-for-ai/hugging-face-models.rst
title: Run models from Hugging Face
- file: how-to/rocm-for-ai/deploy-your-model.rst
@@ -144,8 +146,6 @@ subtrees:
title: AMD Instinct MI100/CDNA1 ISA
- url: https://www.amd.com/system/files/documents/amd-cdna-whitepaper.pdf
title: White paper
- file: conceptual/gpu-memory.md
title: GPU memory
- file: conceptual/iommu.rst
title: Input-Output Memory Management Unit (IOMMU)
- file: conceptual/file-reorg.md
Submodule libs/AMDMIGraphX deleted from 4b20cbc960
Submodule libs/HIP deleted from 2451439986
Submodule libs/HIPIFY deleted from f156b776ec
Submodule libs/MIOpen deleted from a85ca8a54f
Submodule libs/MIVisionX deleted from f0e86f9638
Submodule libs/ROCdbgapi deleted from 61dfa5620c
Submodule libs/ROCgdb deleted from 16782e8d04
Submodule libs/Tensile deleted from df4be50f2c
Submodule libs/amdsmi deleted from 2858e51b4e
Submodule libs/clr deleted from fa1d09cbda
Submodule libs/half deleted from 1ddada2251
Submodule libs/hip-tests deleted from 61f4742c4d
Submodule libs/hipBLAS deleted from e75831fa5e
Submodule libs/hipBLASLt deleted from 9f30df5504
Submodule libs/hipCUB deleted from a4b3b193ee
Submodule libs/hipFFT deleted from 85a2d27308
Submodule libs/hipRAND deleted from 08810454ad
Submodule libs/hipSOLVER deleted from be093932fd
Submodule libs/hipSPARSE deleted from 54071d80f5
Submodule libs/hipSPARSELt deleted from ac2f73ab21
Submodule libs/hipTensor deleted from 68014b5c76
Submodule libs/hipfort deleted from e068c4372e
Submodule libs/hipother deleted from ed5b3fbed8
Submodule libs/rccl deleted from eef7b2918c
Submodule libs/rdc deleted from 3bb5c700a1
Submodule libs/rocAL deleted from 3947ad031c
Submodule libs/rocALUTION deleted from 9ed79898ae
Submodule libs/rocBLAS deleted from 8ebd6c11c4
Submodule libs/rocDecode deleted from a99eb1d3d7
Submodule libs/rocFFT deleted from 3806d68ba7
Submodule libs/rocJPEG deleted from d2e513133c
Submodule libs/rocPRIM deleted from 1eefdb730b
Submodule libs/rocPyDecode deleted from 2025c51b0d
Submodule libs/rocRAND deleted from 07b4fba423
Submodule libs/rocSOLVER deleted from ee3ec9d2c2
Submodule libs/rocSPARSE deleted from ed5da19958
Submodule libs/rocThrust deleted from c33822fe39
Submodule libs/rocWMMA deleted from ba38cf3015
Submodule libs/rocm-cmake deleted from 489ef56a39
Submodule libs/rocm-core deleted from ce2be3b65a
Submodule libs/rocminfo deleted from 9f6d7cdf6b
Submodule libs/rocprofiler deleted from ec0e166dfe
Submodule libs/roctracer deleted from d5bbc8aa77
Submodule libs/rpp deleted from fe68edd53a
View File
@@ -1,331 +0,0 @@
'''
This script sets up git submodules based on a manifest file. It performs the following tasks:
1. Finds the root directory of the project.
2. Parses command line arguments to get the manifest file and output folder paths.
3. Parses the manifest file to get a list of projects.
4. Gets a list of existing submodules.
5. Removes submodules that are not in the manifest file.
6. Adds new submodules from the manifest file.
7. Updates existing submodules to the specified revision.
'''
import os
import subprocess
import sys
import argparse
import pathlib
import logging
import xml.etree.ElementTree as ET
class Project:
"""
Project class to store project information.
Attributes:
name (str): Name of the project.
revision (str): Revision of the project.
path (str): Path to the project.
Methods:
__str__: Returns a string representation of the project.
__repr__: Returns a string representation of the project.
"""
def __init__(self, **kwargs):
'''
Project class to store project information
:param name: Name of the project
:param revision: Revision of the project
:param path: Path to the project
'''
self.name = kwargs.get('name')
self.revision = kwargs.get('revision')
self.path = kwargs.get('path')
def __str__(self):
return f"Project {self.name} at {self.path} with revision {self.revision}"
def __repr__(self):
return self.__str__()
# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
DARK_RED='\033[0;31m'
NC='\033[0m' # No Color
def find_root() -> str:
'''
Find the root directory of the project
:return: Root directory of the project
'''
current = os.getcwd()
while current != "/":
if os.path.exists(os.path.join(current, ".git")):
return current
current = os.path.dirname(current)
print("Could not find root directory", file=sys.stderr)
sys.exit(1)
def run_command(command) -> list:
'''
Run a command in the shell
:param command: Command to run
:return: List of errors
'''
error_log = []
print(f"Running command: {command}")
# subprocess.run(command, shell=True, check=True)
result = subprocess.run(command, shell=True, check=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
print(f"{RED}Error running command: {result.stderr.decode('utf-8')}{NC}",
file=sys.stderr)
logging.error("Error running command: %s", command)
logging.error("\t%s", result.stderr.decode('utf-8'))
error_log.append(f"Error running command: {command}")
error_log.append(f"\t{result.stderr.decode('utf-8')}\n")
else:
if result.stdout:
print(result.stdout.decode('utf-8'))
return error_log
def parse_arguments(project_root: str) -> argparse.Namespace:
'''
Parse command line arguments
:param project_root: Root directory of the project
:return: Parsed arguments
'''
parser = argparse.ArgumentParser(description="Setup submodules based on a manifest file.")
parser.add_argument("-m", "--manifest", required=False,
default=f'{project_root}/default.xml', help="Path to the manifest file")
parser.add_argument("-o", "--output", required=False,
default='libs', help="Path to the submodule folder")
return parser.parse_args()
def parse_manifest(manifest_path: str, output_path: str) -> list:
'''
Parse the manifest file and return a list of projects
:param manifest_path: Path to the manifest file
:param output_path: Path to the output folder
:return: List of projects
'''
tree = ET.parse(manifest_path)
root = tree.getroot()
defaults = root.find('default')
default_revision = None
if defaults is not None:
default_revision = defaults.get('revision')
# print(f"Default revision: {default_revision}")
projects = []
for project in root.findall('project'):
name = project.get('name')
revision = project.get('revision', default_revision)
path = os.path.join(output_path, project.get('path', name))
projects.append(Project(name = name,
revision = revision,
path = path))
return projects
def get_existing_submodules(project_root: str) -> list:
'''
Get a list of existing submodules
:param project_root: Root directory of the project
:return: List of existing submodules
'''
gitmodules_path = os.path.join(project_root, ".gitmodules")
existing_submodules = []
if os.path.exists(gitmodules_path):
with open(gitmodules_path, "r", encoding="utf-8") as gitmodules_file:
for line in gitmodules_file:
line = line.strip()
if line.startswith("[submodule"):
submodule_name = pathlib.Path(line.split('"')[1]).as_posix()
existing_submodules.append(submodule_name)
return existing_submodules
def remove_submodule(project_root: str, module_path: str) -> list:
'''
Remove a submodule
:param project_root: Root directory of the project
:param module_path: Path to the submodule
:return: List of errors
'''
error_log = []
gitmodules_path = os.path.join(project_root, ".gitmodules")
error_log.extend(run_command(f"git submodule deinit -f {module_path}"))
error_log.extend(run_command(f"rm -rf {module_path}"))
error_log.extend(run_command(f"rm -rf .git/modules/{module_path}"))
# error_log.extend(run_command(f"git rm -r --cached {module_path}"))
# # Remove the submodule from the .gitmodules file
with open(gitmodules_path, "r", encoding="utf-8") as gitmodules_file:
lines = gitmodules_file.readlines()
with open(gitmodules_path, "w", encoding="utf-8") as gitmodules_file:
skip = False
for line in lines:
if line.strip().startswith(f"[submodule \"{module_path}\"]"):
skip = True
elif skip and line.strip().startswith("[submodule"):
skip = False
if not skip:
gitmodules_file.write(line)
return error_log
def main():
'''
Main function
'''
# Set up logging
log_file = os.path.join(os.path.expanduser("~"), "submodule_setup.log")
logging.basicConfig(level=logging.DEBUG,
filename=log_file,
filemode='w',
format='%(asctime)s - %(levelname)s - %(message)s')
# Save errors to a list to output at the end
error_log = []
project_root = find_root()
os.chdir(project_root)
print(f"Found project root at {project_root}")
args = parse_arguments(project_root)
print(f"Using manifest file at {args.manifest}")
# Check if manifest file exists
if not os.path.exists(args.manifest):
print(f"Manifest file {args.manifest} does not exist", file=sys.stderr)
sys.exit(1)
# Check if output folder exists
print(f"Using output folder at {args.output}\n")
if not os.path.exists(args.output):
print(f"Output folder {args.output} does not exist. Creating it.")
os.makedirs(args.output)
# Create a list of existing submodules
existing_submodules = get_existing_submodules(project_root)
for module in existing_submodules:
logging.debug("Existing submodule %s", module)
logging.debug("=====\n")
# Create a list of projects from the manifest file
projects = parse_manifest(args.manifest, args.output)
for project in projects:
logging.debug("Manifest Project %s to %s at %s",
project.name,
project.path,
project.revision)
logging.debug("=====\n")
print('\n*** Initializing submodules ***\n')
run_command("git submodule update --init")
print(f'\n{DARK_RED}*** Removing old projects ***{NC}\n')
# If project is in existing submodules and not in manifest, remove it
removed_modules = []
project_paths = {project.path for project in projects}
for module in existing_submodules:
if module not in project_paths:
print(f"{DARK_RED}Removing submodule {module}{NC}")
logging.info("Removing submodule %s", module)
removed_modules.append(module)
error_log.extend(remove_submodule(project_root, module))
print()
# Remove the submodules from the list of existing submodules
existing_submodules = [module for module in existing_submodules
if module not in removed_modules]
print(f'\n{GREEN}*** Adding new projects ***{NC}\n')
# Add new submodules
for project in projects:
if project.path not in existing_submodules:
print(f"{GREEN}Adding submodule ../{project.name}{NC}")
logging.info("Adding submodule ../%s to %s", project.name, project.path)
error_log.extend(
run_command(f"git submodule add ../{project.name} {project.path}"))
error_log.extend(
run_command(f"cd {project.path} "
f"&& git checkout {project.revision} "
f"&& cd {project_root}"))
print()
print(f'\n{BLUE}*** Updating existing projects ***{NC}\n')
# Update existing submodules
for project in projects:
if project.path in existing_submodules and project.path not in removed_modules:
print(f"{BLUE}Updating submodule {project.path}{NC}")
logging.info("Updating submodule %s", project.path)
error_log.extend(
run_command(f"cd {project.path} && "
"git fetch --tag && "
f"git checkout {project.revision} "
f"&& cd {project_root}"))
print()
# Print errors at the end
if error_log:
print(f"\n{RED}Errors occurred. Please check the logs: {log_file}{NC}\n"
"Error Summary:\n",
file=sys.stderr)
logging.error("Errors occurred. Please check the logs: %s", log_file)
logging.error("Error Summary:")
for error in error_log:
print(error, file=sys.stderr)
logging.error(error)
else:
print(f"{GREEN}All submodules updated successfully{NC}")
if __name__ == "__main__":
main()
View File
@@ -1,150 +0,0 @@
#!/usr/bin/bash
# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
printHelp() {
# Print the help message
printf -- \
"
Usage: update-submodules.sh [-t <tag>] [-p <path>]
Description: Update submodules to the ROCm release tag
Options:
-h, --help: Print the help message
-t, --tag: The ROCm release tag to update the submodules to
-p, --path: The path to the submodules directory
Example:
./update-submodules.sh -t rocm-6.1.0 -p ../../libs"
}
updateSubmodules() {
# Update the submodules in the given directory to the desired tag
# $1: The directory to update the submodules in
local directory=$1
pushd "$directory" >> /dev/null || exit
for d in */
do
pushd "$d" >> /dev/null || exit
if [[ -e .git ]]; then
echo -e "${GREEN}${d} is a git folder${NC}"
if ! git fetch --all; then
echo -e "${RED}Failed to fetch for ${d}${NC}"
# Save the current directory to an array to output the list of
# failed fetches at the end.
gitFetchErrors+=("$d")
fi
# Checkout the desired tag
if ! git checkout tags/"${RocmRelease}"; then
echo -e "${RED}Failed to checkout tag ${RocmRelease} for ${d}${NC}"
# Save the current directory to an array to output the list of
# failed checkouts at the end.
gitCheckoutErrors+=("$d")
fi
else
echo -e "${RED}${d} is NOT a git folder${NC}"
# Save the current directory to an array to output the list of
# non-git folders at the end.
nonGitFolders+=("$d")
fi
echo -e "${NC}"
popd >> /dev/null || exit
done
popd >> /dev/null || exit
}
#######
# MAIN PROGRAM
#######
# Default values
RocmRelease=rocm-6.1.0
PathToSubmodules=../../libs
# Parse command line parameters.
while [ $# -gt 0 ]; do
case $1 in
-h | --help )
printHelp
exit 0
;;
-t | --tag )
shift
RocmRelease=$1
shift
;;
-p | --path )
shift
PathToSubmodules=$1
shift
;;
* )
shift
;;
esac
done
echo "********************************"
echo -e "${BLUE}Path to Submodules: ${PathToSubmodules}${NC}"
pushd "$PathToSubmodules" >> /dev/null || exit
echo -e "${BLUE}Syncing to Tag: ${RocmRelease}.${NC}"
echo "********************************"
echo
# Update submodules in the current directory
updateSubmodules "."
# Update the `openmp-extras` modules
updateSubmodules "openmp-extras"
popd >> /dev/null || exit
# Output summary of errors
echo "********************************"
echo -e "${BLUE}*** Error Report ***${NC}"
echo "********************************"
echo
if [ ${#nonGitFolders[@]} -gt 0 ]; then
echo -e "${RED}The following folders are not git folders:${NC}"
for d in "${nonGitFolders[@]}"
do
echo "${d}"
done
echo
fi
if [ ${#gitFetchErrors[@]} -gt 0 ]; then
echo -e "${RED}The following folders failed to fetch:${NC}"
for d in "${gitFetchErrors[@]}"
do
echo "${d}"
done
echo
fi
if [ ${#gitCheckoutErrors[@]} -gt 0 ]; then
echo -e "${RED}The following folders failed to checkout tag ${RocmRelease}:${NC}"
for d in "${gitCheckoutErrors[@]}"
do
echo "${d}"
done
echo
fi