Mirror of https://github.com/ROCm/ROCm.git (synced 2026-01-13 16:48:03 -05:00)

Compare commits: 3 commits, jax_fix...rocm-submo
| Author | SHA1 | Date |
|---|---|---|
| | 602cc00fd6 | |
| | 222963b85c | |
| | 96ccfcb026 | |
@@ -214,7 +214,7 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
     parameters:
       componentName: MIOpen
-      testParameters: '--output-on-failure --force-new-ctest-process --output-junit test_output.xml --exclude-regex "test_rnn_seq_api|GPU_Conv2dTuningAsm_FP32"'
+      testParameters: '--output-on-failure --force-new-ctest-process --output-junit test_output.xml --exclude-regex test_rnn_seq_api'
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
     parameters:
       aptPackages: ${{ parameters.aptPackages }}
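For context, the testParameters string above is handed to CTest. A hedged sketch of the equivalent direct invocation (assuming the test template simply forwards these flags to ctest from the build directory, which this diff does not show):

```bash
# Run the MIOpen suite the way testParameters describes: print output on
# failure, isolate each test process, emit JUnit XML, and skip the excluded
# test by regex. The build directory path is an assumption.
cd build
ctest --output-on-failure --force-new-ctest-process \
      --output-junit test_output.xml \
      --exclude-regex test_rnn_seq_api
```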
@@ -15,7 +15,6 @@ parameters:
   type: object
   default:
     - bison
     - cmake
     - dejagnu
     - flex
     - libbabeltrace-dev

@@ -40,69 +39,17 @@ parameters:
 - name: jobMatrix
   type: object
   default:
     testJobs:
     buildTestJobs:
       - gfx942:
           target: gfx942
       - gfx90a:
           target: gfx90a

 jobs:
 - job: ROCgdb
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
   - name: PKG_CONFIG_PATH
     value: $(Agent.BuildDirectory)/rocm/share/pkgconfig
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
   workspace:
     clean: all
   steps:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
     parameters:
       aptPackages: ${{ parameters.aptPackages }}
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
     parameters:
       checkoutRef: ${{ parameters.checkoutRef }}
       dependencyList: ${{ parameters.rocmDependencies }}
       aggregatePipeline: ${{ parameters.aggregatePipeline }}
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-autotools.yml
     parameters:
       configureFlags: >-
         --program-prefix=roc
         --enable-64-bit-bfd
         --enable-targets="x86_64-linux-gnu,amdgcn-amd-amdhsa"
         --disable-ld
         --disable-gas
         --disable-gdbserver
         --disable-sim
         --enable-tui
         --disable-gdbtk
         --disable-shared
         --disable-gprofng
         --with-expat
         --with-system-zlib
         --without-guile
         --with-babeltrace
         --with-lzma
         --with-python=python3
         --with-rocm-dbgapi=$(Agent.BuildDirectory)/rocm
         LDFLAGS="-Wl,--enable-new-dtags,-rpath=$(Agent.BuildDirectory)/rocm/lib"
       makeCallPrefix: LD_RUN_PATH='${ORIGIN}/../lib'
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml

 - ${{ each job in parameters.jobMatrix.testJobs }}:
   - job: ROCgdb_test_${{ job.target }}
     dependsOn: ROCgdb
 - ${{ each job in parameters.jobMatrix.buildTestJobs }}:
   - job: ROCgdb_build_test_${{ job.target }}
     condition:
       and(succeeded(),
         and(
           eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'),
           not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])),
           eq(${{ parameters.aggregatePipeline }}, False)

@@ -152,6 +99,8 @@ jobs:
         --with-rocm-dbgapi=$(Agent.BuildDirectory)/rocm
         LDFLAGS="-Wl,--enable-new-dtags,-rpath=$(Agent.BuildDirectory)/rocm/lib"
       makeCallPrefix: LD_RUN_PATH='${ORIGIN}/../lib'
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
   - task: Bash@3
     displayName: Setup test environment
     inputs:

@@ -160,6 +109,7 @@ jobs:
         # Assuming that /opt is no longer persistent across runs, test environments are fully ephemeral
         sudo ln -s $(Agent.BuildDirectory)/rocm /opt/rocm
         echo "##vso[task.prependpath]/opt/rocm/bin"
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml
   - task: Bash@3
     displayName: check-gdb
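The check-gdb task name suggests this job drives GDB's DejaGnu testsuite (DejaGnu appears in the apt package list above). The actual script body is not shown in this diff; as a hedged sketch only, such a step typically reduces to:

```bash
# Hypothetical sketch of a check-gdb step: invoke the GDB testsuite's make
# target from the autotools build tree. The build-tree path and the
# RUNTESTFLAGS value are assumptions, not taken from the pipeline.
cd build
make check-gdb RUNTESTFLAGS="--target_board=unix"
```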
@@ -27,7 +27,6 @@ parameters:
   type: object
   default:
     - amdsmi
     - aomp
     - clr
     - hipBLAS-common
     - hipBLASLt

@@ -44,7 +43,6 @@ parameters:
   type: object
   default:
     - amdsmi
     - aomp
     - clr
     - hipBLAS-common
     - hipBLASLt

@@ -110,7 +108,6 @@ jobs:
         -DROCM_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
         -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_CXX_FLAGS=-I$(Agent.BuildDirectory)/rocm/llvm/include
         -DCPACK_PACKAGING_INSTALL_PREFIX=$(Build.BinariesDirectory)
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
@@ -26,11 +26,9 @@ jobs:
     parameters:
       componentName: HIP
       pipelineId: $(HIP_PIPELINE_ID)
   - task: Bash@3
     displayName: Copy HIP artifacts
     inputs:
       targetType: inline
       script: cp -a $(Agent.BuildDirectory)/rocm/* $(Build.BinariesDirectory)/
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml
     parameters:
       sourceDir: $(Agent.BuildDirectory)/rocm
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml
@@ -89,8 +89,6 @@ jobs:
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
     parameters:
       gpuTarget: ${{ job.target }}
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml
   # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
   #   parameters:

@@ -124,8 +122,6 @@ jobs:
       registerROCmPackages: true
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml
     parameters:
       gpuTarget: ${{ job.target }}
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
     parameters:

@@ -151,3 +147,4 @@ jobs:
       environment: test
       gpuTarget: ${{ job.target }}
       registerROCmPackages: true
       optSymLink: true
@@ -183,7 +183,6 @@ jobs:
     parameters:
       componentName: rocm-examples
       testDir: $(Build.SourcesDirectory)/build
       testParameters: '--output-on-failure --force-new-ctest-process --output-junit test_output.xml --exclude-regex "rocfft_callback"'
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
     parameters:
       aptPackages: ${{ parameters.aptPackages }}
@@ -67,7 +67,6 @@ parameters:
     - rocprofiler-register
     - rocprofiler-sdk
     - ROCR-Runtime
     - roctracer

 - name: jobMatrix
   type: object
@@ -463,7 +463,7 @@ steps:
     displayName: 'List downloaded ROCm files'
     inputs:
       targetType: inline
-      script: ls -la1R $(Agent.BuildDirectory)/rocm
+      script: ls -1R $(Agent.BuildDirectory)/rocm
   - ${{ if eq(parameters.skipLibraryLinking, false) }}:
     - task: Bash@3
       displayName: 'Link ROCm shared libraries'
.gitmodules (vendored, new file, 251 lines)
@@ -0,0 +1,251 @@
[submodule "submodule-srcs/ROCR-Runtime"]
    path = submodule-srcs/ROCR-Runtime
    url = https://github.com/rocm/ROCR-Runtime
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/amdsmi"]
    path = submodule-srcs/amdsmi
    url = https://github.com/rocm/amdsmi
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rdc"]
    path = submodule-srcs/rdc
    url = https://github.com/rocm/rdc
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocm_bandwidth_test"]
    path = submodule-srcs/rocm_bandwidth_test
    url = https://github.com/rocm/rocm_bandwidth_test
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocm_smi_lib"]
    path = submodule-srcs/rocm_smi_lib
    url = https://github.com/rocm/rocm_smi_lib
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocm-core"]
    path = submodule-srcs/rocm-core
    url = https://github.com/rocm/rocm-core
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocm-examples"]
    path = submodule-srcs/rocm-examples
    url = https://github.com/rocm/rocm-examples
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocminfo"]
    path = submodule-srcs/rocminfo
    url = https://github.com/rocm/rocminfo
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocprofiler"]
    path = submodule-srcs/rocprofiler
    url = https://github.com/rocm/rocprofiler
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocprofiler-register"]
    path = submodule-srcs/rocprofiler-register
    url = https://github.com/rocm/rocprofiler-register
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocprofiler-sdk"]
    path = submodule-srcs/rocprofiler-sdk
    url = https://github.com/rocm/rocprofiler-sdk
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocprofiler-compute"]
    path = submodule-srcs/rocprofiler-compute
    url = https://github.com/rocm/rocprofiler-compute
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocprofiler-systems"]
    path = submodule-srcs/rocprofiler-systems
    url = https://github.com/rocm/rocprofiler-systems
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/roctracer"]
    path = submodule-srcs/roctracer
    url = https://github.com/rocm/roctracer
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/HIP"]
    path = submodule-srcs/HIP
    url = https://github.com/rocm/HIP
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hip-tests"]
    path = submodule-srcs/hip-tests
    url = https://github.com/rocm/hip-tests
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/HIPIFY"]
    path = submodule-srcs/HIPIFY
    url = https://github.com/rocm/HIPIFY
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/clr"]
    path = submodule-srcs/clr
    url = https://github.com/rocm/clr
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipother"]
    path = submodule-srcs/hipother
    url = https://github.com/rocm/hipother
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/half"]
    path = submodule-srcs/half
    url = https://github.com/rocm/half
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/llvm-project"]
    path = submodule-srcs/llvm-project
    url = https://github.com/rocm/llvm-project
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/spirv-llvm-translator"]
    path = submodule-srcs/spirv-llvm-translator
    url = https://github.com/rocm/spirv-llvm-translator
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/ROCdbgapi"]
    path = submodule-srcs/ROCdbgapi
    url = https://github.com/rocm/ROCdbgapi
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/ROCgdb"]
    path = submodule-srcs/ROCgdb
    url = https://github.com/rocm/ROCgdb
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocr_debug_agent"]
    path = submodule-srcs/rocr_debug_agent
    url = https://github.com/rocm/rocr_debug_agent
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/AMDMIGraphX"]
    path = submodule-srcs/AMDMIGraphX
    url = https://github.com/rocm/AMDMIGraphX
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/MIOpen"]
    path = submodule-srcs/MIOpen
    url = https://github.com/rocm/MIOpen
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/MIVisionX"]
    path = submodule-srcs/MIVisionX
    url = https://github.com/rocm/MIVisionX
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/ROCmValidationSuite"]
    path = submodule-srcs/ROCmValidationSuite
    url = https://github.com/rocm/ROCmValidationSuite
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/Tensile"]
    path = submodule-srcs/Tensile
    url = https://github.com/rocm/Tensile
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/composable_kernel"]
    path = submodule-srcs/composable_kernel
    url = https://github.com/rocm/composable_kernel
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipBLAS-common"]
    path = submodule-srcs/hipBLAS-common
    url = https://github.com/rocm/hipBLAS-common
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipBLAS"]
    path = submodule-srcs/hipBLAS
    url = https://github.com/rocm/hipBLAS
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipBLASLt"]
    path = submodule-srcs/hipBLASLt
    url = https://github.com/rocm/hipBLASLt
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipCUB"]
    path = submodule-srcs/hipCUB
    url = https://github.com/rocm/hipCUB
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipFFT"]
    path = submodule-srcs/hipFFT
    url = https://github.com/rocm/hipFFT
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipRAND"]
    path = submodule-srcs/hipRAND
    url = https://github.com/rocm/hipRAND
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipSOLVER"]
    path = submodule-srcs/hipSOLVER
    url = https://github.com/rocm/hipSOLVER
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipSPARSE"]
    path = submodule-srcs/hipSPARSE
    url = https://github.com/rocm/hipSPARSE
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipSPARSELt"]
    path = submodule-srcs/hipSPARSELt
    url = https://github.com/rocm/hipSPARSELt
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipTensor"]
    path = submodule-srcs/hipTensor
    url = https://github.com/rocm/hipTensor
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/hipfort"]
    path = submodule-srcs/hipfort
    url = https://github.com/rocm/hipfort
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rccl"]
    path = submodule-srcs/rccl
    url = https://github.com/rocm/rccl
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocAL"]
    path = submodule-srcs/rocAL
    url = https://github.com/rocm/rocAL
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocALUTION"]
    path = submodule-srcs/rocALUTION
    url = https://github.com/rocm/rocALUTION
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocBLAS"]
    path = submodule-srcs/rocBLAS
    url = https://github.com/rocm/rocBLAS
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocDecode"]
    path = submodule-srcs/rocDecode
    url = https://github.com/rocm/rocDecode
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocJPEG"]
    path = submodule-srcs/rocJPEG
    url = https://github.com/rocm/rocJPEG
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocPyDecode"]
    path = submodule-srcs/rocPyDecode
    url = https://github.com/rocm/rocPyDecode
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocFFT"]
    path = submodule-srcs/rocFFT
    url = https://github.com/rocm/rocFFT
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocPRIM"]
    path = submodule-srcs/rocPRIM
    url = https://github.com/rocm/rocPRIM
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocRAND"]
    path = submodule-srcs/rocRAND
    url = https://github.com/rocm/rocRAND
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocSHMEM"]
    path = submodule-srcs/rocSHMEM
    url = https://github.com/rocm/rocSHMEM
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocSOLVER"]
    path = submodule-srcs/rocSOLVER
    url = https://github.com/rocm/rocSOLVER
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocSPARSE"]
    path = submodule-srcs/rocSPARSE
    url = https://github.com/rocm/rocSPARSE
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocThrust"]
    path = submodule-srcs/rocThrust
    url = https://github.com/rocm/rocThrust
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocWMMA"]
    path = submodule-srcs/rocWMMA
    url = https://github.com/rocm/rocWMMA
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rocm-cmake"]
    path = submodule-srcs/rocm-cmake
    url = https://github.com/rocm/rocm-cmake
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/rpp"]
    path = submodule-srcs/rpp
    url = https://github.com/rocm/rpp
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/TransferBench"]
    path = submodule-srcs/TransferBench
    url = https://github.com/rocm/TransferBench
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/openmp-extras/aomp"]
    path = submodule-srcs/openmp-extras/aomp
    url = https://github.com/rocm/aomp
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/openmp-extras/aomp-extras"]
    path = submodule-srcs/openmp-extras/aomp-extras
    url = https://github.com/rocm/aomp-extras
    branch = release/rocm-rel-6.4
[submodule "submodule-srcs/ROCK-Kernel-Driver"]
    path = submodule-srcs/ROCK-Kernel-Driver
    url = https://github.com/rocm/ROCK-Kernel-Driver
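The listing above pins every component submodule to the release/rocm-rel-6.4 branch. As a hedged sketch of working with this layout selectively (the rocBLAS path is just one example from the list; these are standard git commands, not taken from this repository's docs):

```bash
# Clone without submodules first, then fetch only the components you need.
git clone https://github.com/ROCm/ROCm.git
cd ROCm

# Initialize a single submodule (path taken from the .gitmodules above).
git submodule update --init submodule-srcs/rocBLAS

# Track the branch recorded in .gitmodules (release/rocm-rel-6.4)
# instead of the pinned commit.
git submodule update --remote submodule-srcs/rocBLAS
```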
@@ -226,7 +226,6 @@ LM
LSAN
LSan
LTS
LanguageCrossEntropy
LoRA
MEM
MERCHANTABILITY

@@ -244,7 +243,6 @@ MMIOH
MMU
MNIST
MPI
MPT
MSVC
MVAPICH
MVFFR

@@ -261,7 +259,6 @@ Meta's
Miniconda
MirroredStrategy
Mixtral
MosaicML
Multicore
Multithreaded
MyEnvironment

@@ -332,7 +329,6 @@ PipelineParallel
PnP
PowerEdge
PowerShell
Pretrained
Pretraining
Profiler's
PyPi
CHANGELOG.md (10 lines changed)
@@ -6,7 +6,7 @@ different versions of the ROCm software stack and its components.

## ROCm 6.4.0

-See the [ROCm 6.4.0 release notes](https://rocm.docs.amd.com/en/docs-6.4.0/about/release-notes.html)
+See the [ROCm 6.4.0 release notes](https://rocm-stg.amd.com/en/latest/about/release-notes.html)
for a complete overview of this release.

### **AMD SMI** (25.3.0)

@@ -743,10 +743,6 @@ See the full [ROCm SMI changelog](https://github.com/ROCm/rocm_smi_lib/blob/rele
#### Added

- Support for VA-API and rocDecode tracing.
- Aggregation of MPI data collected across distributed nodes and ranks. The data is concatenated into a single proto file.

#### Changed

- Backend refactored to use [ROCprofiler-SDK](https://github.com/ROCm/rocprofiler-sdk) rather than [ROCProfiler](https://github.com/ROCm/rocprofiler) and [ROCTracer](https://github.com/ROCm/ROCTracer).

#### Resolved issues

@@ -757,9 +753,9 @@ See the full [ROCm SMI changelog](https://github.com/ROCm/rocm_smi_lib/blob/rele
- Fixed interruption in config file generation.

- Fixed segmentation fault while running rocprof-sys-instrument.
- Fixed an issue where running `rocprof-sys-causal` or using the `-I all` option with `rocprof-sys-sample` caused the system to become non-responsive.

- Fixed an issue where sampling multi-GPU Python workloads caused the system to stop responding.

#### Changed

- Backend refactored to use [ROCprofiler-SDK](https://github.com/ROCm/rocprofiler-sdk) rather than [ROCProfiler](https://github.com/ROCm/rocprofiler) and [ROCTracer](https://github.com/ROCm/ROCTracer).

### **rocPRIM** (3.4.0)
README.md (44 lines changed)
@@ -20,12 +20,17 @@ source software compilers, debuggers, and libraries. ROCm is fully integrated in
(ML) frameworks, such as PyTorch and TensorFlow.

## Getting the ROCm Source Code

AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. The source code for ROCm components can be cloned from each of the GitHub repositories using git.

AMD ROCm is built from open source software. It is, therefore, possible to modify the various components of ROCm by downloading the source code and rebuilding the components. The source code for ROCm components can be cloned from each of the GitHub repositories using git. For easy access to download the correct versions of each of these tools, the ROCm repository contains a repo manifest file called [default.xml](./default.xml). You can use this manifest file to download the source code for ROCm software.
There are two methods to clone and sync the ROCm sources. You can use either method.

## [Method 1]

For easy access to download the correct versions of each of these tools, the ROCm repository contains a repo manifest file called [default.xml](./default.xml). You can use this manifest file to download the source code for ROCm software.

### Installing the repo tool

The repo tool from Google allows you to manage multiple git repositories simultaneously. Run the following commands to install the repo tool:
We need the repo tool to work with the manifest file. The repo tool from Google allows you to manage multiple git repositories simultaneously. Run the following commands to install the repo tool:

```bash
mkdir -p ~/bin/
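# NOTE (editor's sketch): the diff hunk cuts off inside this block. The
# standard Google repo-tool installation, which this block presumably
# continues with, looks like:
curl https://storage.googleapis.com/git-repo-downloads/repo > ~/bin/repo
chmod a+x ~/bin/repo
```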
@@ -43,11 +48,12 @@ Some ROCm projects use the Git Large File Storage (LFS) format that may require
sudo apt-get install git-lfs
```

### Downloading the ROCm source code

The following example shows how to use the repo tool to download the ROCm source code. If you choose a directory other than ~/bin/ to install the repo tool, you must use that chosen directory in the code as shown below:

```bash
# --------------------------------------
# Step 1: clone source code
# --------------------------------------
mkdir -p ~/ROCm/
cd ~/ROCm/
export ROCM_VERSION=6.4.0
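# NOTE (editor's sketch): the hunk ends here. The repo init/sync commands
# shown later in this diff (in the "Build ROCm from source" section) would
# complete this block along these lines:
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.4.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync
```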
@@ -57,29 +63,35 @@ export ROCM_VERSION=6.4.0

**Note:** Using this sample code will cause the repo tool to download the open source code associated with the specified ROCm release. Ensure that you have ssh-keys configured on your machine for your GitHub ID prior to the download as explained at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh).

## Building the ROCm source code

## [Method 2]

This repository contains the source code for ROCm. Below you will find instructions for cloning the repository using submodules as an alternative to using the `repo` tool.

## Cloning with Git Submodules

As an alternative method, you can clone this repository and its submodules using Git's submodule functionality. This approach may be preferred if you are familiar with Git and wish to avoid using the `repo` tool.

To clone the repository along with all its submodules, use the following command:

```bash
# --------------------------------------
# Step 1: clone source code
# --------------------------------------
git clone --recurse-submodules --remote-submodules https://github.com/ROCm/ROCm.git
cd ROCm/submodule-srcs
```

Each ROCm component repository contains directions for building that component, such as the rocSPARSE documentation [Installation and Building for Linux](https://rocm.docs.amd.com/projects/rocSPARSE/en/latest/install/Linux_Install_Guide.html). Refer to the specific component documentation for instructions on building the repository.

Each release of the ROCm software supports specific hardware and software configurations. Refer to [System requirements (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/system-requirements.html) for the current supported hardware and OS.

## Build ROCm from source

The build uses as many processors as it can find so that it can build in parallel. Some of the compiles can consume as much as 10 GB of RAM, so make sure you have plenty of swap space!

By default, the ROCm build compiles for all supported GPU architectures and takes approximately 500 CPU hours.
Build time drops significantly if you limit the GPU architectures to build against, using the environment variable GPU_ARCHS, as mentioned below.
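For illustration, a hedged sketch of limiting the build to specific targets (the architecture names and the semicolon separator are assumptions; substitute the gfx targets for your own GPUs):

```bash
# Build only for MI200- and MI300-series targets instead of every
# supported architecture.
export GPU_ARCHS="gfx90a;gfx942"
```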
```bash
# --------------------------------------
# Step 1: clone source code
# --------------------------------------

mkdir -p ~/WORKSPACE/  # Or any folder name other than WORKSPACE
cd ~/WORKSPACE/
export ROCM_VERSION=6.4.0
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.4.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync

# --------------------------------------
# Step 2: Prepare build environment
```
RELEASE.md (32 lines changed)
@@ -253,19 +253,14 @@ Click {fab}`github` to go to the component's source code on GitHub.
  </tbody>
  <tbody class="rocm-components-libs rocm-components-communication tbody-reverse-zebra">
    <tr>
-     <th rowspan="2"></th>
-     <th rowspan="2">Communication</th>
+     <th rowspan="1"></th>
+     <th rowspan="1">Communication</th>
      <td><a href="https://rocm.docs.amd.com/projects/rccl/en/docs-6.4.0/index.html">RCCL</a></td>
      <td>2.21.5 ⇒ <a href="#rccl-2-22-3">2.22.3</a></td>
      <td><a href="https://github.com/ROCm/rccl"><i class="fab fa-github fa-lg"></i></a></td>
    </tr>
    <tr>
      <td><a href="https://github.com/ROCm/rocSHMEM">rocSHMEM</a></td>
      <td>2.0.0</td>
      <td><a href="https://github.com/ROCm/rocSHMEM"><i class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-libs rocm-components-math tbody-reverse-zebra">
+ <tbody class="rocm-components-libs rocm-components-math">
    <tr>
      <th rowspan="16"></th>
      <th rowspan="16">Math</th>

@@ -349,7 +344,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
      <td><a href="https://github.com/ROCm/Tensile"><i class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-libs rocm-components-primitives tbody-reverse-zebra">
+ <tbody class="rocm-components-libs rocm-components-primitives">
    <tr>
      <th rowspan="4"></th>
      <th rowspan="4">Primitives</th>

@@ -373,7 +368,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
      <td><a href="https://github.com/ROCm/rocThrust"><i class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-tools rocm-components-system tbody-reverse-zebra">
+ <tbody class="rocm-components-tools rocm-components-system">
    <tr>
      <th rowspan="7">Tools</th>
      <th rowspan="7">System management</th>

@@ -402,7 +397,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
      <td><a href="https://github.com/ROCm/ROCmValidationSuite"><i class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-tools rocm-components-perf">
+ <tbody class="rocm-components-tools rocm-components-perf tbody-reverse-zebra">
    <tr>
      <th rowspan="6"></th>
      <th rowspan="6">Performance</th>

@@ -443,7 +438,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
        class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-tools rocm-components-dev">
+ <tbody class="rocm-components-tools rocm-components-dev tbody-reverse-zebra">
    <tr>
      <th rowspan="5"></th>
      <th rowspan="5">Development</th>

@@ -479,7 +474,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
        class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-compilers tbody-reverse-zebra">
+ <tbody class="rocm-components-compilers">
    <tr>
      <th rowspan="2" colspan="2">Compilers</th>
      <td><a href="https://rocm.docs.amd.com/projects/HIPCC/en/docs-6.4.0/index.html">HIPCC</a></td>

@@ -494,7 +489,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
        class="fab fa-github fa-lg"></i></a></td>
    </tr>
  </tbody>
- <tbody class="rocm-components-runtimes tbody-reverse-zebra">
+ <tbody class="rocm-components-runtimes">
    <tr>
      <th rowspan="2" colspan="2">Runtimes</th>
      <td><a href="https://rocm.docs.amd.com/projects/HIP/en/docs-6.4.0/index.html">HIP</a></td>
@@ -1252,11 +1247,6 @@ See the full [ROCm SMI changelog](https://github.com/ROCm/rocm_smi_lib/blob/rele
#### Added

- Support for VA-API and rocDecode tracing.
- Aggregation of MPI data collected across distributed nodes and ranks. The data is concatenated into a single proto file.

#### Changed

- Backend refactored to use [ROCprofiler-SDK](https://github.com/ROCm/rocprofiler-sdk) rather than [ROCProfiler](https://github.com/ROCm/rocprofiler) and [ROCTracer](https://github.com/ROCm/ROCTracer).

#### Resolved issues

@@ -1267,9 +1257,9 @@ See the full [ROCm SMI changelog](https://github.com/ROCm/rocm_smi_lib/blob/rele
- Fixed interruption in config file generation.

- Fixed segmentation fault while running rocprof-sys-instrument.
- Fixed an issue where running `rocprof-sys-causal` or using the `-I all` option with `rocprof-sys-sample` caused the system to become non-responsive.

- Fixed an issue where sampling multi-GPU Python workloads caused the system to stop responding.

#### Changed

- Backend refactored to use [ROCprofiler-SDK](https://github.com/ROCm/rocprofiler-sdk) rather than [ROCProfiler](https://github.com/ROCm/rocprofiler) and [ROCTracer](https://github.com/ROCm/ROCTracer).

### **rocPRIM** (3.4.0)
@@ -81,7 +81,6 @@ additional licenses. Please review individual repositories for more information.
| [rocRAND](https://github.com/ROCm/rocRAND/) | [MIT](https://github.com/ROCm/rocRAND/blob/develop/LICENSE.txt) |
| [ROCr Debug Agent](https://github.com/ROCm/rocr_debug_agent/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocr_debug_agent/blob/amd-staging/LICENSE.txt) |
| [ROCR-Runtime](https://github.com/ROCm/ROCR-Runtime/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCR-Runtime/blob/amd-staging/LICENSE.txt) |
| [rocSHMEM](https://github.com/ROCm/rocSHMEM/) | [MIT](https://github.com/ROCm/rocSHMEM/blob/develop/LICENSE.md) |
| [rocSOLVER](https://github.com/ROCm/rocSOLVER/) | [BSD-2-Clause](https://github.com/ROCm/rocSOLVER/blob/develop/LICENSE.md) |
| [rocSPARSE](https://github.com/ROCm/rocSPARSE/) | [MIT](https://github.com/ROCm/rocSPARSE/blob/develop/LICENSE.md) |
| [rocThrust](https://github.com/ROCm/rocThrust/) | [Apache 2.0](https://github.com/ROCm/rocThrust/blob/develop/LICENSE) |
@@ -27,6 +27,7 @@ ROCm Version,6.4.0,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.5, 6.1.2
:doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.18.1, 2.17.1, 2.16.2","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
:doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.4.35,0.4.31,0.4.31,0.4.31,0.4.31,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.2,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0

@@ -52,7 +53,6 @@ ROCm Version,6.4.0,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.5, 6.1.2
,,,,,,,,,,,,,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,
:doc:`RCCL <rccl:index>`,2.22.3,2.21.5,2.21.5,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
`rocSHMEM <https://github.com/ROCm/rocSHMEM>`_,2.0.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
,,,,,,,,,,,,,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
@@ -77,7 +77,6 @@ compatibility and system requirements.
,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix:,,
:doc:`RCCL <rccl:index>`,2.22.3,2.21.5,2.20.5
`rocSHMEM <https://github.com/ROCm/rocSHMEM>`_ ,2.0.0,N/A,N/A
,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix:,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0
@@ -14,18 +14,17 @@ JAX provides a NumPy-like API, which combines automatic differentiation and the
Accelerated Linear Algebra (XLA) compiler to achieve high-performance machine
learning at scale.

JAX uses composable transformations of Python and NumPy through just-in-time
(JIT) compilation, automatic vectorization, and parallelization. To learn about
JAX, including profiling and optimizations, see the official `JAX documentation
JAX uses composable transformations of Python and NumPy through just-in-time (JIT) compilation,
automatic vectorization, and parallelization. To learn about JAX, including profiling and
optimizations, see the official `JAX documentation
<https://jax.readthedocs.io/en/latest/notebooks/quickstart.html>`_.

ROCm support for JAX is upstreamed, and users can build the official source code
with ROCm support:
ROCm support for JAX is upstreamed and users can build the official source code with ROCm
support:

- ROCm JAX release:

  - Offers AMD-validated and community :ref:`Docker images <jax-docker-compat>`
    with ROCm and JAX preinstalled.
  - Offers AMD-validated and community :ref:`Docker images <jax-docker-compat>` with ROCm and JAX pre-installed.

- ROCm JAX repository: `ROCm/jax <https://github.com/ROCm/jax>`_

@@ -37,8 +36,8 @@ with ROCm support:

- Official JAX repository: `jax-ml/jax <https://github.com/jax-ml/jax>`_

- See the `AMD GPU (Linux) installation section
  <https://jax.readthedocs.io/en/latest/installation.html#amd-gpu-linux>`_ in
  the JAX documentation.
  <https://jax.readthedocs.io/en/latest/installation.html#amd-gpu-linux>`_ in the JAX
  documentation.

.. note::
@@ -47,44 +46,6 @@ with ROCm support:
   `Community ROCm JAX Docker images <https://hub.docker.com/r/rocm/jax-community>`_
   follow upstream JAX releases and use the latest available ROCm version.

Use cases and recommendations
================================================================================

* The `nanoGPT in JAX <https://rocm.blogs.amd.com/artificial-intelligence/nanoGPT-JAX/README.html>`_
  blog explores the implementation and training of a Generative Pre-trained
  Transformer (GPT) model in JAX, inspired by Andrej Karpathy’s JAX-based
  nanoGPT. Comparing how essential GPT components—such as self-attention
  mechanisms and optimizers—are realized in JAX and JAX, also highlights
  JAX’s unique features.

* The `Optimize GPT Training: Enabling Mixed Precision Training in JAX using
  ROCm on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-mixed-precision/README.html>`_
  blog post provides a comprehensive guide on enhancing the training efficiency
  of GPT models by implementing mixed precision techniques in JAX, specifically
  tailored for AMD GPUs utilizing the ROCm platform.

* The `Supercharging JAX with Triton Kernels on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-triton/README.html>`_
  blog demonstrates how to develop a custom fused dropout-activation kernel for
  matrices using Triton, integrate it with JAX, and benchmark its performance
  using ROCm.

* The `Distributed fine-tuning with JAX on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
  outlines the process of fine-tuning a Bidirectional Encoder Representations
  from Transformers (BERT)-based large language model (LLM) using JAX for a text
  classification task. The blog post discuss techniques for parallelizing the
  fine-tuning across multiple AMD GPUs and assess the model's performance on a
  holdout dataset. During the fine-tuning, a BERT-base-cased transformer model
  and the General Language Understanding Evaluation (GLUE) benchmark dataset was
  used on a multi-GPU setup.

* The `MI300X workload optimization guide <https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html>`_
  provides detailed guidance on optimizing workloads for the AMD Instinct MI300X
  accelerator using ROCm. The page is aimed at helping users achieve optimal
  performance for deep learning and other high-performance computing tasks on
  the MI300X GPU.

For more use cases and recommendations, see `ROCm JAX blog posts <https://rocm.blogs.amd.com/blog/tag/jax.html>`_.

.. _jax-docker-compat:

Docker image compatibility
@@ -96,7 +57,7 @@ Docker image compatibility

AMD validates and publishes ready-made `ROCm JAX Docker images <https://hub.docker.com/r/rocm/jax>`_
with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories represent the latest JAX version from the official Docker Hub and are validated for
associated inventories are validated for
`ROCm 6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`_. Click the |docker-icon|
icon to view the image on Docker Hub.
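As a usage sketch (the tag here is a placeholder, not one of the validated tags; pick a tag from the tables that follow):

```bash
# Pull an AMD-published ROCm JAX image from Docker Hub and open a shell
# with the GPUs mapped in. --device flags are the standard ROCm container
# options for exposing the kernel driver and render nodes.
docker pull rocm/jax:latest
docker run -it --device=/dev/kfd --device=/dev/dri rocm/jax:latest bash
```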
@@ -160,12 +121,13 @@ associated inventories are tested for `ROCm 6.3.2 <https://repo.radeon.com/rocm/
     - Ubuntu 22.04
     - `3.10.16 <https://www.python.org/downloads/release/python-31016/>`_

Key ROCm libraries for JAX
Critical ROCm libraries for JAX
================================================================================

JAX functionality on ROCm is determined by its underlying library
dependencies. These ROCm components affect the capabilities, performance, and
feature set available to developers.
The functionality of JAX with ROCm is determined by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers. The versions described
are available in ROCm :version:`rocm_version`.

.. list-table::
   :header-rows: 1
@@ -253,10 +215,10 @@ feature set available to developers.
       distributed training, which involves parallel reductions or
       operations like ``jax.numpy.cumsum`` can use rocThrust.

Supported features
Supported and unsupported features
===============================================================================

The following table maps the public JAX API modules to their supported
The following table maps GPU-accelerated JAX modules to their supported
ROCm and JAX versions.

.. list-table::
@@ -264,8 +226,8 @@ ROCm and JAX versions.

   * - Module
     - Description
     - As of JAX
     - As of ROCm
     - Since JAX
     - Since ROCm
   * - ``jax.numpy``
     - Implements the NumPy API, using the primitives in ``jax.lax``.
     - 0.1.56
@@ -293,11 +255,21 @@ ROCm and JAX versions.
       devices.
     - 0.3.20
     - 5.1.0
   * - ``jax.dlpack``
     - For exchanging tensor data between JAX and other libraries that support the
       DLPack standard.
     - 0.1.57
     - 5.0.0
   * - ``jax.distributed``
     - Enables the scaling of computations across multiple devices on a single
       machine or across multiple machines.
     - 0.1.74
     - 5.0.0
   * - ``jax.dtypes``
     - Provides utilities for working with and managing data types in JAX
       arrays and computations.
     - 0.1.66
     - 5.0.0
   * - ``jax.image``
     - Contains image manipulation functions like resize, scale and translation.
     - 0.1.57
@@ -311,10 +283,27 @@ ROCm and JAX versions.
       array.
     - 0.1.57
     - 5.0.0
   * - ``jax.profiler``
     - Contains JAX’s tracing and time profiling features.
     - 0.1.57
     - 5.0.0
   * - ``jax.stages``
     - Contains interfaces to stages of the compiled execution process.
     - 0.3.4
     - 5.0.0
   * - ``jax.tree``
     - Provides utilities for working with tree-like container data structures.
     - 0.4.26
     - 5.6.0
   * - ``jax.tree_util``
     - Provides utilities for working with nested data structures, or
       ``pytrees``.
     - 0.1.65
     - 5.0.0
   * - ``jax.typing``
     - Provides JAX-specific static type annotations.
     - 0.3.18
     - 5.1.0
   * - ``jax.extend``
     - Provides modules for access to JAX internal machinery module. The
       ``jax.extend`` module defines a library view of some of JAX’s internal
@@ -350,8 +339,8 @@ A SciPy-like API for scientific computing.
   :header-rows: 1

   * - Module
     - As of JAX
     - As of ROCm
     - Since JAX
     - Since ROCm
   * - ``jax.scipy.cluster``
     - 0.3.11
     - 5.1.0

@@ -396,8 +385,8 @@ jax.scipy.stats module
   :header-rows: 1

   * - Module
     - As of JAX
     - As of ROCm
     - Since JAX
     - Since ROCm
   * - ``jax.scipy.stats.bernouli``
     - 0.1.56
     - 5.0.0
@@ -480,8 +469,8 @@ Modules for JAX extensions.
   :header-rows: 1

   * - Module
     - As of JAX
     - As of ROCm
     - Since JAX
     - Since ROCm
   * - ``jax.extend.ffi``
     - 0.4.30
     - 6.0.0
@@ -495,25 +484,190 @@ Modules for JAX extensions.
     - 0.4.15
     - 5.5.0

Unsupported JAX features
--------------------------------------------------------------------------------
jax.experimental module
-------------------------------------------------------------------------------

The following GPU-accelerated JAX features are not supported by ROCm for
the listed supported JAX versions.
Experimental modules and APIs.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.checkify``
     - 0.1.75
     - 5.0.0
   * - ``jax.experimental.compilation_cache.compilation_cache``
     - 0.1.68
     - 5.0.0
   * - ``jax.experimental.custom_partitioning``
     - 0.4.0
     - 5.3.0
   * - ``jax.experimental.jet``
     - 0.1.56
     - 5.0.0
   * - ``jax.experimental.key_reuse``
     - 0.4.26
     - 5.6.0
   * - ``jax.experimental.mesh_utils``
     - 0.1.76
     - 5.0.0
   * - ``jax.experimental.multihost_utils``
     - 0.3.2
     - 5.0.0
   * - ``jax.experimental.pallas``
     - 0.4.15
     - 5.5.0
   * - ``jax.experimental.pjit``
     - 0.1.61
     - 5.0.0
   * - ``jax.experimental.serialize_executable``
     - 0.4.0
     - 5.3.0
   * - ``jax.experimental.shard_map``
     - 0.4.3
     - 5.3.0
   * - ``jax.experimental.sparse``
     - 0.1.75
     - 5.0.0

.. list-table::
   :header-rows: 1

   * - API
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.enable_x64``
     - 0.1.60
     - 5.0.0
   * - ``jax.experimental.disable_x64``
     - 0.1.60
     - 5.0.0
jax.experimental.pallas module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Module for Pallas, a JAX extension for custom kernels.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.pallas.mosaic_gpu``
     - 0.4.31
     - 6.1.3
   * - ``jax.experimental.pallas.tpu``
     - 0.4.15
     - 5.5.0
   * - ``jax.experimental.pallas.triton``
     - 0.4.32
     - 6.1.3
jax.experimental.sparse module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Experimental support for sparse matrix operations.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.sparse.linalg``
     - 0.3.15
     - 5.2.0
   * - ``jax.experimental.sparse.sparsify``
     - 0.3.25
     - ❌

.. list-table::
   :header-rows: 1

   * - ``sparse`` data structure API
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.sparse.BCOO``
     - 0.1.72
     - 5.0.0
   * - ``jax.experimental.sparse.BCSR``
     - 0.3.20
     - 5.1.0
   * - ``jax.experimental.sparse.CSR``
     - 0.1.75
     - 5.0.0
   * - ``jax.experimental.sparse.NM``
     - 0.4.27
     - 5.6.0
   * - ``jax.experimental.sparse.COO``
     - 0.1.75
     - 5.0.0
Unsupported JAX features
------------------------

The following are GPU-accelerated JAX features not currently supported by
ROCm.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since JAX
   * - Mixed Precision with TF32
     - Mixed precision with TF32 is used for matrix multiplications,
       convolutions, and other linear algebra operations, particularly in
       deep learning workloads like CNNs and transformers.
     - 0.2.25
   * - RNN support
     - Currently only LSTM with double bias is supported with float32 input
       and weight.
     - 0.3.25
   * - XLA int4 support
     - 4-bit integer (int4) precision in the XLA compiler.
     - 0.4.0
   * - ``jax.experimental.sparsify``
     - Converts a dense matrix to a sparse matrix representation.
     - Experimental
   * - MOSAIC (GPU)
     - Mosaic is a library of kernel-building abstractions for JAX's Pallas system
Use cases and recommendations
================================================================================

* The `nanoGPT in JAX <https://rocm.blogs.amd.com/artificial-intelligence/nanoGPT-JAX/README.html>`_
  blog explores the implementation and training of a Generative Pre-trained
  Transformer (GPT) model in JAX, inspired by Andrej Karpathy’s PyTorch-based
  nanoGPT. By comparing how essential GPT components—such as self-attention
  mechanisms and optimizers—are realized in PyTorch and JAX, the blog also
  highlights JAX’s unique features.

* The `Optimize GPT Training: Enabling Mixed Precision Training in JAX using
  ROCm on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-mixed-precision/README.html>`_
  blog post provides a comprehensive guide on enhancing the training efficiency
  of GPT models by implementing mixed precision techniques in JAX, specifically
  tailored for AMD GPUs utilizing the ROCm platform.

* The `Supercharging JAX with Triton Kernels on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-triton/README.html>`_
  blog demonstrates how to develop a custom fused dropout-activation kernel for
  matrices using Triton, integrate it with JAX, and benchmark its performance
  using ROCm.

* The `Distributed fine-tuning with JAX on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
  blog outlines the process of fine-tuning a Bidirectional Encoder Representations
  from Transformers (BERT)-based large language model (LLM) using JAX for a text
  classification task. The blog post discusses techniques for parallelizing the
  fine-tuning across multiple AMD GPUs and assesses the model's performance on a
  holdout dataset. During the fine-tuning, a BERT-base-cased transformer model
  and the General Language Understanding Evaluation (GLUE) benchmark dataset were
  used on a multi-GPU setup.

* The `MI300X workload optimization guide <https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html>`_
  provides detailed guidance on optimizing workloads for the AMD Instinct MI300X
  accelerator using ROCm. The page is aimed at helping users achieve optimal
  performance for deep learning and other high-performance computing tasks on
  the MI300X GPU.

For more use cases and recommendations, see `ROCm JAX blog posts <https://rocm.blogs.amd.com/blog/tag/jax.html>`_.
@@ -21,68 +21,31 @@ release cycles for PyTorch on ROCm:
|
||||
|
||||
- ROCm PyTorch release:
|
||||
|
||||
- Provides the latest version of ROCm but might not necessarily support the
|
||||
latest stable PyTorch version.
|
||||
- Provides the latest version of ROCm but doesn't immediately support the latest stable PyTorch
|
||||
version.
|
||||
|
||||
- Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
|
||||
preinstalled.
|
||||
pre-installed.
|
||||
|
||||
- ROCm PyTorch repository: `<https://github.com/ROCm/pytorch>`_
|
||||
|
||||
- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>`
|
||||
to get started.
|
||||
- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.
|
||||
|
||||
- Official PyTorch release:
|
||||
|
||||
- Provides the latest stable version of PyTorch but might not necessarily
|
||||
support the latest ROCm version.
|
||||
- Provides the latest stable version of PyTorch but doesn't immediately support the latest ROCm version.
|
||||
|
||||
- Official PyTorch repository: `<https://github.com/pytorch/pytorch>`_
|
||||
|
||||
- See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
|
||||
or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_
|
||||
to get started.
|
||||
or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.
|
||||
|
||||
PyTorch includes tooling that generates HIP source code from the CUDA backend.
|
||||
This approach allows PyTorch to support ROCm without requiring manual code
|
||||
modifications. For more information, see :doc:`HIPIFY <hipify:index>`.
|
||||
The upstream PyTorch includes an automatic HIPification solution that automatically generates HIP
|
||||
source code from the CUDA backend. This approach allows PyTorch to support ROCm without requiring
|
||||
manual code modifications.
|
||||
|
||||
ROCm development is aligned with the stable release of PyTorch, while upstream
|
||||
PyTorch testing uses the stable release of ROCm to maintain consistency.
|
||||
|
||||

.. _pytorch-recommendations:

Use cases and recommendations
================================================================================

* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/training/benchmark-docker/pytorch-training>`
  provides guidance on how to leverage the ROCm platform for training AI models.
  It covers the steps, tools, and best practices for optimizing training
  workflows on AMD GPUs using PyTorch features.

* :doc:`Single-GPU fine-tuning and inference </how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference>`
  describes and demonstrates how to use the ROCm platform for the fine-tuning
  and inference of machine learning models, particularly large language models
  (LLMs), on systems with a single GPU. This topic provides a detailed guide for
  setting up, optimizing, and executing fine-tuning and inference workflows in
  such environments.

* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference>`
  describes and demonstrates the fine-tuning and inference of machine learning
  models on systems with multiple GPUs.

* The :doc:`Instinct MI300X workload optimization guide </how-to/rocm-for-ai/inference-optimization/workload>`
  provides detailed guidance on optimizing workloads for the AMD Instinct MI300X
  accelerator using ROCm. This guide helps users achieve optimal performance for
  deep learning and other high-performance computing tasks on the MI300X
  accelerator.

* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
  describes how PyTorch integrates with ROCm for AI workloads. It outlines the
  use of PyTorch on the ROCm platform and focuses on efficiently leveraging AMD
  GPU hardware for training and inference tasks in AI applications.

For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.

.. _pytorch-docker-compat:

@@ -93,10 +56,10 @@ Docker image compatibility

<i class="fab fa-docker"></i>

AMD validates and publishes `PyTorch images <https://hub.docker.com/r/rocm/pytorch>`_
with ROCm backends on Docker Hub. The following Docker image tags and associated
inventories were tested on `ROCm 6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`_.
Click |docker-icon| to view the image on Docker Hub.

.. list-table:: PyTorch Docker image components
   :header-rows: 1

@@ -249,12 +212,13 @@ Click |docker-icon| to view the image on Docker Hub.
     - `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

Key ROCm libraries for PyTorch
================================================================================

PyTorch functionality on ROCm is determined by its underlying library
dependencies. These ROCm components affect the capabilities, performance, and
feature set available to developers. The versions described are available in
ROCm :version:`rocm_version`.

.. list-table::
   :header-rows: 1

@@ -274,23 +238,24 @@ feature set available to developers.
     - :version-ref:`hipBLAS rocm_version`
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Supports operations such as matrix multiplication, matrix-vector
       products, and tensor contractions. Utilized in both dense and batched
       linear algebra operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - :version-ref:`hipBLASLt rocm_version`
     - hipBLASLt is an extension of the hipBLAS library, providing additional
       features like epilogues fused into the matrix multiplication kernel or
       the use of integer tensor cores.
     - Accelerates operations such as ``torch.matmul``, ``torch.mm``, and the
       matrix multiplications used in convolutional and linear layers.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - :version-ref:`hipCUB rocm_version`
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort, and select.
     - Supports operations such as ``torch.sum``, ``torch.cumsum``, ``torch.sort``,
       and ``torch.topk``. Operations on sparse tensors or tensors with
       irregular shapes often involve scanning, sorting, and filtering, which
       hipCUB handles efficiently.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - :version-ref:`hipFFT rocm_version`
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
@@ -298,8 +263,8 @@ feature set available to developers.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - :version-ref:`hipRAND rocm_version`
     - Provides fast random number generation for GPUs.
     - The ``torch.rand``, ``torch.randn``, and stochastic layers like
       ``torch.nn.Dropout`` rely on hipRAND.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - :version-ref:`hipSOLVER rocm_version`
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
@@ -370,7 +335,7 @@ feature set available to developers.
     - :version-ref:`RPP rocm_version`
     - Speeds up data augmentation, transformation, and other preprocessing steps.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data load workloads to speed up data processing.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - :version-ref:`rocThrust rocm_version`
     - Provides a C++ template library for parallel algorithms like sorting,
@@ -387,11 +352,11 @@ feature set available to developers.
       involve matrix products, such as ``torch.matmul``, ``torch.bmm``, and
       more.

Supported features
================================================================================

This section maps GPU-accelerated PyTorch features to their supported ROCm and
PyTorch versions.

torch
--------------------------------------------------------------------------------

@@ -399,24 +364,23 @@ torch
`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
PyTorch, providing data structures for multi-dimensional tensors and
implementing mathematical operations on them. It also includes utilities for
efficient serialization of tensors and arbitrary data types, along with other
tools.

Tensor data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The tensor data type is specified using the ``dtype`` attribute or argument.
PyTorch supports many data types for different use cases.

The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_
single data types:

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.float8_e4m3fn``
     - 8-bit floating point, e4m3
     - 2.3
@@ -508,11 +472,11 @@ single data types:

.. note::

   Unsigned types except ``uint8`` have limited support in eager mode. They
   primarily exist to assist usage with ``torch.compile``.

See :doc:`ROCm precision support <rocm:reference/precision-support>` for the
native hardware support of data types.
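
As a brief illustration of the ``dtype`` argument, the following sketch creates
tensors in several of the listed types. The 8-bit float type is shown under the
assumption of a recent PyTorch build; its eager-mode coverage is limited, as
noted above.

.. code-block:: python

   import torch

   a = torch.zeros(4, dtype=torch.bfloat16)   # 16-bit brain floating point
   b = torch.arange(8, dtype=torch.int64)     # 64-bit signed integer
   c = torch.ones(4).to(torch.float16)        # cast an existing tensor

   # 8-bit floating point, e4m3; primarily useful with torch.compile.
   d = torch.zeros(4, dtype=torch.float8_e4m3fn)

   print(a.dtype, b.dtype, c.dtype, d.dtype)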

torch.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -527,8 +491,8 @@ leveraging ROCm and CUDA as the underlying frameworks.

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - Device management
     - Utilities for managing and interacting with GPUs.
     - 0.4.0
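
A short sketch of these device management utilities; on ROCm, AMD GPUs are
reported through the same ``torch.cuda`` interface:

.. code-block:: python

   import torch

   if torch.cuda.is_available():
       print("Device count:", torch.cuda.device_count())
       torch.cuda.set_device(0)                        # select a GPU by index
       print("Current device:", torch.cuda.current_device())
       print("Device name:", torch.cuda.get_device_name(0))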

@@ -602,8 +566,8 @@ PyTorch interacts with the ROCm or CUDA environment.

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``cufft_plan_cache``
     - Manages caching of GPU FFT plans to optimize repeated FFT computations.
     - 1.7.0
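
For example, the FFT plan cache can be inspected and bounded per device; a
minimal sketch:

.. code-block:: python

   import torch

   # Plan caching avoids rebuilding FFT plans for repeated transforms of the
   # same shape. The cache is exposed per device index.
   cache = torch.backends.cuda.cufft_plan_cache[0]
   cache.max_size = 32                     # bound the number of cached plans

   x = torch.randn(1024, device="cuda")
   torch.fft.fft(x)
   print(cache.size)                       # plans currently cached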

@@ -651,8 +615,8 @@ Supported ``torch`` options include:

   * - Option
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``allow_tf32``
     - TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
       Ampere or newer GPUs.
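
These options are plain module-level switches; for example:

.. code-block:: python

   import torch

   # Permit TF32 math in matmuls and convolutions where the hardware
   # supports it, trading a little precision for throughput.
   torch.backends.cuda.matmul.allow_tf32 = True
   torch.backends.cudnn.allow_tf32 = True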

@@ -667,28 +631,28 @@ Supported ``torch`` options include:

Automatic mixed precision: torch.amp
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

PyTorch automates the process of using both 16-bit (half-precision, float16) and
32-bit (single-precision, float32) floating-point types in model training and
inference.
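
A minimal, self-contained training-step sketch that combines autocasting with
gradient scaling (the tiny model and synthetic data here are illustrative
placeholders only):

.. code-block:: python

   import torch

   model = torch.nn.Linear(16, 4).cuda()
   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
   scaler = torch.cuda.amp.GradScaler()

   for _ in range(3):
       inputs = torch.randn(8, 16, device="cuda")
       targets = torch.randint(0, 4, (8,), device="cuda")
       optimizer.zero_grad()
       # The forward pass runs in float16 where safe, float32 elsewhere.
       with torch.autocast(device_type="cuda", dtype=torch.float16):
           loss = torch.nn.functional.cross_entropy(model(inputs), targets)
       # Scale the loss to avoid gradient underflow, then unscale and step.
       scaler.scale(loss).backward()
       scaler.step(optimizer)
       scaler.update()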

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - Autocasting
     - Autocast instances serve as context managers or decorators that allow
       regions of your script to run in mixed precision.
     - 1.9
     - 2.5
   * - Gradient scaling
     - To prevent underflow, “gradient scaling” multiplies the network’s
       loss by a scale factor and invokes a backward pass on the scaled
       loss. The same factor then scales gradients flowing backward through
       the network. In other words, gradient values have a larger magnitude so
       that they don’t flush to zero.
     - 1.9
     - 2.5
   * - CUDA op-specific behavior

@@ -702,7 +666,7 @@ inference.

Distributed library features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The PyTorch distributed library includes a collective of parallelism modules, a
communications layer, and infrastructure for launching and debugging large
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.
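
On ROCm, the ``nccl`` backend name maps to RCCL, so standard initialization
code carries over unchanged. A sketch of one process per GPU, launched with
``torchrun`` (the script name ``train.py`` is a placeholder):

.. code-block:: python

   import os
   import torch
   import torch.distributed as dist

   # torchrun sets RANK, WORLD_SIZE, and LOCAL_RANK for each worker, e.g.:
   #   torchrun --nproc-per-node=8 train.py
   dist.init_process_group(backend="nccl")      # RCCL under ROCm
   torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))

   t = torch.ones(1, device="cuda")
   dist.all_reduce(t)                           # sums across all workers
   print(f"rank {dist.get_rank()}: {t.item()}")
   dist.destroy_process_group()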

@@ -716,13 +680,13 @@ of computational resources and scalability for large-scale tasks.

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - TensorPipe
     - A point-to-point communication library integrated into
       PyTorch for distributed training. It handles tensor data transfers
       efficiently between different processes or devices, including those on
       separate machines.
     - 1.8
     - 5.4
   * - Gloo

@@ -741,8 +705,8 @@ torch.compiler

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.compiler`` (AOT Autograd)
     - Autograd captures not only the user-level code, but also backpropagation,
       which results in capturing the backwards pass “ahead-of-time”. This
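
In practice, this stack is driven through the ``torch.compile`` entry point; a
minimal sketch:

.. code-block:: python

   import torch

   def f(x):
       return torch.sin(x) ** 2 + torch.cos(x) ** 2

   # Captures forward and backward graphs ahead of time and compiles them.
   compiled_f = torch.compile(f)
   x = torch.randn(1024, device="cuda", requires_grad=True)
   compiled_f(x).sum().backward()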

@@ -765,8 +729,8 @@ The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
utilities for processing audio data in PyTorch, such as audio loading,
transformations, and feature extraction.

To ensure GPU acceleration with ``torchaudio.transforms``, you need to
explicitly move the audio data (waveform tensor) to the GPU using ``.to('cuda')``.
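
A sketch of moving both the transform and the waveform to the GPU (the input
file ``sample.wav`` is a placeholder):

.. code-block:: python

   import torchaudio

   waveform, sample_rate = torchaudio.load("sample.wav")

   # Transforms are modules, so both their buffers and the input tensor
   # must be moved to the GPU for accelerated execution.
   spectrogram = torchaudio.transforms.Spectrogram().to("cuda")
   spec = spectrogram(waveform.to("cuda"))
   print(spec.shape, spec.device)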

The following ``torchaudio`` features are GPU-accelerated.

@@ -775,10 +739,10 @@ The following ``torchaudio`` features are GPU-accelerated.

   * - Feature
     - Description
     - Since torchaudio version
     - Since ROCm
   * - ``torchaudio.transforms.Spectrogram``
     - Generates a spectrogram of an input waveform using STFT.
     - 0.6.0
     - 4.5
   * - ``torchaudio.transforms.MelSpectrogram``

@@ -798,7 +762,7 @@ torchvision
--------------------------------------------------------------------------------

The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
provides datasets, model architectures, and common image transformations for
computer vision.

The following ``torchvision`` features are GPU-accelerated.

@@ -808,8 +772,8 @@ The following ``torchvision`` features are GPU-accelerated.

   * - Feature
     - Description
     - Since torchvision version
     - Since ROCm
   * - ``torchvision.transforms.functional``
     - Provides GPU-compatible transformations for image preprocessing like
       resize, normalize, rotate, and crop.
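
Functional transforms accept CUDA tensors directly, so preprocessing can stay
on the GPU; a small sketch with a synthetic image tensor:

.. code-block:: python

   import torch
   import torchvision.transforms.functional as F

   img = torch.rand(3, 256, 256, device="cuda")   # synthetic CHW image

   img = F.resize(img, [224, 224])
   img = F.rotate(img, angle=15.0)
   img = F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
   print(img.shape, img.device)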

@@ -855,7 +819,7 @@ torchtune
The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library is used for
authoring, fine-tuning, and experimenting with LLMs.

* Usage: It works out of the box, enabling developers to fine-tune ROCm PyTorch solutions.

* Only the official release exists.

@@ -866,8 +830,7 @@ The `torchserve <https://pytorch.org/serve/>`_ is a PyTorch domain library
for serving and scaling PyTorch models in production.

* torchserve does not implement its own kernels. ROCm support is enabled by
  linking against ROCm libraries.

* Only the official release exists.

@@ -878,16 +841,14 @@ The `torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
common sparsity and parallelism primitives needed for large-scale recommender
systems.

* torchrec does not implement its own kernels. ROCm support is enabled by
  linking against ROCm libraries.

* Only the official release exists.

Unsupported PyTorch features
================================================================================

The following GPU-accelerated PyTorch features are not supported by ROCm for
the listed supported PyTorch versions.

.. list-table::
   :widths: 30, 60, 10
@@ -895,7 +856,7 @@ the listed supported PyTorch versions.

   * - Feature
     - Description
     - As of PyTorch
   * - APEX batch norm
     - Use APEX batch norm instead of PyTorch batch norm.
     - 1.6.0
@@ -951,3 +912,31 @@ the listed supported PyTorch versions.
       utilized effectively through custom CUDA extensions or advanced
       workflows.
     - Not a core feature

@@ -57,7 +57,6 @@ article_pages = [
    {"file": "how-to/rocm-for-ai/training/prerequisite-system-validation", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/megatron-lm", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/pytorch-training", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/mpt-llm-foundry", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/scale-model-training", "os": ["linux"]},

    {"file": "how-to/rocm-for-ai/fine-tuning/index", "os": ["linux"]},

@@ -1,10 +1,10 @@
vllm_benchmark:
  unified_docker:
    latest:
      pull_tag: rocm/vllm:rocm6.3.1_instinct_vllm0.8.3_20250415
      docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_instinct_vllm0.8.3_20250415/images/sha256-ad9062dea3483d59dedb17c67f7c49f30eebd6eb37c3fac0a171fb19696cc845
      rocm_version: 6.3.1
      vllm_version: 0.8.3
      pytorch_version: 2.7.0 (dev nightly)
      hipblaslt_version: 0.13
  model_groups:
@@ -102,12 +102,19 @@ vllm_benchmark:
        model_repo: Qwen/Qwen2-72B-Instruct
        url: https://huggingface.co/Qwen/Qwen2-72B-Instruct
        precision: float16
      - model: QwQ-32B
        mad_tag: pyt_vllm_qwq-32b
        model_repo: Qwen/QwQ-32B
        url: https://huggingface.co/Qwen/QwQ-32B
    - group: JAIS
      tag: jais
      models:
      - model: JAIS 13B
        mad_tag: pyt_vllm_jais-13b
        model_repo: core42/jais-13b-chat
        url: https://huggingface.co/core42/jais-13b-chat
        precision: float16
      - model: JAIS 30B
        mad_tag: pyt_vllm_jais-30b
        model_repo: core42/jais-30b-chat-v3
        url: https://huggingface.co/core42/jais-30b-chat-v3
        precision: float16
        tunableop: true
    - group: DBRX
      tag: dbrx
      models:

@@ -1,178 +1,15 @@
.. meta::
   :description: How to use model quantization techniques to speed up inference.
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, quantization, Quark, GPTQ, transformers, bitsandbytes

*****************************
Model quantization techniques
*****************************

Quantization reduces the model size compared to its native full-precision version, making it easier to fit large models
onto accelerators or GPUs with limited memory usage. This section explains how to perform LLM quantization using AMD Quark,
GPTQ, and bitsandbytes on AMD Instinct hardware.

.. _quantize-llms-quark:

AMD Quark
=========

`AMD Quark <https://quark.docs.amd.com/latest/>`_ offers an efficient, scalable quantization solution tailored to AMD Instinct GPUs. It supports ``FP8`` and ``INT8`` quantization for activations, weights, and KV cache,
including ``FP8`` attention. For very large models, it employs a two-level ``INT4-FP8`` scheme that stores weights in ``INT4`` while computing with ``FP8``, for nearly 4x compression without sacrificing accuracy.
Quark scales efficiently across multiple GPUs and handles ultra-large models like Llama-3.1-405B. Quantized ``FP8`` models like Llama, Mixtral, and Grok-1 are available under the `AMD organization on Hugging Face <https://huggingface.co/collections/amd/quark-quantized-ocp-fp8-models-66db7936d18fcbaf95d4405c>`_, and can be deployed directly via `vLLM <https://github.com/vllm-project/vllm/tree/main/vllm>`_.

Installing Quark
----------------

The latest release of Quark can be installed with pip:

.. code-block:: shell

   pip install amd-quark

For detailed installation instructions, refer to the `Quark documentation <https://quark.docs.amd.com/latest/install.html>`_.

Using Quark for quantization
----------------------------

#. First, load the pre-trained model and its corresponding tokenizer using the Hugging Face ``transformers`` library.

   .. code-block:: python

      from transformers import AutoTokenizer, AutoModelForCausalLM

      MODEL_ID = "meta-llama/Llama-2-70b-chat-hf"
      MAX_SEQ_LEN = 512

      model = AutoModelForCausalLM.from_pretrained(
          MODEL_ID, device_map="auto", torch_dtype="auto",
      )
      model.eval()

      tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, model_max_length=MAX_SEQ_LEN)
      tokenizer.pad_token = tokenizer.eos_token

#. Prepare the calibration DataLoader (static quantization requires calibration data).

   .. code-block:: python

      from datasets import load_dataset
      from torch.utils.data import DataLoader

      BATCH_SIZE = 1
      NUM_CALIBRATION_DATA = 512

      dataset = load_dataset("mit-han-lab/pile-val-backup", split="validation")
      text_data = dataset["text"][:NUM_CALIBRATION_DATA]

      tokenized_outputs = tokenizer(
          text_data, return_tensors="pt", padding=True, truncation=True, max_length=MAX_SEQ_LEN
      )
      calib_dataloader = DataLoader(
          tokenized_outputs['input_ids'], batch_size=BATCH_SIZE, drop_last=True
      )

#. Define the quantization configuration. See the comments in the following code snippet for descriptions of each configuration option.

   .. code-block:: python

      from quark.torch.quantization import (Config, QuantizationConfig,
                                            FP8E4M3PerTensorSpec)

      # Define the FP8/per-tensor/static spec.
      FP8_PER_TENSOR_SPEC = FP8E4M3PerTensorSpec(observer_method="min_max",
          is_dynamic=False).to_quantization_spec()

      # Define the global quantization config. Input tensors and weights apply FP8_PER_TENSOR_SPEC.
      global_quant_config = QuantizationConfig(input_tensors=FP8_PER_TENSOR_SPEC,
          weight=FP8_PER_TENSOR_SPEC)

      # Define the quantization config for KV-cache layers. Output tensors apply FP8_PER_TENSOR_SPEC.
      KV_CACHE_SPEC = FP8_PER_TENSOR_SPEC
      kv_cache_layer_names_for_llama = ["*k_proj", "*v_proj"]
      kv_cache_quant_config = {name:
          QuantizationConfig(input_tensors=global_quant_config.input_tensors,
                             weight=global_quant_config.weight,
                             output_tensors=KV_CACHE_SPEC)
          for name in kv_cache_layer_names_for_llama}
      layer_quant_config = kv_cache_quant_config.copy()

      EXCLUDE_LAYERS = ["lm_head"]
      quant_config = Config(
          global_quant_config=global_quant_config,
          layer_quant_config=layer_quant_config,
          kv_cache_quant_config=kv_cache_quant_config,
          exclude=EXCLUDE_LAYERS)

#. Quantize the model and export it.

   .. code-block:: python

      import torch
      from quark.torch import ModelQuantizer, ModelExporter
      from quark.torch.export import ExporterConfig, JsonExporterConfig

      # Apply quantization.
      quantizer = ModelQuantizer(quant_config)
      quant_model = quantizer.quantize_model(model, calib_dataloader)

      # Freeze the quantized model to export it.
      freezed_model = quantizer.freeze(model)

      # Define the export config.
      LLAMA_KV_CACHE_GROUP = ["*k_proj", "*v_proj"]
      export_config = ExporterConfig(json_export_config=JsonExporterConfig())
      export_config.json_export_config.kv_cache_group = LLAMA_KV_CACHE_GROUP

      EXPORT_DIR = MODEL_ID.split("/")[1] + "-w-fp8-a-fp8-kvcache-fp8-pertensor"
      exporter = ModelExporter(config=export_config, export_dir=EXPORT_DIR)
      with torch.no_grad():
          exporter.export_safetensors_model(freezed_model,
              quant_config=quant_config, tokenizer=tokenizer)

Evaluating the quantized model with vLLM
----------------------------------------

The exported Quark-quantized model can be loaded directly by vLLM for inference. You need to specify the model path and inform vLLM about the quantization method (``quantization='quark'``) and the KV cache data type (``kv_cache_dtype='fp8'``).
Use the ``LLM`` interface to load the model:

.. code-block:: python

   from vllm import LLM, SamplingParams

   # Sample prompts.
   prompts = [
       "Hello, my name is",
       "The president of the United States is",
       "The capital of France is",
       "The future of AI is",
   ]
   # Create a sampling params object.
   sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

   # Create an LLM.
   llm = LLM(model="Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor",
             kv_cache_dtype='fp8', quantization='quark')
   # Generate texts from the prompts. The output is a list of RequestOutput objects
   # that contain the prompt, generated text, and other information.
   outputs = llm.generate(prompts, sampling_params)
   # Print the outputs.
   print("\nGenerated Outputs:\n" + "-" * 60)
   for output in outputs:
       prompt = output.prompt
       generated_text = output.outputs[0].text
       print(f"Prompt: {prompt!r}")
       print(f"Output: {generated_text!r}")
       print("-" * 60)

You can also evaluate the quantized model's accuracy on standard benchmarks using the `lm-evaluation-harness <https://github.com/EleutherAI/lm-evaluation-harness>`_. Pass the necessary vLLM arguments to ``lm_eval`` via ``--model_args``.

.. code-block:: shell

   lm_eval --model vllm \
       --model_args pretrained=Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor,kv_cache_dtype='fp8',quantization='quark' \
       --tasks gsm8k

This provides a standardized way to measure the performance impact of quantization.

.. _fine-tune-llms-gptq:

GPTQ
====

@@ -196,7 +33,7 @@ The AutoGPTQ library implements the GPTQ algorithm.

   .. code-block:: shell

      # This installs a pre-built wheel for a specific ROCm version.
      pip install auto-gptq --no-build-isolation --extra-index-url https://huggingface.github.io/autogptq-index/whl/rocm573/

   Or, install AutoGPTQ from source for the appropriate ROCm version (for example, ROCm 6.1).

@@ -206,10 +43,10 @@ The AutoGPTQ library implements the GPTQ algorithm.

      # Clone the source code.
      git clone https://github.com/AutoGPTQ/AutoGPTQ.git
      cd AutoGPTQ

      # Speed up compilation by setting PYTORCH_ROCM_ARCH to the target device.
      PYTORCH_ROCM_ARCH=gfx942 ROCM_VERSION=6.1 pip install .

      # Show the package after the installation.

#. Run ``pip show auto-gptq`` to print information for the installed ``auto-gptq`` package. Its output should look like this:

@@ -275,7 +112,7 @@ Using GPTQ with Hugging Face Transformers

   .. code-block:: python

      from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

      base_model_name = "NousResearch/Llama-2-7b-hf"
      tokenizer = AutoTokenizer.from_pretrained(base_model_name)
      gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)

@@ -375,10 +212,10 @@ To get started with bitsandbytes primitives, use the following code as reference.

.. code-block:: python

   import bitsandbytes as bnb

   # Use Int8 matrix multiplication.
   bnb.matmul(..., threshold=6.0)

   # Use bitsandbytes 8-bit optimizers.
   adam = bnb.optim.Adam8bit(model.parameters(), lr=0.001, betas=(0.9, 0.995))

@@ -390,14 +227,14 @@ To load a Transformers model in 4-bit, set ``load_in_4bit=True`` in ``BitsAndBytesConfig``.

.. code-block:: python

   from transformers import AutoModelForCausalLM, BitsAndBytesConfig

   base_model_name = "NousResearch/Llama-2-7b-hf"
   quantization_config = BitsAndBytesConfig(load_in_4bit=True)
   bnb_model_4bit = AutoModelForCausalLM.from_pretrained(
       base_model_name,
       device_map="auto",
       quantization_config=quantization_config)

   # Check the memory footprint with the get_memory_footprint method.
   print(bnb_model_4bit.get_memory_footprint())

@@ -406,9 +243,9 @@ To load a model in 8-bit for inference, use the ``load_in_8bit`` option.

.. code-block:: python

   from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

   base_model_name = "NousResearch/Llama-2-7b-hf"

   tokenizer = AutoTokenizer.from_pretrained(base_model_name)
   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
@@ -416,7 +253,7 @@ To load a model in 8-bit for inference, use the ``load_in_8bit`` option.
       base_model_name,
       device_map="auto",
       quantization_config=quantization_config)

   prompt = "What is a large language model?"
   inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
   generated_ids = model.generate(**inputs)

@@ -20,8 +20,6 @@ training, fine-tuning, and inference. It leverages popular machine learning fram

- :doc:`LLM inference frameworks <llm-inference-frameworks>`

- :doc:`vLLM inference performance testing <vllm-benchmark>`

- :doc:`PyTorch inference performance testing <pytorch-inference-benchmark>`

- :doc:`Deploying your model <deploy-your-model>`

@@ -86,19 +86,15 @@ PyTorch inference performance testing

.. container:: model-doc pyt_chai1_inference

   2. Use the following command to pull the `ROCm PyTorch Docker image <https://hub.docker.com/layers/rocm/pytorch/rocm6.2.3_ubuntu22.04_py3.10_pytorch_release_2.3.0_triton_llvm_reg_issue/images/sha256-b736a4239ab38a9d0e448af6d4adca83b117debed00bfbe33846f99c4540f79b>`_ from Docker Hub.

      .. code-block:: shell

         docker pull rocm/pytorch:rocm6.2.3_ubuntu22.04_py3.10_pytorch_release_2.3.0_triton_llvm_reg_issue

      .. note::

         The Chai-1 benchmark uses a specifically selected Docker image with ROCm 6.2.3 and PyTorch 2.3.0 to address an accuracy issue.

.. container:: model-doc pyt_clip_inference

   2. Use the following command to pull the `ROCm PyTorch Docker image <https://hub.docker.com/layers/rocm/pytorch/latest/images/sha256-05b55983e5154f46e7441897d0908d79877370adca4d1fff4899d9539d6c4969>`_ from Docker Hub.

      .. code-block:: shell

@@ -34,7 +34,7 @@ vLLM inference performance testing

.. _vllm-benchmark-available-models:

Available models
================

.. raw:: html

@@ -183,25 +183,6 @@ vLLM inference performance testing
         to collect latency and throughput performance data, you can also change the benchmarking
         parameters. See the standalone benchmarking tab for more information.

         {% if model.tunableop %}

         .. note::

            For improved performance, consider enabling :ref:`PyTorch TunableOp <mi300x-tunableop>`.
            TunableOp automatically explores different implementations and configurations of certain PyTorch
            operators to find the fastest one for your hardware.

            By default, ``{{model.mad_tag}}`` runs with TunableOp disabled
            (see `<https://github.com/ROCm/MAD/blob/develop/models.json>`__). To
            enable it, edit the default run behavior in the ``models.json``
            configuration before running inference -- update the model's run
            ``args`` by changing ``--tunableop off`` to ``--tunableop on``.

            Enabling TunableOp triggers a two-pass run -- a warm-up followed by the performance-collection run.

         {% endif %}

   .. tab-item:: Standalone benchmarking

      Run the vLLM benchmark tool independently by starting the

@@ -276,7 +257,7 @@ vLLM inference performance testing

      * Latency benchmark

        Use this command to benchmark the latency of the {{model.model}} model on eight GPUs with ``{{model.precision}}`` precision.

        .. code-block:: shell

@@ -286,11 +267,11 @@ vLLM inference performance testing

      * Throughput benchmark

        Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with ``{{model.precision}}`` precision.

        .. code-block:: shell

           ./vllm_benchmark_report.sh -s throughput -m {{model.model_repo}} -g 8 -d {{model.precision}}

        Find the throughput report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_throughput_report.csv``.

@@ -350,18 +331,11 @@ for benchmarking, see the version-specific documentation.
     - PyTorch version
     - Resources

   * - 6.3.1
     - 0.7.3
     - 2.7.0
     -
       * `Documentation <https://rocm.docs.amd.com/en/docs-6.3.3/how-to/rocm-for-ai/inference/vllm-benchmark.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_instinct_vllm0.7.3_20250325/images/sha256-25245924f61750b19be6dcd8e787e46088a496c1fe17ee9b9e397f3d84d35640>`_

   * - 6.3.1
     - 0.6.6
     - 2.7.0
     -
       * `Documentation <https://rocm.docs.amd.com/en/docs-6.3.2/how-to/rocm-for-ai/inference/vllm-benchmark.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6/images/sha256-9a12ef62bbbeb5a4c30a01f702c8e025061f575aa129f291a49fbd02d6b4d6c9>`_

   * - 6.2.1

@@ -12,7 +12,7 @@ ROCm is an optimized fork of the upstream
`<https://github.com/AI-Hypercomputer/maxtext>`__ enabling efficient AI workloads
on AMD MI300X series accelerators.

The MaxText for ROCm training Docker (``rocm/jax-training:maxtext-v25.5``) image
provides a prebuilt environment for training on AMD Instinct MI300X and MI325X accelerators,
including essential components like JAX, XLA, ROCm libraries, and MaxText utilities.
It includes the following software components:

@@ -20,15 +20,15 @@ It includes the following software components:
+--------------------------+--------------------------------+
| Software component       | Version                        |
+==========================+================================+
| ROCm                     | 6.3.4                          |
+--------------------------+--------------------------------+
| JAX                      | 0.4.35                         |
+--------------------------+--------------------------------+
| Python                   | 3.10.12                        |
+--------------------------+--------------------------------+
| Transformer Engine       | 1.12.0.dev0+b8b92dc            |
+--------------------------+--------------------------------+
| hipBLASLt                | 0.13.0-ae9c477a                |
+--------------------------+--------------------------------+

Supported features and models
=============================
@@ -48,8 +48,6 @@ MaxText provides the following key features to train large language models effic

The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators.

* Llama 3.3 70B

* Llama 3.1 8B

* Llama 3.1 70B

@@ -117,7 +115,7 @@ with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`.

a. Master address

   Change ``localhost`` to the master node's resolvable hostname or IP address:

   .. code-block:: bash

@@ -182,15 +180,13 @@ Download the Docker image

   .. code-block:: shell

      docker pull rocm/jax-training:maxtext-v25.5

2. Use the following command to launch the Docker container. Note that the benchmarking scripts
   used in the :ref:`following section <amd-maxtext-get-started>` automatically launch the Docker container
   and execute the benchmark.

   .. code-block:: shell

      docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME/.ssh:/root/.ssh --shm-size 128G --name maxtext_training rocm/jax-training:maxtext-v25.5

.. _amd-maxtext-get-started:

@@ -223,9 +219,7 @@ Single node training benchmarking examples

   Run the single node training benchmark:

   .. code-block:: shell

      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_7b.sh

* Example 2: Single node training with Llama 2 70B

@@ -239,7 +233,7 @@ Single node training benchmarking examples

   .. code-block:: shell

      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_70b.sh

* Example 3: Single node training with Llama 3 8B

@@ -253,7 +247,7 @@ Single node training benchmarking examples

   .. code-block:: shell

      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_8b.sh

* Example 4: Single node training with Llama 3 70B

@@ -267,23 +261,9 @@ Single node training benchmarking examples

   .. code-block:: shell

      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_70b.sh

* Example 5: Single node training with Llama 3.3 70B

   Download the benchmarking script:

   .. code-block:: shell

      wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3.3_70b.sh

   Run the single node training benchmark:

   .. code-block:: shell

      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3.3_70b.sh

* Example 6: Single node training with DeepSeek V2 16B

   Download the benchmarking script:

@@ -295,7 +275,7 @@ Single node training benchmarking examples

   .. code-block:: shell

      IMAGE="rocm/jax-training:maxtext-v25.5" bash ./deepseek_v2_16b.sh

.. note::

@@ -363,26 +343,3 @@ own cluster setup.

   .. code-block:: shell

      sbatch -N <num_nodes> llama3_70b_multinode.sh

Previous versions
=================

This table lists previous versions of the ROCm JAX MaxText Docker image for training
performance testing. For detailed information about available models for
benchmarking, see the version-specific documentation.

.. list-table::
   :header-rows: 1
   :stub-columns: 1

   * - Image version
     - ROCm version
     - JAX version
     - Resources

   * - 25.4
     - 6.3.0
     - 0.4.31
     -
       * `Documentation <https://rocm.docs.amd.com/en/docs-6.3.3/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/jax-training/maxtext-v25.4/images/sha256-fb3eb71cd74298a7b3044b7130cf84113f14d518ff05a2cd625c11ea5f6a7b01>`_

@@ -1,168 +0,0 @@
.. meta::
   :description: How to train a model using LLM Foundry for ROCm.
   :keywords: ROCm, AI, LLM, train, PyTorch, torch, Llama, flux, tutorial, docker

******************************************
Training MPT-30B with LLM Foundry and ROCm
******************************************

MPT-30B is a 30-billion parameter decoder-style transformer-based model from
the Mosaic Pretrained Transformer (MPT) family -- learn more about it in
MosaicML's research blog `MPT-30B: Raising the bar for open-source foundation
models <https://www.databricks.com/blog/mpt-30b>`_.

ROCm and `<https://github.com/ROCm/MAD>`__ provide a pre-configured training
environment for the MPT-30B model using the ``rocm/pytorch-training:v25.5``
base `Docker image <https://hub.docker.com/layers/rocm/pytorch-training/v25.5/images/sha256-d47850a9b25b4a7151f796a8d24d55ea17bba545573f0d50d54d3852f96ecde5>`_
and the `LLM Foundry <https://github.com/mosaicml/llm-foundry>`_ framework.
This environment packages the following software components to train
on AMD Instinct MI300X series accelerators:

+--------------------------+--------------------------------+
| Software component       | Version                        |
+==========================+================================+
| ROCm                     | 6.3.4                          |
+--------------------------+--------------------------------+
| PyTorch                  | 2.7.0a0+git6374332             |
+--------------------------+--------------------------------+
| Flash Attention          | 3.0.0.post1                    |
+--------------------------+--------------------------------+

Using this image, you can build, run, and test the training process
for MPT-30B with access to detailed logs and performance metrics.

System validation
=================

If you have already validated your system settings, including NUMA
auto-balancing, skip this step. Otherwise, complete the :ref:`system validation
and optimization steps <train-a-model-system-validation>` to set up your system
before starting training.

Getting started
===============

The following procedures help you set up the training environment in a
reproducible Docker container. This training environment is tailored for
training MPT-30B using LLM Foundry and the specific model configurations outlined.
Other configurations and run conditions outside those described in this
document are not validated.

.. tab-set::

   .. tab-item:: MAD-integrated benchmarking

      On your host machine, clone the ROCm Model Automation and Dashboarding
      (`<https://github.com/ROCm/MAD>`__) repository to a local directory and
      install the required packages.

      .. code-block:: shell

         git clone https://github.com/ROCm/MAD
         cd MAD
         pip install -r requirements.txt

      Use this command to initiate the MPT-30B training benchmark.

      .. code-block:: shell

         python3 tools/run_models.py --tags pyt_mpt30b_training --keep-model-dir --live-output --clean-docker-cache

      .. tip::

         If you experience data download failures, set the
         ``MAD_SECRETS_HFTOKEN`` variable to your Hugging Face access token. See
         `User access tokens <https://huggingface.co/docs/hub/security-tokens>`_
         for details.

         .. code-block:: shell

            export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models"

      .. note::

         For improved performance (training throughput), consider enabling TunableOp.
         By default, ``pyt_mpt30b_training`` runs with TunableOp disabled. To enable it,
         run ``tools/run_models.py`` with the ``--tunableop on`` argument or edit the
         ``models.json`` configuration before running training.

         Although this might increase the initial training time, it can result in a performance gain.

   .. tab-item:: Standalone benchmarking

      To set up the training environment, clone the
      `<https://github.com/ROCm/MAD>`__ repo and build the Docker image. In
      this snippet, the image is named ``mosaic_mpt30_image``.

      .. code-block:: shell

         git clone https://github.com/ROCm/MAD
         cd MAD

         docker build --build-arg MAD_SYSTEM_GPU_ARCHITECTURE=gfx942 -f docker/pyt_mpt30b_training.ubuntu.amd.Dockerfile -t mosaic_mpt30_image .

      Start a ``mosaic_mpt30_image`` container using the following command.

      .. code-block:: shell

         docker run -it --device=/dev/kfd --device=/dev/dri --group-add=video --ipc=host --shm-size=8G mosaic_mpt30_image

      In the Docker container, clone the `<https://github.com/ROCm/MAD>`__
      repository and navigate to the benchmark scripts directory at
      ``/workspace/MAD/scripts/pyt_mpt30b_training``.

      .. code-block:: shell

         git clone https://github.com/ROCm/MAD
         cd MAD/scripts/pyt_mpt30b_training

      To initiate the training process, use the following command. This script uses the hyperparameters defined in
      ``mpt-30b-instruct.yaml``.

      .. code-block:: shell

         source run.sh

      .. note::

         For improved performance (training throughput), consider enabling TunableOp.
         To enable it, add the ``--tunableop on`` flag.

         .. code-block:: shell

            source run.sh --tunableop on

         Although this might increase the initial training time, it can result in a performance gain.

Interpreting the output
=======================

The training output is displayed in the terminal and simultaneously saved
to the ``output.txt`` file in the current directory. Key performance metrics are
also extracted and appended to the ``perf_pyt_mpt30b_training.csv`` file.

Key performance metrics include:

- Training logs: Real-time display of loss metrics, accuracy, and training progress.

- Model checkpoints: Periodically saved model snapshots for potential resume or evaluation.

- Performance metrics: Detailed summaries of training speed and training loss metrics.

- Performance (throughput/samples_per_sec)

  Overall throughput, measuring the total samples processed per second. Higher values indicate better hardware utilization.

- Performance per device (throughput/samples_per_sec)

  Throughput on a per-device basis, showing how each GPU or CPU is performing.

- Language Cross Entropy (metrics/train/LanguageCrossEntropy)

  Measures prediction accuracy. Lower cross entropy suggests the model’s output is closer to the expected distribution.

- Training loss (loss/train/total)

  Overall training loss. A decreasing trend indicates the model is learning effectively.

@@ -443,7 +443,7 @@ benchmarking, see the version-specific documentation.
     - 6.3.0
     - 2.7.0a0+git637433
     -
       * `Documentation <https://rocm.docs.amd.com/en/docs-6.3.3/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/pytorch-training/v25.4/images/sha256-fa98a9aa69968e654466c06f05aaa12730db79b48b113c1ab4f7a5fe6920a20b>`_

   * - v25.3

@@ -45,7 +45,6 @@
(communication-libraries)=

* {doc}`RCCL <rccl:index>`
* [rocSHMEM](https://github.com/ROCm/rocSHMEM)
:::

:::{grid-item-card} Math

@@ -46,8 +46,6 @@ subtrees:
          title: Train a model with PyTorch
        - file: how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext
          title: Train a model with JAX MaxText
        - file: how-to/rocm-for-ai/training/benchmark-docker/mpt-llm-foundry
          title: Train a model with LLM Foundry
        - file: how-to/rocm-for-ai/training/scale-model-training.rst
          title: Scale model training

@@ -10,7 +10,7 @@ ROCm is a software stack, composed primarily of open-source software, that
provides the tools for programming AMD Graphics Processing Units (GPUs), from
low-level kernels to high-level end-user applications.

.. image:: data/rocm-software-stack-6_4_0.jpg
   :width: 800
   :alt: AMD's ROCm software stack and enabling technologies.
   :align: center

@@ -52,7 +52,6 @@ Communication
   :header: "Component", "Description"

   ":doc:`RCCL <rccl:index>`", "Standalone library that provides multi-GPU and multi-node collective communication primitives"
   "`rocSHMEM <https://github.com/ROCm/rocSHMEM>`_", "Runtime that provides GPU-centric networking through an OpenSHMEM-like interface. This intra-kernel networking library simplifies application code complexity and enables more fine-grained communication/computation overlap than traditional host-driven networking"

Math
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Submodule submodule-srcs/AMDMIGraphX added at e8b735f65e
Submodule submodule-srcs/HIP added at a0a2dd7fc8
Submodule submodule-srcs/HIPIFY added at b803a5270b
Submodule submodule-srcs/MIOpen added at a583315f3e
Submodule submodule-srcs/MIVisionX added at a2b69e5b30
Submodule submodule-srcs/ROCK-Kernel-Driver added at e796ccd5f9
Submodule submodule-srcs/ROCR-Runtime added at 4264d016ec
Submodule submodule-srcs/ROCdbgapi added at 59be7ff0aa
Submodule submodule-srcs/ROCgdb added at 401bb21f2f
Submodule submodule-srcs/ROCmValidationSuite added at fb251886ed
Submodule submodule-srcs/Tensile added at be49885fce
Submodule submodule-srcs/TransferBench added at 3ea2f226ec
Submodule submodule-srcs/amdsmi added at ede62f2534
Submodule submodule-srcs/clr added at 0f2d602424
Submodule submodule-srcs/composable_kernel added at a8c5bd9b9a
Submodule submodule-srcs/half added at 1ddada2251
Submodule submodule-srcs/hip-tests added at 3573bde0c2
Submodule submodule-srcs/hipBLAS added at 0a335435e9
Submodule submodule-srcs/hipBLAS-common added at 7c1566ba46
Submodule submodule-srcs/hipBLASLt added at a999b0721d
Submodule submodule-srcs/hipCUB added at a6005943c5
Submodule submodule-srcs/hipFFT added at 396169c84a
Submodule submodule-srcs/hipRAND added at d2516cc199
Submodule submodule-srcs/hipSOLVER added at ca0de3c9c9
Submodule submodule-srcs/hipSPARSE added at a6c62e48eb
Submodule submodule-srcs/hipSPARSELt added at f3f4f590a4
Submodule submodule-srcs/hipTensor added at e5529b9291
Submodule submodule-srcs/hipfort added at f3d6aa3e86
Submodule submodule-srcs/hipother added at 49b1588f83
Submodule submodule-srcs/llvm-project added at c7fe45cf4b
Submodule submodule-srcs/openmp-extras/aomp added at 1cd9ec1017
Submodule submodule-srcs/openmp-extras/aomp-extras added at 97567952ae
Submodule submodule-srcs/rccl added at 7b86f83d84
Submodule submodule-srcs/rdc added at be34d624f6
Submodule submodule-srcs/rocAL added at 373ef865ac
Submodule submodule-srcs/rocALUTION added at 9713084af8
Submodule submodule-srcs/rocBLAS added at 80e5394d6a
Submodule submodule-srcs/rocDecode added at a2a7b63cad
Submodule submodule-srcs/rocFFT added at 058ba87fdc
Submodule submodule-srcs/rocJPEG added at 73d36d35d9
Submodule submodule-srcs/rocPRIM added at d8771ec18a
Submodule submodule-srcs/rocPyDecode added at 848e49d29d
Submodule submodule-srcs/rocRAND added at 4d5d3a88d1
Submodule submodule-srcs/rocSHMEM added at 7702b3c0f3
Submodule submodule-srcs/rocSOLVER added at db754e3f55
Submodule submodule-srcs/rocSPARSE added at 4953add0ae
Submodule submodule-srcs/rocThrust added at 6bf2777019
Submodule submodule-srcs/rocWMMA added at 1a5b623166
Submodule submodule-srcs/rocm-cmake added at ecc716b97c
Submodule submodule-srcs/rocm-core added at 73dae9c82a
Submodule submodule-srcs/rocm-examples added at 3bbd2987a3
Submodule submodule-srcs/rocm_bandwidth_test added at 84b8ddd268
Submodule submodule-srcs/rocm_smi_lib added at 03a4530b68
Submodule submodule-srcs/rocminfo added at 6ea2ba38c8
Submodule submodule-srcs/rocprofiler added at 40da7312a0
Submodule submodule-srcs/rocprofiler-compute added at a11d700e10
Submodule submodule-srcs/rocprofiler-register added at 7c6cd44f63
Submodule submodule-srcs/rocprofiler-sdk added at e8e49fe769
Submodule submodule-srcs/rocprofiler-systems added at 9c07bf3ab0
Submodule submodule-srcs/rocr_debug_agent added at 5c49ec91fd
Submodule submodule-srcs/roctracer added at f55a694381
Submodule submodule-srcs/rpp added at 5fb204ca70
Submodule submodule-srcs/spirv-llvm-translator added at 8ed662a93b
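The listing above pins every component source under submodule-srcs/ to a fixed commit. As a minimal sketch of how such a pinned tree is materialized locally (assuming the remote URLs recorded in the superproject's .gitmodules are reachable, and checking out whichever branch this compare view targets), standard git commands suffice:

    # Clone the superproject and enter it.
    git clone https://github.com/ROCm/ROCm.git && cd ROCm

    # Fetch each submodule at exactly the commit recorded in the
    # superproject (the "added at" SHA above), including nested ones.
    git submodule update --init --recursive

    # Print "<SHA> <path>" for every submodule; the SHAs should match
    # the listing above.
    git submodule status --recursive

Pinning by SHA rather than branch keeps every CI build of the superproject reproducible: moving a component forward requires an explicit commit that updates the recorded submodule pointer.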