Mirror of https://github.com/ROCm/ROCm.git (synced 2026-01-10 15:18:11 -05:00)

Compare commits: 22 commits, docs_6.3.3 ... docs_versi
| Author | SHA1 | Date |
|---|---|---|
| | cd85ccd539 | |
| | de4ac7a5a3 | |
| | fa0e212906 | |
| | 84001e176e | |
| | 9cd2706fdb | |
| | 13be0b6a51 | |
| | efefa0f43e | |
| | 4d15adf284 | |
| | 1fb42c2591 | |
| | e984954088 | |
| | cd57bc8186 | |
| | d7d3d02cd0 | |
| | dd7164cada | |
| | bf3a437cd5 | |
| | 4be8096109 | |
| | 934767322b | |
| | 1ea1c5c6e0 | |
| | 389fa7071b | |
| | 91e0cf5ecd | |
| | 1de89ef590 | |
| | 27cb8ea927 | |
| | 0c6f660d59 | |
@@ -84,6 +84,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -67,6 +67,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -77,6 +77,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -67,6 +67,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -87,7 +87,6 @@ jobs:
parameters:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
gpuTarget: $(JOB_GPU_TARGET)

- job: Tensile_testing
timeoutInMinutes: 90

@@ -42,6 +42,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -48,6 +48,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -52,6 +52,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -63,6 +63,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -72,6 +72,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -156,6 +158,7 @@ jobs:
- deps

- job: hipBLASLt_testing
timeoutInMinutes: 120
dependsOn: hipBLASLt
condition: and(succeeded(), eq(variables.ENABLE_GFX942_TESTS, 'true'), not(containsValue(split(variables.DISABLED_GFX942_TESTS, ','), variables['Build.DefinitionName'])))
variables:

@@ -43,6 +43,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -54,6 +54,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -45,6 +45,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -57,6 +57,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -52,6 +52,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -105,6 +105,7 @@ jobs:
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
-DCMAKE_Fortran_COMPILER=f95
-DAMDGPU_TARGETS=$(JOB_GPU_TARGET)
-DTensile_LOGIC=
-DTensile_CPU_THREADS=

@@ -42,6 +42,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -51,6 +51,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -29,7 +29,7 @@ jobs:
value: '$(Build.BinariesDirectory)/amdgcn/bitcode'
- name: HIP_PATH
value: '$(Agent.BuildDirectory)/rocm'
pool: ${{ variables.MEDIUM_BUILD_POOL }}
pool: ${{ variables.ULTRA_BUILD_POOL }}
workspace:
clean: all
steps:

@@ -51,7 +51,7 @@ jobs:
extraBuildFlags: >-
-DCMAKE_PREFIX_PATH="$(Build.BinariesDirectory)/llvm;$(Build.BinariesDirectory)"
-DCMAKE_BUILD_TYPE=Release
-DLLVM_ENABLE_PROJECTS=clang;lld;clang-tools-extra;mlir
-DLLVM_ENABLE_PROJECTS=clang;lld;clang-tools-extra;mlir;flang
-DLLVM_ENABLE_RUNTIMES=compiler-rt;libunwind;libcxx;libcxxabi
-DCLANG_ENABLE_AMDCLANG=ON
-DLLVM_TARGETS_TO_BUILD=AMDGPU;X86

@@ -85,7 +85,7 @@ jobs:
componentName: check-llvm
testDir: 'llvm/build'
testExecutable: './bin/llvm-lit'
testParameters: '-q --xunit-xml-output=llvm_test_output.xml ./test'
testParameters: '-q --xunit-xml-output=llvm_test_output.xml --filter-out="live-debug-values-spill-tracking" ./test'
testOutputFile: llvm_test_output.xml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
parameters:
@@ -1,140 +0,0 @@
# largely referenced from: https://github.com/ROCm/omnitrace/blob/main/.github/workflows/ubuntu-jammy.yml
parameters:
- name: checkoutRepo
type: string
default: 'self'
- name: checkoutRef
type: string
default: ''
- name: aptPackages
type: object
default:
- autoconf
- autotools-dev
- bison
- build-essential
- bzip2
- clang
- cmake
- environment-modules
- g++-12
- libdrm-dev
- libfabric-dev
- libiberty-dev
- libpapi-dev
- libpfm4-dev
- libtool
- libopenmpi-dev
- m4
- openmpi-bin
- software-properties-common
- python3-pip
- texinfo
- zlib1g-dev
- name: pipModules
type: object
default:
- numpy
- perfetto
- dataclasses
- name: rocmDependencies
type: object
default:
- aomp
- clr
- llvm-project
- rccl
- rocm-core
- rocm_smi_lib
- rocminfo
- ROCR-Runtime
- rocprofiler
- rocprofiler-register
- roctracer

jobs:
- job: omnitrace
variables:
- group: common
- template: /.azuredevops/variables-global.yml
pool: ${{ variables.MEDIUM_BUILD_POOL }}
workspace:
clean: all
strategy:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
checkoutRef: ${{ parameters.checkoutRef }}
dependencyList: ${{ parameters.rocmDependencies }}
gpuTarget: $(JOB_GPU_TARGET)
- task: Bash@3
displayName: ROCm symbolic link
inputs:
targetType: inline
script: |
sudo rm -rf /opt/rocm
sudo ln -s $(Agent.BuildDirectory)/rocm /opt/rocm
- task: Bash@3
displayName: Add ROCm binaries to PATH
inputs:
targetType: inline
script: echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin"
- task: Bash@3
displayName: Add ROCm compilers to PATH
inputs:
targetType: inline
script: echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin"
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
# build flags reference: https://rocm.docs.amd.com/projects/omnitrace/en/latest/install/install.html
extraBuildFlags: >-
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DOMNITRACE_BUILD_TESTING=ON
-DOMNITRACE_BUILD_DYNINST=ON
-DOMNITRACE_BUILD_LIBUNWIND=ON
-DDYNINST_BUILD_TBB=ON
-DDYNINST_BUILD_ELFUTILS=ON
-DDYNINST_BUILD_LIBIBERTY=ON
-DDYNINST_BUILD_BOOST=ON
-DOMNITRACE_USE_PAPI=ON
-DOMNITRACE_USE_MPI=ON
-DAMDGPU_TARGETS=$(JOB_GPU_TARGET)
multithreadFlag: -- -j32
- task: Bash@3
displayName: Set up omnitrace env
inputs:
targetType: inline
script: source share/omnitrace/setup-env.sh
workingDirectory: build
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
parameters:
componentName: omnitrace
- task: Bash@3
displayName: Remove ROCm binaries from PATH
condition: always()
inputs:
targetType: inline
script: echo "##vso[task.setvariable variable=PATH]$(echo $PATH | sed -e 's;:$(Agent.BuildDirectory)/rocm/bin;;' -e 's;^/;;' -e 's;/$;;')"
- task: Bash@3
displayName: Remove ROCm compilers from PATH
condition: always()
inputs:
targetType: inline
script: echo "##vso[task.setvariable variable=PATH]$(echo $PATH | sed -e 's;:$(Agent.BuildDirectory)/rocm/llvm/bin;;' -e 's;^/;;' -e 's;/$;;')"
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
parameters:
gpuTarget: $(JOB_GPU_TARGET)
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
parameters:
gpuTarget: $(JOB_GPU_TARGET)
@@ -64,6 +64,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -65,6 +65,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -73,6 +73,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- task: Bash@3
displayName: 'Register libjpeg-turbo packages'

@@ -60,6 +60,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -75,6 +75,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -55,6 +55,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -47,6 +47,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -42,6 +42,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -48,6 +48,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -45,6 +45,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -58,6 +58,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -56,6 +56,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -47,6 +47,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -57,6 +57,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:
@@ -8,7 +8,6 @@ parameters:
- name: aptPackages
type: object
default:
- cmake
- doxygen
- doxygen-doc
- ninja-build

@@ -18,7 +17,9 @@ parameters:
type: object
default:
- cget
- cmake==3.20.5
- ninja
- rocm-docs-core

jobs:
- job: rocm_cmake

@@ -33,21 +34,29 @@ jobs:
parameters:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
- task: Bash@3
displayName: Add CMake to PATH
inputs:
targetType: inline
script: echo "##vso[task.prependpath]$(python3 -m site --user-base)/bin"
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
# extra steps for ctest suite
- script: |
python -m pip install -r $(Build.SourcesDirectory)/docs/requirements.txt
python -m pip install -r $(Build.SourcesDirectory)/test/docsphinx/docs/.sphinx/requirements.txt
git config --global user.email "you@example.com"
git config --global user.name "Your Name"
displayName: "ctest setup"
- task: Bash@3
displayName: CTest setup
inputs:
targetType: inline
script: |
python -m pip install -r $(Build.SourcesDirectory)/docs/requirements.txt
python -m pip install -r $(Build.SourcesDirectory)/test/docsphinx/docs/.sphinx/requirements.txt
git config --global user.email "you@example.com"
git config --global user.name "Your Name"
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
parameters:
componentName: rocm-cmake
testParameters: '-E "pass-version-parent" -VV --output-on-failure --force-new-ctest-process --output-junit test_output.xml'
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml

@@ -56,4 +65,3 @@ jobs:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
environment: combined
gpuTarget: $(JOB_GPU_TARGET)
@@ -75,6 +75,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -10,6 +10,7 @@ parameters:
default:
- cmake
- libdrm-dev
- pkg-config
- python3-pip

jobs:

@@ -39,7 +40,6 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
gpuTarget: $(JOB_GPU_TARGET)

- job: rocm_smi_lib_testing
dependsOn: rocm_smi_lib

@@ -59,6 +59,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -57,6 +57,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -72,6 +72,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -57,6 +57,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:

@@ -69,7 +69,6 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
gpuTarget: $(JOB_GPU_TARGET)

- job: rocr_debug_agent_testing
dependsOn: rocr_debug_agent

@@ -11,6 +11,7 @@ parameters:
- cmake
- doxygen
- graphviz
- libdrm-amdgpu-dev
- ninja-build
- python3-pip
- name: pipModules

@@ -49,11 +50,14 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
registerROCmPackages: true
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:

@@ -85,6 +89,7 @@ jobs:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
gpuTarget: $(JOB_GPU_TARGET)
registerROCmPackages: true

- job: roctracer_testing
dependsOn: roctracer

@@ -104,6 +109,8 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
pipModules: ${{ parameters.pipModules }}
registerROCmPackages: true
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml
parameters:

@@ -128,3 +135,4 @@ jobs:
pipModules: ${{ parameters.pipModules }}
environment: test
gpuTarget: $(JOB_GPU_TARGET)
registerROCmPackages: true

@@ -57,6 +57,8 @@ jobs:
matrix:
gfx942:
JOB_GPU_TARGET: gfx942
gfx90a:
JOB_GPU_TARGET: gfx90a
steps:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:
.azuredevops/tag-builds/TransferBench.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
variables:
- group: common
- template: /.azuredevops/variables-global.yml

parameters:
- name: checkoutRef
type: string
default: refs/tags/$(LATEST_RELEASE_TAG)

resources:
repositories:
- repository: pipelines_repo
type: github
endpoint: ROCm
name: ROCm/ROCm
- repository: release_repo
type: github
endpoint: ROCm
name: ROCm/TransferBench
ref: ${{ parameters.checkoutRef }}

trigger: none
pr: none

jobs:
- template: ${{ variables.CI_COMPONENT_PATH }}/TransferBench.yml
parameters:
checkoutRepo: release_repo
checkoutRef: ${{ parameters.checkoutRef }}
@@ -222,13 +222,13 @@ parameters:
hasGpuTarget: false
rocm-examples:
pipelineId: $(ROCM_EXAMPLES_PIPELINE_ID)
stagingBranch: develop
mainlineBranch: develop
stagingBranch: amd-staging
mainlineBranch: amd-mainline
hasGpuTarget: true
rocminfo:
pipelineId: $(ROCMINFO_PIPELINE_ID)
stagingBranch: amd-staging
mainlineBranch: amd-master
mainlineBranch: amd-mainline
hasGpuTarget: false
rocMLIR:
pipelineId: $(ROCMLIR_PIPELINE_ID)

@@ -262,7 +262,7 @@ parameters:
hasGpuTarget: true
rocprofiler-compute:
pipelineId: $(ROCPROFILER_COMPUTE_PIPELINE_ID)
stagingBranch: amd-staging
stagingBranch: develop
mainlineBranch: amd-mainline
hasGpuTarget: true
rocprofiler-register:

@@ -33,7 +33,6 @@ parameters:
- aomp
- HIPIFY
- MIVisionX
- rocm-cmake
- rocm_smi_lib
- rocprofiler-sdk
- roctracer

@@ -28,13 +28,13 @@ variables:
- name: GFX942_TEST_POOL
value: gfx942_test_pool
- name: LATEST_RELEASE_VERSION
value: 6.3.2
value: 6.3.3
- name: REPO_RADEON_VERSION
value: 6.3.2
value: 6.3.3
- name: NEXT_RELEASE_VERSION
value: 6.4.0
- name: LATEST_RELEASE_TAG
value: rocm-6.3.2
value: rocm-6.3.3
- name: AMDMIGRAPHX_GFX942_TEST_PIPELINE_ID
value: 197
- name: AMDMIGRAPHX_PIPELINE_ID
@@ -156,7 +156,6 @@ HCA
HGX
HIPCC
HIPExtension
HIPification
HIPIFY
HIPification
HIPify

@@ -66,11 +66,10 @@ project-specific steps. Refer to each repository's PR process for any additional
during our release cycle, as coordinated by the maintainer
* We'll inform you once your change is committed

:::{important}
By creating a PR, you agree to allow your contribution to be licensed under the
terms of the LICENSE.txt file in the corresponding repository. Different repositories may use different
licenses.
:::
> [!IMPORTANT]
> By creating a PR, you agree to allow your contribution to be licensed under the
> terms of the LICENSE.txt file in the corresponding repository. Different repositories may use different
> licenses.

You can look up each license on the [ROCm licensing](https://rocm.docs.amd.com/en/latest/about/license.html) page.
@@ -116,7 +116,7 @@ bash install-prerequisites.sh
# For ubuntu22.04 system
cd ROCm/tools/rocm-build/docker/ubuntu22
cp * /tmp && cd /tmp
bash install-prerequisities.sh
bash install-prerequisites.sh
# For ubuntu24.04 system
cd ROCm/tools/rocm-build/docker/ubuntu24
cp * /tmp && cd /tmp

@@ -72,7 +72,7 @@ the |docker-icon| icon to view the image on Docker Hub.

<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.12-tf2.17-dev/images/sha256-fd2653f436880366cc874aa24264ca9dabd892d76ccb63fb807debba459bcaaf"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.17.0-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.17.0-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.12.4 <https://www.python.org/downloads/release/python-3124/>`_
- `TensorBoard 2.17.1 <https://github.com/tensorflow/tensorboard/tree/2.17.1>`_

@@ -81,7 +81,7 @@ the |docker-icon| icon to view the image on Docker Hub.

<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.10-tf2.17-dev/images/sha256-8a5eb7443798935dd269575e2abae847b702e1dfb06766ab84f081a6314d8b95"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.17.0-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.17.0-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `TensorBoard 2.17.1 <https://github.com/tensorflow/tensorboard/tree/2.17.1>`_

@@ -90,7 +90,7 @@ the |docker-icon| icon to view the image on Docker Hub.

<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.12-tf2.16-dev/images/sha256-8fc939b10cdd6d2b11407474880d4c8ab2b52ab6e2d1743c921fc2adbfd0422f"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.16.2-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.16.2-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.12.4 <https://www.python.org/downloads/release/python-3124/>`_
- `TensorBoard 2.16.2 <https://github.com/tensorflow/tensorboard/tree/2.16.2>`_

@@ -99,7 +99,7 @@ the |docker-icon| icon to view the image on Docker Hub.

<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.10-tf2.16-dev/images/sha256-a4cc6ab23d59fdf5459ceac1f0a603e6c16ae7f885d30e42c0c2b3ac60c2ad10"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.16.2-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.16.2-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `TensorBoard 2.16.2 <https://github.com/tensorflow/tensorboard/tree/2.16.2>`_

@@ -108,7 +108,7 @@ the |docker-icon| icon to view the image on Docker Hub.

<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.10-tf2.15-dev/images/sha256-60887c488421184adcb60b9ed4f72a8bd7bdb64d238e50943ca7cbde38e4aa48"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

- `tensorflow-rocm 2.15.1 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.15.1-cp310-cp310-manylinux_2_28_x86_64.whl>`_
- `tensorflow-rocm 2.15.1 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.15.1-cp310-cp310-manylinux_2_28_x86_64.whl>`_
- dev
- `Python 3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `TensorBoard 2.15.2 <https://github.com/tensorflow/tensorboard/tree/2.15.2>`_
@@ -1,916 +0,0 @@
.. meta::
:description: PyTorch compatibility
:keywords: GPU, PyTorch compatibility

********************************************************************************
PyTorch compatibility
********************************************************************************

`PyTorch <https://pytorch.org/>`_ is an open-source tensor library designed for
deep learning. PyTorch on ROCm provides mixed-precision and large-scale training
using `MIOpen <https://github.com/ROCm/MIOpen>`_ and
`RCCL <https://github.com/ROCm/rccl>`_ libraries.

ROCm support for PyTorch is upstreamed into the official PyTorch repository. Due to independent
compatibility considerations, this results in two distinct release cycles for PyTorch on ROCm:

- ROCm PyTorch release:

  - Provides the latest version of ROCm but doesn't immediately support the latest stable PyTorch
    version.

  - Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
    pre-installed.

  - ROCm PyTorch repository: `<https://github.com/rocm/pytorch>`__

  - See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.

- Official PyTorch release:

  - Provides the latest stable version of PyTorch but doesn't immediately support the latest ROCm version.

  - Official PyTorch repository: `<https://github.com/pytorch/pytorch>`__

  - See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
    or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.

The upstream PyTorch includes an automatic HIPification solution that automatically generates HIP
source code from the CUDA backend. This approach allows PyTorch to support ROCm without requiring
manual code modifications.

ROCm's development is aligned with the stable release of PyTorch while upstream PyTorch testing uses
the stable release of ROCm to maintain consistency.
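A quick way to confirm that a ROCm build of PyTorch is active is to query the HIP backend from Python; the snippet below is a minimal sketch (assuming PyTorch is already installed) and relies on the fact that ROCm builds intentionally reuse the ``torch.cuda`` namespace:

.. code-block:: python

   import torch

   print(torch.__version__)          # PyTorch version
   print(torch.version.hip)          # HIP/ROCm version string; None on CUDA-only builds
   print(torch.cuda.is_available())  # True when an AMD GPU is visible to the ROCm runtime
   if torch.cuda.is_available():
       print(torch.cuda.get_device_name(0))  # reported device name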
.. _pytorch-docker-compat:

Docker image compatibility
================================================================================

AMD validates and publishes ready-made `PyTorch <https://hub.docker.com/r/rocm/pytorch>`_
images with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are validated for `ROCm 6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_.

.. list-table:: PyTorch Docker image components
:header-rows: 1
:class: docker-image-compatibility

* - Docker
- PyTorch
- Ubuntu
- Python
- Apex
- torchvision
- TensorBoard
- MAGMA
- UCX
- OMPI
- OFED

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-98ddf20333bd01ff749b8092b1190ee369a75d3b8c71c2fac80ffdcb1a98d529?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 24.04
- `3.12 <https://www.python.org/downloads/release/python-3128/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-402c9b4f1a6b5a81c634a1932b56cbe01abb699cfcc7463d226276997c6cf8ea?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-e0608b55d408c3bfe5c19fdd57a4ced3e0eb3a495b74c309980b60b156c526dd?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.9 <https://www.python.org/downloads/release/python-3918/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-652cf25263d05b1de548222970aeb76e60b12de101de66751264709c0d0ff9d8?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `2.3.0 <https://github.com/ROCm/pytorch/tree/release/2.3>`_
- 22.04
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.3.0 <https://github.com/ROCm/apex/tree/release/1.3.0>`_
- `0.18.0 <https://github.com/pytorch/vision/tree/v0.18.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-051976f26beab8f9aa65d999e3ad546c027b39240a0cc3ee81b114a9024f2912?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
- 22.04
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
- `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-88c839a364d109d3748c100385bfa100d28090d25118cc723fd0406390ab2f7e?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
- 20.04
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
- `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
- `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-994424ed07a63113f79dd9aa72159124c00f5fbfe18127151e6658f7d0b6f821?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
- 22.04
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

* - .. raw:: html

<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-7b8139fe40a9aeb4bca3aecd15c22c1fa96e867d93479fa3a24fdeeeeafa1219?context=explore"><i class="fab fa-docker fa-lg"></i></a>

- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
- 20.04
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

Critical ROCm libraries for PyTorch
================================================================================

The functionality of PyTorch with ROCm is shaped by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers.

.. list-table::
:header-rows: 1

* - ROCm library
- Version
- Purpose
- Used in
* - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
- 1.1.0
- Enables faster execution of core operations like matrix multiplication
(GEMM), convolutions and transformations.
- Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
and ``torch.nn.MultiheadAttention``.
* - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
- 2.3.0
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
matrix and vector operations.
- Supports operations like matrix multiplication, matrix-vector products,
and tensor contractions. Utilized in both dense and batched linear
algebra operations.
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
- 0.10.0
- hipBLASLt is an extension of the hipBLAS library, providing additional
features like epilogues fused into the matrix multiplication kernel or
use of integer tensor cores.
- Accelerates operations like ``torch.matmul``, ``torch.mm``, and the
matrix multiplications used in convolutional and linear layers.
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
- 3.3.0
- Provides a C++ template library for parallel algorithms for reduction,
scan, sort and select.
- Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
and ``torch.topk``. Operations on sparse tensors or tensors with
irregular shapes often involve scanning, sorting, and filtering, which
hipCUB handles efficiently.
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
- 1.0.17
- Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
- Used in functions like the ``torch.fft`` module.
* - `hipRAND <https://github.com/ROCm/hipRAND>`_
- 2.11.0
- Provides fast random number generation for GPUs.
- Used by ``torch.rand``, ``torch.randn`` and stochastic layers like
``torch.nn.Dropout``.
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
- 2.3.0
- Provides GPU-accelerated solvers for linear systems, eigenvalues, and
singular value decompositions (SVD).
- Supports functions like ``torch.linalg.solve``,
``torch.linalg.eig``, and ``torch.linalg.svd``.
* - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
- 3.1.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse tensor operations (``torch.sparse``).
* - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
- 0.2.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse tensor operations (``torch.sparse``).
* - `hipTensor <https://github.com/ROCm/hipTensor>`_
- 1.4.0
- Optimizes high-performance tensor operations, such as contractions.
- Accelerates tensor algebra, especially in deep learning and scientific
computing.
* - `MIOpen <https://github.com/ROCm/MIOpen>`_
- 3.3.0
- Optimizes deep learning primitives such as convolutions, pooling,
normalization, and activation functions.
- Speeds up convolutional neural networks (CNNs), recurrent neural
networks (RNNs), and other layers. Used in operations like
``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
* - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
- 2.11.0
- Adds graph-level optimizations, ONNX model and mixed-precision support,
and enables Ahead-of-Time (AOT) compilation.
- Speeds up inference models and executes ONNX models for
compatibility with other frameworks.
``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
* - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
- 3.1.0
- Optimizes acceleration for computer vision and AI workloads like
preprocessing, augmentation, and inferencing.
- Faster data preprocessing and augmentation pipelines for datasets like
ImageNet or COCO; easy to integrate into PyTorch's ``torch.utils.data``
and ``torchvision`` workflows.
* - `rocAL <https://github.com/ROCm/rocAL>`_
- 2.1.0
- Accelerates the data pipeline by offloading intensive preprocessing and
augmentation tasks. rocAL is part of MIVisionX.
- Easy to integrate into PyTorch's ``torch.utils.data`` and
``torchvision`` data load workloads.
* - `RCCL <https://github.com/ROCm/rccl>`_
- 2.21.5
- Optimizes multi-GPU communication for operations like AllReduce and
Broadcast.
- Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
Handles communication in multi-GPU setups.
* - `rocDecode <https://github.com/ROCm/rocDecode>`_
- 0.8.0
- Provides hardware-accelerated data decoding capabilities, particularly
for image, video, and other dataset formats.
- Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
and ``torch.distributed``.
* - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
- 0.6.0
- Provides hardware-accelerated JPEG image decoding and encoding.
- GPU-accelerated ``torchvision.io.decode_jpeg`` and
``torchvision.io.encode_jpeg``; can be integrated in
``torch.utils.data`` and ``torchvision``.
* - `RPP <https://github.com/ROCm/RPP>`_
- 1.9.1
- Speeds up data augmentation, transformation, and other preprocessing steps.
- Easy to integrate into PyTorch's ``torch.utils.data`` and
``torchvision`` data load workloads.
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
- 3.3.0
- Provides a C++ template library for parallel algorithms like sorting,
reduction, and scanning.
- Utilized in backend operations for tensor computations requiring
parallel processing.
* - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
- 1.6.0
- Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix
multiplication (GEMM) and accumulation operations with mixed precision
support.
- Linear layers (``torch.nn.Linear``), convolutional layers
(``torch.nn.Conv2d``), attention layers, general tensor operations that
involve matrix products, such as ``torch.matmul``, ``torch.bmm``, and
more.
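The mapping above can be exercised directly from Python. The following sketch is illustrative only (which backend library a given call dispatches to depends on the PyTorch and ROCm versions listed); it touches the random-number, GEMM, convolution, and FFT paths:

.. code-block:: python

   import torch

   device = "cuda"  # ROCm builds expose AMD GPUs through the CUDA device type
   a = torch.randn(512, 512, device=device, dtype=torch.float16)   # random numbers (hipRAND)
   b = torch.randn(512, 512, device=device, dtype=torch.float16)
   c = a @ b                                                        # GEMM path (hipBLAS/hipBLASLt)
   x = torch.randn(8, 3, 224, 224, device=device)
   y = torch.nn.Conv2d(3, 16, 3).to(device)(x)                      # convolution path (MIOpen)
   s = torch.fft.rfft(torch.randn(1024, device=device))             # FFT path (hipFFT)
   torch.cuda.synchronize()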
Supported and unsupported features
================================================================================

The following section maps GPU-accelerated PyTorch features to their supported
ROCm and PyTorch versions.

torch
--------------------------------------------------------------------------------

`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
PyTorch, providing data structures for multi-dimensional tensors and
implementing mathematical operations on them. It also includes utilities for
efficient serialization of tensors and arbitrary data types, along with various
other tools.

Tensor data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The data type of a tensor is specified using the ``dtype`` attribute or argument, and PyTorch supports a wide range of data types for different use cases.

The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_'s single data types:

.. list-table::
:header-rows: 1

* - Data type
- Description
- Since PyTorch
- Since ROCm
* - ``torch.float8_e4m3fn``
- 8-bit floating point, e4m3
- 2.3
- 5.5
* - ``torch.float8_e5m2``
- 8-bit floating point, e5m2
- 2.3
- 5.5
* - ``torch.float16`` or ``torch.half``
- 16-bit floating point
- 0.1.6
- 2.0
* - ``torch.bfloat16``
- 16-bit floating point
- 1.6
- 2.6
* - ``torch.float32`` or ``torch.float``
- 32-bit floating point
- 0.1.12_2
- 2.0
* - ``torch.float64`` or ``torch.double``
- 64-bit floating point
- 0.1.12_2
- 2.0
* - ``torch.complex32`` or ``torch.chalf``
- PyTorch provides native support for 32-bit complex numbers
- 1.6
- 2.0
* - ``torch.complex64`` or ``torch.cfloat``
- PyTorch provides native support for 64-bit complex numbers
- 1.6
- 2.0
* - ``torch.complex128`` or ``torch.cdouble``
- PyTorch provides native support for 128-bit complex numbers
- 1.6
- 2.0
* - ``torch.uint8``
- 8-bit integer (unsigned)
- 0.1.12_2
- 2.0
* - ``torch.uint16``
- 16-bit integer (unsigned)
- 2.3
- Not natively supported
* - ``torch.uint32``
- 32-bit integer (unsigned)
- 2.3
- Not natively supported
* - ``torch.uint64``
- 64-bit integer (unsigned)
- 2.3
- Not natively supported
* - ``torch.int8``
- 8-bit integer (signed)
- 1.12
- 5.0
* - ``torch.int16`` or ``torch.short``
- 16-bit integer (signed)
- 0.1.12_2
- 2.0
* - ``torch.int32`` or ``torch.int``
- 32-bit integer (signed)
- 0.1.12_2
- 2.0
* - ``torch.int64`` or ``torch.long``
- 64-bit integer (signed)
- 0.1.12_2
- 2.0
* - ``torch.bool``
- Boolean
- 1.2
- 2.0
* - ``torch.quint8``
- Quantized 8-bit integer (unsigned)
- 1.8
- 5.0
* - ``torch.qint8``
- Quantized 8-bit integer (signed)
- 1.8
- 5.0
* - ``torch.qint32``
- Quantized 32-bit integer (signed)
- 1.8
- 5.0
* - ``torch.quint4x2``
- Quantized 4-bit integer (unsigned)
- 1.8
- 5.0

.. note::

Unsigned types aside from ``uint8`` currently have only limited support in
eager mode (they primarily exist to assist usage with ``torch.compile``).

The :doc:`ROCm precision support page <rocm:reference/precision-support>`
lists the native hardware support for each data type.
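A short sketch of selecting these data types in practice (assuming a GPU-enabled build; the 8-bit floating-point types require the PyTorch and ROCm minimums listed above):

.. code-block:: python

   import torch

   dev = "cuda"
   t_half = torch.ones(4, dtype=torch.float16, device=dev)
   t_bf16 = torch.ones(4, dtype=torch.bfloat16, device=dev)
   t_int8 = torch.zeros(4, dtype=torch.int8, device=dev)
   t_fp8  = torch.ones(4, device=dev).to(torch.float8_e4m3fn)  # PyTorch >= 2.3, ROCm >= 5.5
   print(t_half.dtype, t_bf16.dtype, t_int8.dtype, t_fp8.dtype)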
torch.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.cuda`` in PyTorch is a module that provides utilities and functions for
managing and utilizing AMD and NVIDIA GPUs. It enables GPU-accelerated
computations, memory management, and efficient execution of tensor operations,
leveraging ROCm and CUDA as the underlying frameworks.

.. list-table::
:header-rows: 1

* - Feature
- Description
- Since PyTorch
- Since ROCm
* - Device management
- Utilities for managing and interacting with GPUs.
- 0.4.0
- 3.8
* - Tensor operations on GPU
- Perform tensor operations such as addition and matrix multiplications on
the GPU.
- 0.4.0
- 3.8
* - Streams and events
- Streams allow overlapping computation and communication for optimized
performance; events enable synchronization.
- 1.6.0
- 3.8
* - Memory management
- Functions to manage and inspect memory usage like
``torch.cuda.memory_allocated()``, ``torch.cuda.max_memory_allocated()``,
``torch.cuda.memory_reserved()`` and ``torch.cuda.empty_cache()``.
- 0.3.0
- 1.9.2
* - Running process lists of memory management
- Return a human-readable printout of the running processes and their GPU
memory use for a given device with functions like
``torch.cuda.memory_stats()`` and ``torch.cuda.memory_summary()``.
- 1.8.0
- 4.0
* - Communication collectives
- A set of APIs that enable efficient communication between multiple GPUs,
allowing for distributed computing and data parallelism.
- 1.9.0
- 5.0
* - ``torch.cuda.CUDAGraph``
- Graphs capture sequences of GPU operations to minimize kernel launch
overhead and improve performance.
- 1.10.0
- 5.3
* - TunableOp
- A mechanism that allows certain operations to be more flexible and
optimized for performance. It enables automatic tuning of kernel
configurations and other settings to achieve the best possible
performance based on the specific hardware (GPU) and workload.
- 2.0
- 5.4
* - NVIDIA Tools Extension (NVTX)
- Integration with NVTX for profiling and debugging GPU performance using
NVIDIA's Nsight tools.
- 1.8.0
- ❌
* - Lazy loading NVRTC
- Delays JIT compilation with NVRTC until the code is explicitly needed.
- 1.13.0
- ❌
* - Jiterator (beta)
- Jiterator allows asynchronous data streaming into computation streams
during training loops.
- 1.13.0
- 5.2

.. Need to validate and extend.
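These calls work unchanged on ROCm. A minimal sketch (assuming at least one visible GPU) covering device management, memory inspection, and stream/event usage:

.. code-block:: python

   import torch

   print(torch.cuda.device_count(), torch.cuda.get_device_name(0))

   x = torch.randn(2048, 2048, device="cuda")
   print(torch.cuda.memory_allocated(), torch.cuda.memory_reserved())

   start = torch.cuda.Event(enable_timing=True)
   end = torch.cuda.Event(enable_timing=True)
   stream = torch.cuda.Stream()
   with torch.cuda.stream(stream):       # queue work on a side stream
       start.record()
       y = x @ x
       end.record()
   torch.cuda.synchronize()              # wait for all queued GPU work
   print(start.elapsed_time(end), "ms")
   torch.cuda.empty_cache()              # release cached allocator blocks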
torch.backends.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.backends.cuda`` is a PyTorch module that provides configuration options
and flags to control the behavior of CUDA or ROCm operations. It is part of the
PyTorch backend configuration system, which allows users to fine-tune how
PyTorch interacts with the CUDA or ROCm environment.

.. list-table::
:header-rows: 1

* - Option
- Description
- Since PyTorch
- Since ROCm
* - ``cufft_plan_cache``
- Manages caching of GPU FFT plans to optimize repeated FFT computations.
- 1.7.0
- 5.0
* - ``matmul.allow_tf32``
- Enables or disables the use of TensorFloat-32 (TF32) precision for
faster matrix multiplications on GPUs with Tensor Cores.
- 1.10.0
- ❌
* - ``matmul.allow_fp16_reduced_precision_reduction``
- Reduced precision reductions (e.g., with fp16 accumulation type) are
allowed with fp16 GEMMs.
- 2.0
- ❌
* - ``matmul.allow_bf16_reduced_precision_reduction``
- Reduced precision reductions are allowed with bf16 GEMMs.
- 2.0
- ❌
* - ``enable_cudnn_sdp``
- Globally enables cuDNN SDPA's kernels within SDPA.
- 2.0
- ❌
* - ``enable_flash_sdp``
- Globally enables or disables FlashAttention for SDPA.
- 2.1
- ❌
* - ``enable_mem_efficient_sdp``
- Globally enables or disables Memory-Efficient Attention for SDPA.
- 2.1
- ❌
* - ``enable_math_sdp``
- Globally enables or disables the PyTorch C++ implementation within SDPA.
- 2.1
- ❌

.. Need to validate and extend.

torch.backends.cudnn
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Supported ``torch`` options:

.. list-table::
:header-rows: 1

* - Option
- Description
- Since PyTorch
- Since ROCm
* - ``allow_tf32``
- TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
Ampere or newer GPUs.
- 1.12.0
- ❌
* - ``deterministic``
- A bool that, if True, causes cuDNN to only use deterministic
convolution algorithms.
- 1.12.0
- 6.0
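The switches above are ordinary module attributes and functions. A brief sketch follows; note that, per the tables, some toggles (such as the TF32 options) are accepted but have no effect on ROCm:

.. code-block:: python

   import torch

   torch.backends.cudnn.deterministic = True            # deterministic convolution algorithms (MIOpen on ROCm)
   torch.backends.cudnn.benchmark = False
   torch.backends.cuda.cufft_plan_cache.max_size = 32   # FFT plan cache (hipFFT on ROCm)
   print(torch.backends.cuda.matmul.allow_tf32)          # readable/settable, but a no-op on ROCm per the table
   print(torch.backends.cudnn.is_available(), torch.backends.cudnn.version())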
Automatic mixed precision: torch.amp
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.amp`` is the PyTorch package that automates the process of using both
16-bit (half-precision, float16) and 32-bit (single-precision, float32)
floating-point types in model training and inference.

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
     - Since PyTorch
     - Since ROCm
   * - Autocasting
     - Instances of autocast serve as context managers or decorators that allow
       regions of your script to run in mixed precision.
     - 1.9
     - 2.5
   * - Gradient scaling
     - To prevent underflow, “gradient scaling” multiplies the network’s
       loss(es) by a scale factor and invokes a backward pass on the scaled
       loss(es). Gradients flowing backward through the network are then
       scaled by the same factor. In other words, gradient values have a
       larger magnitude, so they don’t flush to zero.
     - 1.9
     - 2.5
   * - CUDA op-specific behavior
     - These ops always go through autocasting whether they are invoked as part
       of a ``torch.nn.Module``, as a function, or as a ``torch.Tensor`` method. If
       functions are exposed in multiple namespaces, they go through
       autocasting regardless of the namespace.
     - 1.9
     - 2.5
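
A minimal sketch of how autocasting and gradient scaling combine in a single
training step (the toy model, optimizer, and data are placeholders for
illustration):

.. code-block:: python

   import torch

   model = torch.nn.Linear(1024, 1024).cuda()
   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
   scaler = torch.cuda.amp.GradScaler()

   data = torch.randn(64, 1024, device="cuda")
   target = torch.randn(64, 1024, device="cuda")

   # Run the forward pass in mixed precision.
   with torch.autocast(device_type="cuda", dtype=torch.float16):
       loss = torch.nn.functional.mse_loss(model(data), target)

   scaler.scale(loss).backward()   # scale the loss to avoid fp16 underflow
   scaler.step(optimizer)          # unscale gradients, then update weights
   scaler.update()
   optimizer.zero_grad(set_to_none=True)
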
Distributed library features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The PyTorch distributed library includes a collective of parallelism modules, a
communications layer, and infrastructure for launching and debugging large
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.

The distributed library feature in PyTorch provides tools and APIs for building
and running distributed machine learning workflows. It allows training models
across multiple processes, GPUs, or nodes in a cluster, enabling efficient use
of computational resources and scalability for large-scale tasks.

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since PyTorch
     - Since ROCm
   * - TensorPipe
     - TensorPipe is a point-to-point communication library integrated into
       PyTorch for distributed training. It is designed to handle tensor data
       transfers efficiently between different processes or devices, including
       those on separate machines.
     - 1.8
     - 5.4
   * - Gloo
     - Gloo is designed for multi-machine and multi-GPU setups, enabling
       efficient communication and synchronization between processes. Gloo is
       one of the default backends for PyTorch's Distributed Data Parallel
       (DDP) and RPC frameworks, alongside other backends like NCCL and MPI.
     - 1.0
     - 2.0
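
A minimal single-process sketch of initializing the distributed package with
the Gloo backend. On multi-accelerator AMD systems the ``nccl`` backend
(provided by RCCL) is typically used instead, and the address, port, rank, and
world size below are placeholders normally set by a launcher such as
``torchrun``:

.. code-block:: python

   import os
   import torch.distributed as dist

   # Values a launcher would normally provide; hard-coded here for a single process.
   os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
   os.environ.setdefault("MASTER_PORT", "29500")

   dist.init_process_group(backend="gloo", rank=0, world_size=1)
   print(dist.get_backend(), dist.get_world_size())
   dist.destroy_process_group()
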
torch.compiler
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.compiler`` (AOT Autograd)
     - Autograd captures not only the user-level code, but also backpropagation,
       which results in capturing the backwards pass “ahead-of-time”. This
       enables acceleration of both forwards and backwards pass using
       ``TorchInductor``.
     - 2.0
     - 5.3
   * - ``torch.compiler`` (TorchInductor)
     - The default ``torch.compile`` deep learning compiler that generates fast
       code for multiple accelerators and backends. You need to use a backend
       compiler to make speedups through ``torch.compile`` possible. For AMD,
       NVIDIA, and Intel GPUs, it leverages OpenAI Triton as the key building block.
     - 2.0
     - 5.3
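
A minimal sketch of compiling a function with the default TorchInductor
backend; on AMD GPUs the generated kernels are emitted through Triton:

.. code-block:: python

   import torch

   def pointwise(x):
       return torch.sin(x) ** 2 + torch.cos(x) ** 2

   # TorchInductor is the default torch.compile backend.
   compiled = torch.compile(pointwise)

   x = torch.randn(1_000_000, device="cuda")
   # The first call triggers compilation; results should match eager mode.
   torch.testing.assert_close(compiled(x), pointwise(x))
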
torchaudio
--------------------------------------------------------------------------------

The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
utilities for processing audio data in PyTorch, such as audio loading,
transformations, and feature extraction.

To ensure GPU acceleration with ``torchaudio.transforms``, you need to move the
audio data (waveform tensor) explicitly to the GPU using ``.to('cuda')``, as
shown in the example after the table.

The following ``torchaudio`` features are GPU-accelerated.

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since torchaudio version
     - Since ROCm
   * - ``torchaudio.transforms.Spectrogram``
     - Generate spectrogram of an input waveform using STFT.
     - 0.6.0
     - 4.5
   * - ``torchaudio.transforms.MelSpectrogram``
     - Generate the mel-scale spectrogram of raw audio signals.
     - 0.9.0
     - 4.5
   * - ``torchaudio.transforms.MFCC``
     - Extract MFCC features from audio signals.
     - 0.9.0
     - 4.5
   * - ``torchaudio.transforms.Resample``
     - Resample a signal from one frequency to another.
     - 0.9.0
     - 4.5
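
The following sketch illustrates the ``.to('cuda')`` pattern, with a synthetic
waveform standing in for loaded audio:

.. code-block:: python

   import torch
   import torchaudio

   # A synthetic 1-second waveform stands in for a loaded audio file.
   sample_rate = 16_000
   waveform = torch.randn(1, sample_rate)

   # Move both the transform and the waveform to the GPU.
   transform = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate).to("cuda")
   mel = transform(waveform.to("cuda"))
   print(mel.shape, mel.device)
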
torchvision
--------------------------------------------------------------------------------

The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
provides datasets, model architectures, and common image transformations for
computer vision.

The following ``torchvision`` features are GPU-accelerated.

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since torchvision version
     - Since ROCm
   * - ``torchvision.transforms.functional``
     - Provides GPU-compatible transformations for image preprocessing like
       resize, normalize, rotate, and crop.
     - 0.2.0
     - 4.0
   * - ``torchvision.ops``
     - GPU-accelerated operations for object detection and segmentation tasks,
       such as ``torchvision.ops.roi_align``, ``torchvision.ops.nms``, and
       ``torchvision.ops.box_convert``.
     - 0.6.0
     - 3.3
   * - ``torchvision.models`` with ``.to('cuda')``
     - ``torchvision`` provides several pre-trained models (ResNet, Faster
       R-CNN, Mask R-CNN, ...) that can run on CUDA for faster inference and
       training.
     - 0.1.6
     - 2.x
   * - ``torchvision.io``
     - Video decoding and frame extraction using GPU acceleration with NVIDIA’s
       NVDEC and nvJPEG (rocJPEG) on CUDA-enabled GPUs.
     - 0.4.0
     - 6.3
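
A minimal sketch combining a ``torchvision`` model and a GPU-accelerated op.
``weights=None`` keeps the example offline and assumes a recent torchvision;
older releases use the ``pretrained`` argument instead:

.. code-block:: python

   import torch
   import torchvision

   # Run a ResNet-style model on the GPU.
   model = torchvision.models.resnet18(weights=None).to("cuda").eval()
   images = torch.randn(4, 3, 224, 224, device="cuda")
   with torch.no_grad():
       logits = model(images)

   # GPU-accelerated non-maximum suppression from torchvision.ops.
   boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]], device="cuda")
   scores = torch.tensor([0.9, 0.8], device="cuda")
   keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5)
   print(logits.shape, keep)
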
torchtext
--------------------------------------------------------------------------------

The `torchtext <https://pytorch.org/text/stable/index.html>`_ library provides
utilities for processing and working with text data in PyTorch, including
tokenization, vocabulary management, and text embeddings. torchtext supports
preprocessing pipelines and integration with PyTorch models, simplifying the
implementation of natural language processing (NLP) tasks.

To leverage GPU acceleration in torchtext, you need to move tensors
explicitly to the GPU using ``.to('cuda')``, as shown in the sketch below.

* torchtext does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.

* Only the official release exists.
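
A minimal sketch of that pattern using a plain ``torch`` embedding layer over
token indices; the vocabulary size and batch shape are illustrative
placeholders rather than torchtext APIs:

.. code-block:: python

   import torch

   vocab_size, embed_dim = 10_000, 128                  # assumed sizes for illustration
   embedding = torch.nn.Embedding(vocab_size, embed_dim).to("cuda")

   token_ids = torch.randint(0, vocab_size, (32, 64))   # batch of tokenized text
   embedded = embedding(token_ids.to("cuda"))           # move indices to the GPU first
   print(embedded.shape, embedded.device)
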
torchtune
--------------------------------------------------------------------------------

The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library is
for authoring, fine-tuning, and experimenting with LLMs.

* Usage: It works out of the box with ROCm PyTorch, enabling developers to fine-tune LLMs on AMD GPUs.

* Only the official release exists.

torchserve
--------------------------------------------------------------------------------

`torchserve <https://pytorch.org/torchserve/>`_ is a tool for serving and
scaling PyTorch models for inference in production environments.

* torchserve does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.

* Only the official release exists.

torchrec
--------------------------------------------------------------------------------

`torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
common sparsity and parallelism primitives needed for large-scale recommender
systems.

* torchrec does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.

* Only the official release exists.

Unsupported PyTorch features
----------------------------

The following are GPU-accelerated PyTorch features not currently supported by ROCm.

.. list-table::
   :widths: 30, 60, 10
   :header-rows: 1

   * - Data type
     - Description
     - Since PyTorch
   * - APEX batch norm
     - Use APEX batch norm instead of PyTorch batch norm.
     - 1.6.0
   * - ``torch.backends.cuda`` / ``matmul.allow_tf32``
     - A bool that controls whether TensorFloat-32 tensor cores may be used in
       matrix multiplications.
     - 1.7
   * - ``torch.cuda`` / NVIDIA Tools Extension (NVTX)
     - Integration with NVTX for profiling and debugging GPU performance using
       NVIDIA's Nsight tools.
     - 1.7.0
   * - ``torch.cuda`` / Lazy loading NVRTC
     - Delays JIT compilation with NVRTC until the code is explicitly needed.
     - 1.8.0
   * - ``torch-tensorrt``
     - Integrate the TensorRT library for optimizing and deploying PyTorch
       models. ROCm does not have an equivalent library to TensorRT.
     - 1.9.0
   * - ``torch.backends`` / ``cudnn.allow_tf32``
     - TensorFloat-32 tensor cores may be used in cuDNN convolutions.
     - 1.10.0
   * - ``torch.backends.cuda`` / ``matmul.allow_fp16_reduced_precision_reduction``
     - Reduced precision reductions with fp16 accumulation type are
       allowed with fp16 GEMMs.
     - 2.0
   * - ``torch.backends.cuda`` / ``matmul.allow_bf16_reduced_precision_reduction``
     - Reduced precision reductions are allowed with bf16 GEMMs.
     - 2.0
   * - ``torch.nn.functional`` / ``scaled_dot_product_attention``
     - Flash attention backend for SDPA to accelerate attention computation in
       transformer-based models.
     - 2.0
   * - ``torch.backends.cuda`` / ``enable_cudnn_sdp``
     - Globally enables cuDNN SDPA's kernels within SDPA.
     - 2.0
   * - ``torch.backends.cuda`` / ``enable_flash_sdp``
     - Globally enables or disables FlashAttention for SDPA.
     - 2.1
   * - ``torch.backends.cuda`` / ``enable_mem_efficient_sdp``
     - Globally enables or disables Memory-Efficient Attention for SDPA.
     - 2.1
   * - ``torch.backends.cuda`` / ``enable_math_sdp``
     - Globally enables or disables the PyTorch C++ implementation within SDPA.
     - 2.1
   * - Dynamic parallelism
     - PyTorch itself does not directly expose dynamic parallelism as a core
       feature. Dynamic parallelism allows GPU threads to launch additional
       threads; this can be achieved using custom operations via the
       ``torch.utils.cpp_extension`` module.
     - Not a core feature
   * - Unified memory support in PyTorch
     - Unified Memory is not directly exposed in PyTorch's core API, but it can
       be utilized effectively through custom CUDA extensions or advanced
       workflows.
     - Not a core feature

Use cases and recommendations
================================================================================

* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/train-a-model>` provides
  guidance on how to leverage the ROCm platform for training AI models. It covers the steps, tools, and best practices
  for optimizing training workflows on AMD GPUs using PyTorch features.

* :doc:`Single-GPU fine-tuning and inference </how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference>`
  describes and demonstrates how to use the ROCm platform for the fine-tuning and inference of
  machine learning models, particularly large language models (LLMs), on systems with a single AMD
  Instinct MI300X accelerator. This page provides a detailed guide for setting up, optimizing, and
  executing fine-tuning and inference workflows in such environments.

* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference>`
  describes and demonstrates the fine-tuning and inference of machine learning models on systems
  with multiple MI300X accelerators.

* The :doc:`Instinct MI300X workload optimization guide </how-to/tuning-guides/mi300x/workload>` provides detailed
  guidance on optimizing workloads for the AMD Instinct MI300X accelerator using ROCm. This guide is aimed at helping
  users achieve optimal performance for deep learning and other high-performance computing tasks on the MI300X
  accelerator.

* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
  describes how PyTorch integrates with ROCm for AI workloads. It outlines the use of PyTorch on the ROCm platform and
  focuses on how to efficiently leverage AMD GPU hardware for training and inference tasks in AI applications.

For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_

@@ -308,24 +308,6 @@ Otherwise, if the system has Intel host CPUs add this instead to

      intel_iommu=on iommu=pt

``modprobe.blacklist=amdgpu``
   For some system configurations, the ``amdgpu`` driver needs to be blocked during kernel initialization to avoid an issue where after boot, the GPUs are not listed when running the command ``rocm-smi`` or ``amd-smi``.

   Alternatively, configuring the AMD recommended system optimized BIOS settings might remove the need for using this setting. Some manufacturers and users might not implement the recommended system optimized BIOS settings.

   If you experience the mentioned issue, then add this to ``GRUB_CMDLINE_LINUX``:

   .. code-block:: text

      modprobe.blacklist=amdgpu

   After the change, the ``amdgpu`` module must be loaded to support the ROCm framework
   software tools and utilities. Run the following command to load the ``amdgpu`` module:

   .. code-block:: text

      sudo modprobe amdgpu

Update GRUB
-----------

@@ -9,14 +9,16 @@
|
||||
Data types and precision support
|
||||
*************************************************************
|
||||
|
||||
This topic lists the data types support on AMD GPUs, ROCm libraries along
|
||||
with corresponding :doc:`HIP <hip:index>` data types.
|
||||
This topic lists the supported data types of AMD GPUs and ROCm libraries.
|
||||
Corresponding :doc:`HIP <hip:index>` data types are also noted.
|
||||
|
||||
Integral types
|
||||
==============
|
||||
==========================================
|
||||
|
||||
The signed and unsigned integral types supported by ROCm are listed in
|
||||
the following table.
|
||||
the following table, along with their corresponding HIP type and a short
|
||||
description.
|
||||
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
@@ -46,9 +48,10 @@ the following table.
|
||||
.. _precision_support_floating_point_types:
|
||||
|
||||
Floating-point types
|
||||
====================
|
||||
==========================================
|
||||
|
||||
The floating-point types supported by ROCm are listed in the following table.
|
||||
The floating-point types supported by ROCm are listed in the following
|
||||
table, along with their corresponding HIP type and a short description.
|
||||
|
||||
.. image:: ../data/about/compatibility/floating-point-data-types.png
|
||||
:alt: Supported floating-point types
|
||||
@@ -63,18 +66,18 @@ The floating-point types supported by ROCm are listed in the following table.
|
||||
- Description
|
||||
*
|
||||
- float8 (E4M3)
|
||||
- ``__hip_fp8_e4m3_fnuz``
|
||||
- ``-``
|
||||
- An 8-bit floating-point number that mostly follows IEEE-754 conventions
|
||||
and **S1E4M3** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_,
|
||||
with expanded range and no infinity or signed zero. NaN is represented
|
||||
as negative zero.
|
||||
and **S1E4M3** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ ,
|
||||
with expanded range and no infinity or signed zero. NaN is
|
||||
represented as negative zero.
|
||||
*
|
||||
- float8 (E5M2)
|
||||
- ``__hip_fp8_e5m2_fnuz``
|
||||
- ``-``
|
||||
- An 8-bit floating-point number mostly following IEEE-754 conventions and
|
||||
**S1E5M2** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_,
|
||||
with expanded range and no infinity or signed zero. NaN is represented
|
||||
as negative zero.
|
||||
**S1E5M2** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ ,
|
||||
with expanded range and no infinity or signed zero. NaN is
|
||||
represented as negative zero.
|
||||
*
|
||||
- float16
|
||||
- ``half``
|
||||
@@ -87,7 +90,7 @@ The floating-point types supported by ROCm are listed in the following table.
|
||||
format.
|
||||
*
|
||||
- tensorfloat32
|
||||
- Not available
|
||||
- ``-``
|
||||
- A floating-point number that occupies 32 bits or less of storage,
|
||||
providing improved range compared to half (16-bit) format, at
|
||||
(potentially) greater throughput than single-precision (32-bit) formats.
|
||||
@@ -114,15 +117,12 @@ The floating-point types supported by ROCm are listed in the following table.
|
||||
|
||||
* In some AMD documents and articles, float8 (E5M2) is referred to as bfloat8.
|
||||
|
||||
* The :doc:`low precision floating point types page <hip:reference/low_fp_types>`
|
||||
describes how to use these types in HIP with examples.
|
||||
ROCm support icons
|
||||
==========================================
|
||||
|
||||
Level of support definitions
|
||||
============================
|
||||
|
||||
In the following sections, icons represent the level of support. These icons,
|
||||
described in the following table, are also used in the library data type support
|
||||
pages.
|
||||
In the following sections, icons represent the level of support. These
|
||||
icons, described in the following table, are also used in the library data type
|
||||
support pages.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
@@ -130,11 +130,6 @@ pages.
|
||||
*
|
||||
- Icon
|
||||
- Definition
|
||||
|
||||
*
|
||||
- NA
|
||||
- Not applicable
|
||||
|
||||
*
|
||||
- ❌
|
||||
- Not supported
|
||||
@@ -163,15 +158,16 @@ pages.
|
||||
* Any type can be emulated by software, but this page does not cover such
|
||||
cases.
|
||||
|
||||
Data type support by Hardware Architecture
|
||||
Hardware data type support
|
||||
==========================================
|
||||
|
||||
The MI200 series GPUs, which include MI210, MI250, and MI250X, are based on the
|
||||
CDNA2 architecture. The MI300 series GPUs, consisting of MI300A, MI300X, and
|
||||
MI325X, are based on the CDNA3 architecture.
|
||||
The following tables provide information about AMD Instinct accelerators support
|
||||
for various data types. The MI200 series GPUs, which include MI210, MI250, and
|
||||
MI250X, are based on the CDNA2 architecture. The MI300 series GPUs, consisting
|
||||
of MI300A, MI300X, and MI325X, are built on the CDNA3 architecture.
|
||||
|
||||
Compute units support
|
||||
---------------------
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
The following table lists data type support for compute units.
|
||||
|
||||
@@ -252,7 +248,7 @@ The following table lists data type support for compute units.
|
||||
- ✅
|
||||
|
||||
Matrix core support
|
||||
-------------------
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
The following table lists data type support for AMD GPU matrix cores.
|
||||
|
||||
@@ -333,7 +329,7 @@ The following table lists data type support for AMD GPU matrix cores.
|
||||
- ✅
|
||||
|
||||
Atomic operations support
|
||||
-------------------------
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
The following table lists data type support for atomic operations.
|
||||
|
||||
@@ -420,14 +416,14 @@ The following table lists data type support for atomic operations.
|
||||
performance impact when they frequently access the same memory address.
|
||||
|
||||
Data type support in ROCm libraries
|
||||
===================================
|
||||
==========================================
|
||||
|
||||
ROCm library support for int8, float8 (E4M3), float8 (E5M2), int16, float16,
|
||||
bfloat16, int32, tensorfloat32, float32, int64, and float64 is listed in the
|
||||
following tables.
|
||||
|
||||
Libraries input/output type support
|
||||
-----------------------------------
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
The following tables list ROCm library support for specific input and output
|
||||
data types. Refer to the corresponding library data type support page for a
|
||||
@@ -448,37 +444,37 @@ detailed description.
|
||||
- int32
|
||||
- int64
|
||||
*
|
||||
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
|
||||
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
|
||||
- ✅/✅
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
*
|
||||
- :doc:`rocRAND <rocrand:api-reference/data-type-support>`
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- rocRAND (:doc:`details <rocrand:api-reference/data-type-support>`)
|
||||
- -/✅
|
||||
- -/✅
|
||||
- -/✅
|
||||
- -/✅
|
||||
*
|
||||
- :doc:`hipRAND <hiprand:api-reference/data-type-support>`
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- hipRAND (:doc:`details <hiprand:api-reference/data-type-support>`)
|
||||
- -/✅
|
||||
- -/✅
|
||||
- -/✅
|
||||
- -/✅
|
||||
*
|
||||
- :doc:`rocPRIM <rocprim:reference/data-type-support>`
|
||||
- rocPRIM (:doc:`details <rocprim:reference/data-type-support>`)
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
*
|
||||
- :doc:`hipCUB <hipcub:api-reference/data-type-support>`
|
||||
- hipCUB (:doc:`details <hipcub:api-reference/data-type-support>`)
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
*
|
||||
- :doc:`rocThrust <rocthrust:data-type-support>`
|
||||
- rocThrust (:doc:`details <rocthrust:data-type-support>`)
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
@@ -500,7 +496,7 @@ detailed description.
|
||||
- float32
|
||||
- float64
|
||||
*
|
||||
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
|
||||
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
- ✅/✅
|
||||
@@ -509,25 +505,25 @@ detailed description.
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
*
|
||||
- :doc:`rocRAND <rocrand:api-reference/data-type-support>`
|
||||
- NA/❌
|
||||
- NA/❌
|
||||
- NA/✅
|
||||
- NA/❌
|
||||
- NA/❌
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- rocRAND (:doc:`details <rocrand:api-reference/data-type-support>`)
|
||||
- -/❌
|
||||
- -/❌
|
||||
- -/✅
|
||||
- -/❌
|
||||
- -/❌
|
||||
- -/✅
|
||||
- -/✅
|
||||
*
|
||||
- :doc:`hipRAND <hiprand:api-reference/data-type-support>`
|
||||
- NA/❌
|
||||
- NA/❌
|
||||
- NA/✅
|
||||
- NA/❌
|
||||
- NA/❌
|
||||
- NA/✅
|
||||
- NA/✅
|
||||
- hipRAND (:doc:`details <hiprand:api-reference/data-type-support>`)
|
||||
- -/❌
|
||||
- -/❌
|
||||
- -/✅
|
||||
- -/❌
|
||||
- -/❌
|
||||
- -/✅
|
||||
- -/✅
|
||||
*
|
||||
- :doc:`rocPRIM <rocprim:reference/data-type-support>`
|
||||
- rocPRIM (:doc:`details <rocprim:reference/data-type-support>`)
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
- ✅/✅
|
||||
@@ -536,7 +532,7 @@ detailed description.
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
*
|
||||
- :doc:`hipCUB <hipcub:api-reference/data-type-support>`
|
||||
- hipCUB (:doc:`details <hipcub:api-reference/data-type-support>`)
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
- ✅/✅
|
||||
@@ -545,7 +541,7 @@ detailed description.
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
*
|
||||
- :doc:`rocThrust <rocthrust:data-type-support>`
|
||||
- rocThrust (:doc:`details <rocthrust:data-type-support>`)
|
||||
- ❌/❌
|
||||
- ❌/❌
|
||||
- ⚠️/⚠️
|
||||
@@ -554,14 +550,9 @@ detailed description.
|
||||
- ✅/✅
|
||||
- ✅/✅
|
||||
|
||||
.. note::
|
||||
|
||||
As random number generation libraries, rocRAND and hipRAND only specify output
|
||||
data types for the random values they generate, with no need for input data
|
||||
types.
|
||||
|
||||
Libraries internal calculations type support
|
||||
--------------------------------------------
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
The following tables list ROCm library support for specific internal data types.
|
||||
Refer to the corresponding library data type support page for a detailed
|
||||
@@ -582,7 +573,7 @@ description.
|
||||
- int32
|
||||
- int64
|
||||
*
|
||||
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
|
||||
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
|
||||
- ❌
|
||||
- ❌
|
||||
- ✅
|
||||
@@ -605,7 +596,7 @@ description.
|
||||
- float32
|
||||
- float64
|
||||
*
|
||||
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
|
||||
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
|
||||
- ❌
|
||||
- ❌
|
||||
- ❌
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.11
|
||||
# This file is autogenerated by pip-compile with Python 3.10
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile docs/sphinx/requirements.in
|
||||
@@ -8,8 +8,6 @@ accessible-pygments==0.0.5
|
||||
# via pydata-sphinx-theme
|
||||
alabaster==1.0.0
|
||||
# via sphinx
|
||||
appnope==0.1.4
|
||||
# via ipykernel
|
||||
asttokens==3.0.0
|
||||
# via stack-data
|
||||
attrs==25.1.0
|
||||
@@ -25,7 +23,7 @@ beautifulsoup4==4.12.3
|
||||
# via pydata-sphinx-theme
|
||||
breathe==4.35.0
|
||||
# via rocm-docs-core
|
||||
certifi==2024.12.14
|
||||
certifi==2024.8.30
|
||||
# via requests
|
||||
cffi==1.17.1
|
||||
# via
|
||||
@@ -39,7 +37,7 @@ click==8.1.7
|
||||
# sphinx-external-toc
|
||||
comm==0.2.2
|
||||
# via ipykernel
|
||||
cryptography==44.0.0
|
||||
cryptography==44.0.1
|
||||
# via pyjwt
|
||||
debugpy==1.8.12
|
||||
# via ipykernel
|
||||
@@ -55,9 +53,11 @@ docutils==0.21.2
|
||||
# myst-parser
|
||||
# pydata-sphinx-theme
|
||||
# sphinx
|
||||
exceptiongroup==1.2.2
|
||||
# via ipython
|
||||
executing==2.2.0
|
||||
# via stack-data
|
||||
fastjsonschema==2.21.1
|
||||
fastjsonschema==2.20.0
|
||||
# via
|
||||
# nbformat
|
||||
# rocm-docs-core
|
||||
@@ -65,6 +65,8 @@ gitdb==4.0.11
|
||||
# via gitpython
|
||||
gitpython==3.1.43
|
||||
# via rocm-docs-core
|
||||
greenlet==3.1.1
|
||||
# via sqlalchemy
|
||||
idna==3.10
|
||||
# via requests
|
||||
imagesize==1.4.1
|
||||
@@ -75,13 +77,13 @@ importlib-metadata==8.6.1
|
||||
# myst-nb
|
||||
ipykernel==6.29.5
|
||||
# via myst-nb
|
||||
ipython==8.32.0
|
||||
ipython==8.31.0
|
||||
# via
|
||||
# ipykernel
|
||||
# myst-nb
|
||||
jedi==0.19.2
|
||||
# via ipython
|
||||
jinja2==3.1.4
|
||||
jinja2==3.1.5
|
||||
# via
|
||||
# myst-parser
|
||||
# sphinx
|
||||
@@ -115,7 +117,7 @@ mdit-py-plugins==0.4.2
|
||||
# via myst-parser
|
||||
mdurl==0.1.2
|
||||
# via markdown-it-py
|
||||
myst-nb==1.2.0
|
||||
myst-nb==1.1.2
|
||||
# via rocm-docs-core
|
||||
myst-parser==4.0.0
|
||||
# via myst-nb
|
||||
@@ -142,7 +144,7 @@ platformdirs==4.3.6
|
||||
# via jupyter-core
|
||||
prompt-toolkit==3.0.50
|
||||
# via ipython
|
||||
psutil==7.0.0
|
||||
psutil==6.1.1
|
||||
# via ipykernel
|
||||
ptyprocess==0.7.0
|
||||
# via pexpect
|
||||
@@ -150,7 +152,7 @@ pure-eval==0.2.3
|
||||
# via stack-data
|
||||
pycparser==2.22
|
||||
# via cffi
|
||||
pydata-sphinx-theme==0.16.1
|
||||
pydata-sphinx-theme==0.16.0
|
||||
# via
|
||||
# rocm-docs-core
|
||||
# sphinx-book-theme
|
||||
@@ -162,7 +164,7 @@ pygments==2.18.0
|
||||
# ipython
|
||||
# pydata-sphinx-theme
|
||||
# sphinx
|
||||
pyjwt[crypto]==2.10.1
|
||||
pyjwt[crypto]==2.10.0
|
||||
# via pygithub
|
||||
pynacl==1.5.0
|
||||
# via pygithub
|
||||
@@ -175,7 +177,8 @@ pyyaml==6.0.2
|
||||
# myst-parser
|
||||
# rocm-docs-core
|
||||
# sphinx-external-toc
|
||||
pyzmq==26.2.1
|
||||
# sphinxcontrib-datatemplates
|
||||
pyzmq==26.2.0
|
||||
# via
|
||||
# ipykernel
|
||||
# jupyter-client
|
||||
@@ -247,12 +250,14 @@ sphinxcontrib-runcmd==0.2.0
|
||||
# via sphinxcontrib-datatemplates
|
||||
sphinxcontrib-serializinghtml==2.0.0
|
||||
# via sphinx
|
||||
sqlalchemy==2.0.38
|
||||
sqlalchemy==2.0.37
|
||||
# via jupyter-cache
|
||||
stack-data==0.6.3
|
||||
# via ipython
|
||||
tabulate==0.9.0
|
||||
# via jupyter-cache
|
||||
tomli==2.1.0
|
||||
# via sphinx
|
||||
tornado==6.4.2
|
||||
# via
|
||||
# ipykernel
|
||||
|
||||
@@ -68,6 +68,85 @@ set_address_sanitizer_off() {
|
||||
export LDFLAGS=""
|
||||
}
|
||||
|
||||
build_miopen_ckProf() {
|
||||
ENABLE_ADDRESS_SANITIZER=false
|
||||
echo "Start Building Composable Kernel Profiler"
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
else
|
||||
unset_asan_env_vars
|
||||
set_address_sanitizer_off
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
cd "$BUILD_DIR"
|
||||
rm -rf *
|
||||
|
||||
architectures='gfx10 gfx11 gfx90 gfx94'
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
architectures=$(echo ${GPU_ARCHS} | awk -F';' '{for(i=1;i<=NF;i++) a[substr($i,1,5)]} END{for(i in a) printf i" "}')
|
||||
fi
|
||||
|
||||
for arch in ${architectures}
|
||||
do
|
||||
if [ "${ASAN_CMAKE_PARAMS}" == "true" ] ; then
|
||||
cmake -DBUILD_DEV=OFF \
|
||||
-DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}/lib/cmake;${ROCM_PATH%-*}/$ASAN_LIBDIR;${ROCM_PATH%-*}/llvm;${ROCM_PATH%-*}" \
|
||||
-DCMAKE_BUILD_TYPE=${BUILD_TYPE:-'RelWithDebInfo'} \
|
||||
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_LIB_RPATH" \
|
||||
-DCMAKE_EXE_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_EXE_RPATH" \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=1 \
|
||||
-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
|
||||
-DCMAKE_INSTALL_PREFIX="${ROCM_PATH}" \
|
||||
-DCMAKE_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
|
||||
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
|
||||
-DROCM_SYMLINK_LIBS=OFF \
|
||||
-DCPACK_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
|
||||
-DROCM_DISABLE_LDCONFIG=ON \
|
||||
-DROCM_PATH="${ROCM_PATH}" \
|
||||
-DCPACK_GENERATOR="${PKGTYPE^^}" \
|
||||
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
|
||||
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
|
||||
${LAUNCHER_FLAGS} \
|
||||
-DPROFILER_ONLY=ON \
|
||||
-DENABLE_ASAN_PACKAGING=true \
|
||||
-DGPU_ARCH="${arch}" \
|
||||
"$COMPONENT_SRC"
|
||||
else
|
||||
cmake -DBUILD_DEV=OFF \
|
||||
-DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}" \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_SHARED_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN' \
|
||||
-DCMAKE_EXE_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN/../lib' \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=1 \
|
||||
-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
|
||||
-DCMAKE_INSTALL_PREFIX="${ROCM_PATH}" \
|
||||
-DCMAKE_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
|
||||
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
|
||||
-DROCM_SYMLINK_LIBS=OFF \
|
||||
-DCPACK_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
|
||||
-DROCM_DISABLE_LDCONFIG=ON \
|
||||
-DROCM_PATH="${ROCM_PATH}" \
|
||||
-DCPACK_GENERATOR="${PKGTYPE^^}" \
|
||||
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
|
||||
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
|
||||
${LAUNCHER_FLAGS} \
|
||||
-DPROFILER_ONLY=ON \
|
||||
-DGPU_ARCH="${arch}" \
|
||||
"$COMPONENT_SRC"
|
||||
fi
|
||||
|
||||
cmake --build . -- -j${PROC} package
|
||||
cp ./*ckprofiler*.${PKGTYPE} $PACKAGE_DIR
|
||||
rm -rf *
|
||||
done
|
||||
rm -rf _CPack_Packages/ && find -name '*.o' -delete
|
||||
|
||||
echo "Finished building Composable Kernel"
|
||||
show_build_cache_stats
|
||||
}
|
||||
|
||||
clean_miopen_ck() {
|
||||
echo "Cleaning MIOpen-CK build directory: ${BUILD_DIR} ${PACKAGE_DIR}"
|
||||
rm -rf "$BUILD_DIR" "$PACKAGE_DIR"
|
||||
|
||||
@@ -42,6 +42,7 @@ DEB_PATH="$(getDebPath $PROJ_NAME)"
|
||||
RPM_PATH="$(getRpmPath $PROJ_NAME)"
|
||||
INSTALL_PATH="${ROCM_INSTALL_PATH}/lib/llvm"
|
||||
LLVM_ROOT_LCL="${LLVM_ROOT}"
|
||||
ROCM_WHEEL_DIR="${BUILD_PATH}/_wheel"
|
||||
|
||||
TARGET="all"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
@@ -149,6 +150,7 @@ ENABLE_RUNTIMES="$ENABLE_RUNTIMES;libcxx;libcxxabi"
|
||||
BOOTSTRAPPING_BUILD_LIBCXX=1
|
||||
|
||||
clean_lightning() {
|
||||
rm -rf "$ROCM_WHEEL_DIR"
|
||||
rm -rf "$BUILD_PATH"
|
||||
rm -rf "$DEB_PATH"
|
||||
rm -rf "$RPM_PATH"
|
||||
@@ -330,6 +332,15 @@ build_lightning() {
|
||||
echo "End Workaround for race condition"
|
||||
cmake --build . -- $MAKEOPTS
|
||||
|
||||
case "$DISTRO_ID" in
|
||||
(rhel*|centos*)
|
||||
RHEL_BUILD=1
|
||||
;;
|
||||
(*)
|
||||
RHEL_BUILD=0
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ $SKIP_LIT_TESTS -eq 0 ]; then
|
||||
if [ $RHEL_BUILD -eq 1 ]; then
|
||||
cmake --build . -- $MAKEOPTS check-lld check-mlir
|
||||
@@ -1147,4 +1158,9 @@ case $TARGET in
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
if [[ $WHEEL_PACKAGE == true ]]; then
|
||||
echo "Wheel Package build started !!!!"
|
||||
create_wheel_package
|
||||
fi
|
||||
|
||||
echo "Operation complete"
|
||||
|
||||
171
tools/rocm-build/build_omniperf.sh
Executable file
@@ -0,0 +1,171 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
|
||||
printUsage() {
|
||||
echo
|
||||
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
|
||||
echo " -p, --package <type> Specify packaging format"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
echo " -w, --wheel Creates python wheel package of omniperf.
|
||||
It needs to be used along with -r option"
|
||||
echo " -h, --help Prints this help"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
echo " deb -> Debian format (default)"
|
||||
echo " rpm -> RPM format"
|
||||
echo
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
API_NAME="omniperf"
|
||||
PROJ_NAME="$API_NAME"
|
||||
LIB_NAME="lib${API_NAME}"
|
||||
TARGET="build"
|
||||
MAKETARGET="deb"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_LIB="$(getLibPath)"
|
||||
BUILD_DIR="$(getBuildPath $API_NAME)"
|
||||
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
|
||||
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
|
||||
ROCM_WHEEL_DIR="${BUILD_DIR}/_wheel"
|
||||
BUILD_TYPE="Debug"
|
||||
MAKE_OPTS="$DASH_JAY -C $BUILD_DIR"
|
||||
SHARED_LIBS="ON"
|
||||
CLEAN_OR_OUT=0;
|
||||
MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
WHEEL_PACKAGE=false
|
||||
|
||||
|
||||
#parse the arguments
|
||||
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,static,address_sanitizer,outdir:,package:,wheel -- "$@")
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true ;
|
||||
do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
printUsage ; exit 0;;
|
||||
-c | --clean)
|
||||
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
|
||||
-r | --release)
|
||||
BUILD_TYPE="Release" ; shift ;;
|
||||
-a | --address_sanitizer)
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
-p | --package)
|
||||
MAKETARGET="$2" ; shift 2 ;;
|
||||
-w | --wheel)
|
||||
WHEEL_PACKAGE=true ; shift ;;
|
||||
--) shift; break;; # end delimiter
|
||||
*)
|
||||
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
|
||||
esac
|
||||
|
||||
done
|
||||
|
||||
RET_CONFLICT=1
|
||||
check_conflicting_options "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
|
||||
if [ $RET_CONFLICT -ge 30 ]; then
|
||||
print_vars "$API_NAME" "$TARGET" "$BUILD_TYPE" "$SHARED_LIBS" "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
|
||||
exit $RET_CONFLICT
|
||||
fi
|
||||
|
||||
clean() {
|
||||
echo "Cleaning $PROJ_NAME"
|
||||
rm -rf "$ROCM_WHEEL_DIR"
|
||||
rm -rf "$BUILD_DIR"
|
||||
rm -rf "$PACKAGE_DEB"
|
||||
rm -rf "$PACKAGE_RPM"
|
||||
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
|
||||
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
|
||||
}
|
||||
|
||||
build() {
|
||||
echo "Building $PROJ_NAME"
|
||||
if [ "$DISTRO_ID" = centos-7 ]; then
|
||||
echo "Skip make and uploading packages for Omniperf on Centos7 distro, due to python dependency"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ ! -d "$BUILD_DIR" ]; then
|
||||
mkdir -p "$BUILD_DIR"
|
||||
pushd "$BUILD_DIR" || exit
|
||||
|
||||
echo "ROCm CMake Params: $(rocm_cmake_params)"
|
||||
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"
|
||||
|
||||
print_lib_type $SHARED_LIBS
|
||||
cmake \
|
||||
$(rocm_cmake_params) \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DCHECK_PYTHON_DEPS=NO \
|
||||
-DPYTHON_DEPS=${BUILD_DIR}/python-libs \
|
||||
-DMOD_INSTALL_PATH=${BUILD_DIR}/modulefiles \
|
||||
"$OMNIPERF_ROOT"
|
||||
fi
|
||||
|
||||
make $MAKE_OPTS
|
||||
make $MAKE_OPTS install
|
||||
make $MAKE_OPTS package
|
||||
|
||||
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
|
||||
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
|
||||
}
|
||||
|
||||
create_wheel_package() {
|
||||
echo "Creating Omniperf wheel package"
|
||||
|
||||
# Copy the setup.py generator to build folder
|
||||
mkdir -p "$ROCM_WHEEL_DIR"
|
||||
cp -f "$SCRIPT_ROOT"/generate_setup_py.py "$ROCM_WHEEL_DIR"
|
||||
cp -f "$SCRIPT_ROOT"/repackage_wheel.sh "$ROCM_WHEEL_DIR"
|
||||
cd "$ROCM_WHEEL_DIR" || exit
|
||||
|
||||
# Currently only supports python3.6
|
||||
./repackage_wheel.sh "$BUILD_DIR"/*.rpm python3.6
|
||||
|
||||
# Copy the wheel created to RPM folder which will be uploaded to artifactory
|
||||
copy_if WHL "WHL" "$PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
|
||||
}
|
||||
|
||||
print_output_directory() {
|
||||
case ${PKGTYPE} in
|
||||
("deb")
|
||||
echo "${PACKAGE_DEB}";;
|
||||
("rpm")
|
||||
echo "${PACKAGE_RPM}";;
|
||||
(*)
|
||||
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
|
||||
esac
|
||||
exit
|
||||
}
|
||||
|
||||
verifyEnvSetup
|
||||
|
||||
case "$TARGET" in
|
||||
(clean) clean ;;
|
||||
(build) build ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
if [[ $WHEEL_PACKAGE == true ]]; then
|
||||
echo "Wheel Package build started !!!!"
|
||||
create_wheel_package
|
||||
fi
|
||||
|
||||
echo "Operation complete"
|
||||
191
tools/rocm-build/build_omnitrace.sh
Executable file
@@ -0,0 +1,191 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
|
||||
printUsage() {
|
||||
echo
|
||||
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
|
||||
echo " -p, --package <type> Specify packaging format"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
echo " -w, --wheel Creates python wheel package of omnitrace.
|
||||
It needs to be used along with -r option"
|
||||
echo " -h, --help Prints this help"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
echo " deb -> Debian format (default)"
|
||||
echo " rpm -> RPM format"
|
||||
echo
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
API_NAME="omnitrace"
|
||||
PROJ_NAME="$API_NAME"
|
||||
LIB_NAME="lib${API_NAME}"
|
||||
TARGET="build"
|
||||
MAKETARGET="deb"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_LIB="$(getLibPath)"
|
||||
BUILD_DIR="$(getBuildPath $API_NAME)"
|
||||
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
|
||||
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
|
||||
BUILD_TYPE="Debug"
|
||||
MAKE_OPTS="-j 8"
|
||||
SHARED_LIBS="ON"
|
||||
CLEAN_OR_OUT=0
|
||||
MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
ASAN=0
|
||||
|
||||
#parse the arguments
|
||||
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,address_sanitizer,static,outdir:,package:,wheel -- "$@")
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
printUsage
|
||||
exit 0
|
||||
;;
|
||||
-c | --clean)
|
||||
TARGET="clean"
|
||||
((CLEAN_OR_OUT |= 1))
|
||||
shift
|
||||
;;
|
||||
-r | --release)
|
||||
BUILD_TYPE="RelWithDebInfo"
|
||||
shift
|
||||
;;
|
||||
-a | --address_sanitizer)
|
||||
ack_and_ignore_asan
|
||||
|
||||
ASAN=1
|
||||
shift
|
||||
;;
|
||||
-s | --static)
|
||||
SHARED_LIBS="OFF"
|
||||
shift
|
||||
;;
|
||||
-o | --outdir)
|
||||
TARGET="outdir"
|
||||
PKGTYPE=$2
|
||||
((CLEAN_OR_OUT |= 2))
|
||||
shift 2
|
||||
;;
|
||||
-p | --package)
|
||||
MAKETARGET="$2"
|
||||
shift 2
|
||||
;;
|
||||
-w | --wheel)
|
||||
echo "omnitrace: wheel build option accepted and ignored"
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
*)
|
||||
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] " >&2
|
||||
exit 20
|
||||
;;
|
||||
esac
|
||||
|
||||
done
|
||||
|
||||
RET_CONFLICT=1
|
||||
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
if [ $RET_CONFLICT -ge 30 ]; then
|
||||
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
exit $RET_CONFLICT
|
||||
fi
|
||||
|
||||
clean() {
|
||||
echo "Cleaning $PROJ_NAME"
|
||||
rm -rf "$BUILD_DIR"
|
||||
rm -rf "$PACKAGE_DEB"
|
||||
rm -rf "$PACKAGE_RPM"
|
||||
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
|
||||
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
|
||||
}
|
||||
|
||||
build_omnitrace() {
|
||||
echo "Building $PROJ_NAME"
|
||||
if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "ubuntu-24.04" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ]; then
|
||||
echo "Skip make and uploading packages for Omnitrace on \"${DISTRO_ID}\" distro"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ $ASAN == 1 ]; then
|
||||
echo "Skip make and uploading packages for Omnitrace on ASAN build"
|
||||
exit 0
|
||||
fi
|
||||
if [ ! -d "$BUILD_DIR" ]; then
|
||||
mkdir -p "$BUILD_DIR"
|
||||
echo "Created build directory: $BUILD_DIR"
|
||||
fi
|
||||
|
||||
echo "Build directory: $BUILD_DIR"
|
||||
pushd "$BUILD_DIR" || exit
|
||||
print_lib_type $SHARED_LIBS
|
||||
|
||||
echo "ROCm CMake Params: $(rocm_cmake_params)"
|
||||
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"
|
||||
|
||||
|
||||
if [ $ASAN == 1 ]; then
|
||||
echo "Address Sanitizer path"
|
||||
|
||||
else
|
||||
cmake \
|
||||
$(rocm_cmake_params) \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DOMNITRACE_BUILD_{LIBUNWIND,DYNINST}=ON \
|
||||
-DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
|
||||
"$OMNITRACE_ROOT"
|
||||
fi
|
||||
|
||||
|
||||
popd || exit
|
||||
|
||||
echo "Make Options: $MAKE_OPTS"
|
||||
cmake --build "$BUILD_DIR" --target all -- $MAKE_OPTS
|
||||
cmake --build "$BUILD_DIR" --target install -- $MAKE_OPTS
|
||||
cmake --build "$BUILD_DIR" --target package -- $MAKE_OPTS
|
||||
|
||||
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
|
||||
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
|
||||
}
|
||||
|
||||
print_output_directory() {
|
||||
case ${PKGTYPE} in
|
||||
"deb")
|
||||
echo "${PACKAGE_DEB}"
|
||||
;;
|
||||
"rpm")
|
||||
echo "${PACKAGE_RPM}"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit
|
||||
}
|
||||
|
||||
verifyEnvSetup
|
||||
|
||||
case "$TARGET" in
|
||||
clean) clean ;;
|
||||
build) build_omnitrace ;;
|
||||
outdir) print_output_directory ;;
|
||||
*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
echo "Operation complete"
|
||||
141
tools/rocm-build/build_opencl_icd_loader.sh
Executable file
@@ -0,0 +1,141 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
PROJ_NAME=OpenCL-ICD-Loader
|
||||
TARGET="build"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
BUILD_TYPE="Debug"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_DEB="$PACKAGE_ROOT/deb/${PROJ_NAME,,}"
|
||||
PACKAGE_RPM="$PACKAGE_ROOT/rpm/${PROJ_NAME,,}"
|
||||
CLEAN_OR_OUT=0;
|
||||
PKGTYPE="deb"
|
||||
MAKETARGET="deb"
|
||||
API_NAME="rocm-opencl-icd-loader"
|
||||
|
||||
printUsage() {
|
||||
echo
|
||||
echo "Usage: $(basename "${BASH_SOURCE}") [options ...]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -p, --package <type> Specify packaging format"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
echo " -h, --help Prints this help"
|
||||
echo " -o, --outdir Print path of output directory containing packages"
|
||||
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
echo " deb -> Debian format (default)"
|
||||
echo " rpm -> RPM format"
|
||||
echo
|
||||
return 0
|
||||
}
|
||||
|
||||
RET_CONFLICT=1
|
||||
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
if [ $RET_CONFLICT -ge 30 ]; then
|
||||
print_vars $TARGET $BUILD_TYPE $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
exit $RET_CONFLICT
|
||||
fi
|
||||
|
||||
clean_opencl_icd_loader() {
|
||||
echo "Cleaning $PROJ_NAME"
|
||||
rm -rf "$PACKAGE_DEB"
|
||||
rm -rf "$PACKAGE_RPM"
|
||||
rm -rf "$PACKAGE_ROOT/${PROJ_NAME,,}"
|
||||
}
|
||||
|
||||
copy_pkg_files_to_rocm() {
|
||||
local comp_folder=$1
|
||||
local comp_pkg_name=$2
|
||||
|
||||
cd "${OUT_DIR}/${PKGTYPE}/${comp_folder}"|| exit 2
|
||||
if [ "${PKGTYPE}" = 'deb' ]; then
|
||||
dpkg-deb -x ${comp_pkg_name}_*.deb pkg/
|
||||
else
|
||||
mkdir pkg && pushd pkg/ || exit 2
|
||||
if [[ "${comp_pkg_name}" != *-dev* ]]; then
|
||||
rpm2cpio ../${comp_pkg_name}-*.rpm | cpio -idmv
|
||||
else
|
||||
rpm2cpio ../${comp_pkg_name}el-*.rpm | cpio -idmv
|
||||
fi
|
||||
popd || exit 2
|
||||
fi
|
||||
ls ./pkg -alt
|
||||
cp -r ./pkg/*/rocm*/* "${ROCM_PATH}" || exit 2
|
||||
rm -rf pkg/
|
||||
}
|
||||
|
||||
build_opencl_icd_loader() {
|
||||
echo "Downloading $PROJ_NAME" package
|
||||
if [ "$DISTRO_NAME" = ubuntu ]; then
|
||||
mkdir -p "$PACKAGE_DEB"
|
||||
local rocm_ver=${ROCM_VERSION}
|
||||
if [ ${ROCM_VERSION##*.} = 0 ]; then
|
||||
rocm_ver=${ROCM_VERSION%.*}
|
||||
fi
|
||||
local url="https://repo.radeon.com/rocm/apt/${rocm_ver}/pool/main/r/${API_NAME}/"
|
||||
local package
|
||||
package=$(curl -s "$url" | grep -Po 'href="\K[^"]*' | grep "${DISTRO_RELEASE}" | head -n 1)
|
||||
|
||||
if [ -z "$package" ]; then
|
||||
echo "No package found for Ubuntu version $DISTRO_RELEASE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
wget -t3 -P "$PACKAGE_DEB" "${url}${package}"
|
||||
copy_pkg_files_to_rocm ${PROJ_NAME,,} ${API_NAME}
|
||||
else
|
||||
echo "$DISTRO_ID is not supported..."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "Installing $PROJ_NAME" package
|
||||
}
|
||||
|
||||
print_output_directory() {
|
||||
case ${PKGTYPE} in
|
||||
("deb")
|
||||
echo ${PACKAGE_DEB};;
|
||||
("rpm")
|
||||
echo ${PACKAGE_RPM};;
|
||||
(*)
|
||||
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
|
||||
esac
|
||||
exit
|
||||
}
|
||||
|
||||
VALID_STR=`getopt -o hcraswlo:p: --long help,clean,release,outdir:,package: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
while true ;
|
||||
do
|
||||
case "$1" in
|
||||
(-c | --clean )
|
||||
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
|
||||
(-r | --release )
|
||||
BUILD_TYPE="RelWithDebInfo" ; shift ;;
|
||||
(-h | --help )
|
||||
printUsage ; exit 0 ;;
|
||||
(-a | --address_sanitizer)
|
||||
ack_and_ignore_asan ; shift ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
(-p | --package)
|
||||
MAKETARGET="$2" ; shift 2;;
|
||||
(-s | --static)
|
||||
echo "-s parameter accepted but ignored" ; shift ;;
|
||||
--) shift; break;;
|
||||
(*)
|
||||
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
|
||||
esac
|
||||
done
|
||||
|
||||
case $TARGET in
|
||||
(clean) clean_opencl_icd_loader ;;
|
||||
(build) build_opencl_icd_loader ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
echo "Operation complete"
|
||||
@@ -32,6 +32,7 @@ ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
|
||||
ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
|
||||
ROCM_CMAKE_PACKAGE_DEB="$(getPackageRoot)/deb/rocm-cmake"
|
||||
ROCM_CMAKE_PACKAGE_RPM="$(getPackageRoot)/rpm/rocm-cmake"
|
||||
ROCM_WHEEL_DIR="${ROCM_CMAKE_BUILD_DIR}/_wheel"
|
||||
ROCM_CMAKE_BUILD_TYPE="debug"
|
||||
BUILD_TYPE="Debug"
|
||||
SHARED_LIBS="ON"
|
||||
@@ -55,6 +56,8 @@ do
|
||||
ack_and_ignore_asan ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
(-w | --wheel)
|
||||
WHEEL_PACKAGE=true ; shift ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
(-p | --package)
|
||||
@@ -75,6 +78,7 @@ fi
|
||||
|
||||
|
||||
clean_rocm_cmake() {
|
||||
rm -rf "$ROCM_WHEEL_DIR"
|
||||
rm -rf $ROCM_CMAKE_BUILD_DIR
|
||||
rm -rf $ROCM_CMAKE_PACKAGE_DEB
|
||||
rm -rf $ROCM_CMAKE_PACKAGE_RPM
|
||||
@@ -102,6 +106,19 @@ build_rocm_cmake() {
|
||||
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$ROCM_CMAKE_PACKAGE_RPM" $ROCM_CMAKE_BUILD_DIR/rocm-cmake*.rpm
|
||||
}
|
||||
|
||||
create_wheel_package() {
|
||||
echo "Creating rocm-cmake wheel package"
|
||||
# Copy the setup.py generator to build folder
|
||||
mkdir -p $ROCM_WHEEL_DIR
|
||||
cp -f $SCRIPT_ROOT/generate_setup_py.py $ROCM_WHEEL_DIR
|
||||
cp -f $SCRIPT_ROOT/repackage_wheel.sh $ROCM_WHEEL_DIR
|
||||
cd $ROCM_WHEEL_DIR
|
||||
# Currently only supports python3.6
|
||||
./repackage_wheel.sh $ROCM_CMAKE_BUILD_DIR/rocm-cmake*.rpm python3.6
|
||||
# Copy the wheel created to RPM folder which will be uploaded to artifactory
|
||||
copy_if WHL "WHL" "$ROCM_CMAKE_PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
|
||||
}
|
||||
|
||||
print_output_directory() {
|
||||
case ${PKGTYPE} in
|
||||
("deb")
|
||||
@@ -121,4 +138,9 @@ case $TARGET in
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
if [[ $WHEEL_PACKAGE == true ]]; then
|
||||
echo "Wheel Package build started !!!!"
|
||||
create_wheel_package
|
||||
fi
|
||||
|
||||
echo "Operation complete"
|
||||
|
||||
@@ -7,6 +7,7 @@ bison
|
||||
bridge-utils
|
||||
build-essential
|
||||
bzip2
|
||||
ccache
|
||||
check
|
||||
chrpath
|
||||
cifs-utils
|
||||
@@ -120,9 +121,11 @@ python3-yaml
|
||||
python3.8-dev
|
||||
re2c
|
||||
redis-tools
|
||||
# Eventually we should be able to remove rpm for debian builds.
|
||||
rpm
|
||||
rsync
|
||||
ssh
|
||||
# This makes life more pleasant inside the container
|
||||
strace
|
||||
sudo
|
||||
systemtap-sdt-dev
|
||||
|
||||
285
tools/rocm-build/docker/ubuntu22/install-prerequisities.sh
Normal file
@@ -0,0 +1,285 @@
|
||||
#! /usr/bin/bash
|
||||
|
||||
set -x
|
||||
|
||||
apt-get -y update
|
||||
DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install --no-install-recommends -y $(sed 's/#.*//' /tmp/packages)
|
||||
apt-get clean
|
||||
rm -rf /var/cache/apt/ /var/lib/apt/lists/* /etc/apt/apt.conf.d/01proxy
|
||||
|
||||
#Install 2.17.1 version of git as we are seeing issues with 2.25 , where it was not allowing to add git submodules if the user is different for parent git directory
|
||||
curl -o git.tar.gz https://cdn.kernel.org/pub/software/scm/git/git-2.17.1.tar.gz
|
||||
tar -zxf git.tar.gz
|
||||
cd git-*
|
||||
make prefix=/usr/local all
|
||||
make prefix=/usr/local install
|
||||
git --version
|
||||
|
||||
#install argparse and CppHeaderParser python modules for roctracer and rocprofiler
|
||||
#install rocm-docs-core for the docs-as-code project. Only needed on one OS
|
||||
# CppHeader needs setuptools. setuptools needs wheel.
|
||||
# Looks like I need them as separate commands
|
||||
# Sigh, install both python2 and python 3 version
|
||||
pip3 install --no-cache-dir setuptools wheel tox
|
||||
pip3 install --no-cache-dir CppHeaderParser argparse requests lxml barectf recommonmark jinja2==3.0.0 websockets matplotlib numpy scipy minimal msgpack pytest sphinx joblib PyYAML rocm-docs-core cmake==3.25.2 pandas myst-parser
|
||||
|
||||
# Allow sudo for everyone user
|
||||
echo 'ALL ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/everyone
|
||||
|
||||
# Install OCaml packages to build LLVM's OCaml bindings to be used in lightning compiler test pipeline
|
||||
wget -nv https://sourceforge.net/projects/opam.mirror/files/2.1.4/opam-2.1.4-x86_64-linux -O /usr/local/bin/opam
|
||||
chmod +x /usr/local/bin/opam
|
||||
opam init --yes --disable-sandboxing
|
||||
opam install ctypes --yes
|
||||
|
||||
# Install and modify git-repo (#!/usr/bin/env python -> #!/usr/bin/env python3)
|
||||
curl https://storage.googleapis.com/git-repo-downloads/repo > /usr/bin/repo
|
||||
chmod a+x /usr/bin/repo
|
||||
|
||||
# Build ccache from the source
|
||||
cd /tmp
|
||||
git clone https://github.com/ccache/ccache -b v4.7.5
|
||||
cd ccache
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
make
|
||||
make install
|
||||
cd /tmp
|
||||
rm -rf ccache
|
||||
|
||||
# Install sharp from MLNX_OFED_LINUX as dependency for rccl-rdma-sharp-plugins
|
||||
cd /var/tmp
|
||||
mkdir mlnx
|
||||
wget -O mlnx/tar.tgz https://content.mellanox.com/ofed/MLNX_OFED-24.01-0.3.3.1/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.tgz
|
||||
tar -xz -C mlnx -f mlnx/tar.tgz
|
||||
apt-key add mlnx/*/RPM-GPG-KEY-Mellanox
|
||||
echo "deb [arch=amd64] file:$(echo $PWD/mlnx/*/DEBS) ./" > /etc/apt/sources.list.d/sharp.list
|
||||
apt update
|
||||
apt install -y sharp
|
||||
apt clean
|
||||
rm -rf /var/cache/apt/ /var/lib/apt/lists/* mlnx /etc/apt/sources.list.d/sharp.list
|
||||
|
||||
apt update
|
||||
apt -y install libunwind-dev
|
||||
apt -y install libgoogle-glog-dev
|
||||
|
||||
# Install python3.8 from source
|
||||
curl -LO https://www.python.org/ftp/python/3.8.13/Python-3.8.13.tar.xz
|
||||
tar -xvf Python-3.8.13.tar.xz
|
||||
pwd
|
||||
ls /var/tmp/
|
||||
ls Python-3.8.13
|
||||
mv Python-3.8.13 /opt/
|
||||
apt install build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libsqlite3-dev libreadline-dev libffi-dev curl libbz2-dev pkg-config make -y
|
||||
cd /opt/Python-3.8.13/
|
||||
./configure --enable-optimizations --enable-shared
|
||||
make
|
||||
make -j 6
|
||||
make altinstall
|
||||
ldconfig /opt/Python3.8.13
|
||||
python3.8 --version

# roctracer and rocprofiler need this python3.8
python3.8 -m pip install setuptools wheel
python3.8 -m pip install CppHeaderParser argparse requests lxml PyYAML joblib

# Install an older version of the hwloc development package for rocrtst
curl -LO https://download.open-mpi.org/release/hwloc/v1.11/hwloc-1.11.13.tar.bz2
tar -xvf hwloc-1.11.13.tar.bz2
cd hwloc-1.11.13
./configure
make
make install
cp /usr/local/lib/libhwloc.so.5 /usr/lib
hwloc-info --version
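
# Hedged addition: the copy above drops libhwloc.so.5 into /usr/lib but never refreshes the
# dynamic linker cache; running ldconfig here is a reasonable (assumed) follow-up.
ldconfig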

# Install gtest
mkdir -p /tmp/gtest
cd /tmp/gtest
wget https://github.com/google/googletest/archive/refs/tags/v1.14.0.zip -O googletest.zip
unzip googletest.zip
cd googletest-1.14.0/
mkdir build
cd build
cmake ..
make -j$(nproc)
make install
rm -rf /tmp/gtest
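
# Optional smoke test (an illustrative sketch, not part of the original script): compile and run
# a one-case GoogleTest program against the freshly installed headers and libraries in /usr/local.
cat > /tmp/gtest_smoke.cpp <<'EOF'
#include <gtest/gtest.h>
// Trivial assertion just to prove the gtest headers and libraries link and run.
TEST(Smoke, Arithmetic) { EXPECT_EQ(2 + 2, 4); }
EOF
g++ -std=c++14 /tmp/gtest_smoke.cpp -lgtest -lgtest_main -pthread -o /tmp/gtest_smoke
/tmp/gtest_smoke
rm -f /tmp/gtest_smoke /tmp/gtest_smoke.cpp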

## Install gRPC from source
## RDC Pre-requisites
GRPC_ARCHIVE=grpc-1.61.0.tar.gz
mkdir /tmp/grpc
mkdir /usr/grpc
cd /tmp
git clone --recurse-submodules -b v1.61.0 https://github.com/grpc/grpc
cd grpc
mkdir -p build
cd build
cmake -DgRPC_INSTALL=ON -DBUILD_SHARED_LIBS=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX=/usr/grpc -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=14 -DCMAKE_SHARED_LINKER_FLAGS_INIT=-Wl,--enable-new-dtags,--build-id=sha1,--rpath,'$ORIGIN' ..
make -j $(nproc) install
rm -rf /tmp/grpc
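
# Note (an assumption, not shown in the original snippet): gRPC was installed to the non-standard
# prefix /usr/grpc, so downstream builds such as RDC typically need to be pointed at it, e.g.:
export CMAKE_PREFIX_PATH=/usr/grpc${CMAKE_PREFIX_PATH:+:$CMAKE_PREFIX_PATH}
export PKG_CONFIG_PATH=/usr/grpc/lib/pkgconfig${PKG_CONFIG_PATH:+:$PKG_CONFIG_PATH}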

## rocBLAS Pre-requisites
## Download prebuilt AMD multithreaded blis (2.0)
## Reference : https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/install.sh#L403
mkdir -p /tmp/blis
cd /tmp/blis
wget -O - https://github.com/amd/blis/releases/download/2.0/aocl-blis-mt-ubuntu-2.0.tar.gz | tar xfz -
mv amd-blis-mt /usr/blis
cd /
rm -rf /tmp/blis

## rocBLAS Pre-requisites (SWDEV-404612)
## Download aocl-linux-gcc-4.2.0_1_amd64.deb
mkdir -p /tmp/aocl
cd /tmp/aocl
wget -nv https://download.amd.com/developer/eula/aocl/aocl-4-2/aocl-linux-gcc-4.2.0_1_amd64.deb
apt install ./aocl-linux-gcc-4.2.0_1_amd64.deb
rm -rf /tmp/aocl

## hipBLAS Pre-requisites
## LAPACK (v3.9.1)
## Reference https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/install.sh#L174
lapack_version=3.9.1
lapack_srcdir=lapack-$lapack_version
lapack_blddir=lapack-$lapack_version-bld
mkdir -p /tmp/lapack
cd /tmp/lapack
rm -rf "$lapack_srcdir" "$lapack_blddir"
wget -O - https://github.com/Reference-LAPACK/lapack/archive/refs/tags/v3.9.1.tar.gz | tar xzf -
cmake -H$lapack_srcdir -B$lapack_blddir -DCMAKE_BUILD_TYPE=Release -DCMAKE_Fortran_FLAGS=-fno-optimize-sibling-calls -DBUILD_TESTING=OFF -DCBLAS=ON -DLAPACKE=OFF
make -j$(nproc) -C "$lapack_blddir"
make -C "$lapack_blddir" install
cd $lapack_blddir
cp -r ./include/* /usr/local/include/
cp -r ./lib/* /usr/local/lib
cd /
rm -rf /tmp/lapack

## rocSOLVER Pre-requisites
## fmt (v7.1.3)
## Reference https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/install.sh#L152
fmt_version=7.1.3
fmt_srcdir=fmt-$fmt_version
fmt_blddir=fmt-$fmt_version-bld
mkdir -p /tmp/fmt
cd /tmp/fmt
rm -rf "$fmt_srcdir" "$fmt_blddir"
wget -O - https://github.com/fmtlib/fmt/archive/refs/tags/7.1.3.tar.gz | tar xzf -
cmake -H$fmt_srcdir -B$fmt_blddir -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_CXX_EXTENSIONS=OFF -DCMAKE_CXX_STANDARD_REQUIRED=ON -DFMT_DOC=OFF -DFMT_TEST=OFF
make -j$(nproc) -C "$fmt_blddir"
make -C "$fmt_blddir" install

# Build and install libjpeg-turbo
mkdir -p /tmp/libjpeg-turbo
cd /tmp/libjpeg-turbo
wget -nv https://github.com/rrawther/libjpeg-turbo/archive/refs/heads/2.0.6.2.zip -O libjpeg-turbo-2.0.6.2.zip
unzip libjpeg-turbo-2.0.6.2.zip
cd libjpeg-turbo-2.0.6.2
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RELEASE -DENABLE_STATIC=FALSE -DCMAKE_INSTALL_DEFAULT_LIBDIR=lib ..
make -j$(nproc) install
rm -rf /tmp/libjpeg-turbo

# Build the released Kitware ninja from source
mkdir -p /tmp/ninja
cd /tmp/ninja
wget -nv https://codeload.github.com/Kitware/ninja/zip/refs/tags/v1.11.1.g95dee.kitware.jobserver-1 -O ninja.zip
unzip ninja.zip
cd ninja-1.11.1.g95dee.kitware.jobserver-1
./configure.py --bootstrap
cp ninja /usr/local/bin/
rm -rf /tmp/ninja

# Install FFmpeg and dependencies
# Build NASM
mkdir -p /tmp/nasm-2.15.05
cd /tmp
wget -qO- "https://distfiles.macports.org/nasm/nasm-2.15.05.tar.bz2" | tar -xvj
cd nasm-2.15.05
./autogen.sh
./configure --prefix="/usr/local"
make -j$(nproc) install
rm -rf /tmp/nasm-2.15.05

# Build YASM
mkdir -p /tmp/yasm-1.3.0
cd /tmp
wget -qO- "http://www.tortall.net/projects/yasm/releases/yasm-1.3.0.tar.gz" | tar -xvz
cd yasm-1.3.0
./configure --prefix="/usr/local"
make -j$(nproc) install
rm -rf /tmp/yasm-1.3.0

# Build x264
mkdir -p /tmp/x264-snapshot-20191217-2245-stable
cd /tmp
wget -qO- "https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-20191217-2245-stable.tar.bz2" | tar -xvj
cd /tmp/x264-snapshot-20191217-2245-stable
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" ./configure --prefix="/usr/local" --enable-shared
make -j$(nproc) install
rm -rf /tmp/x264-snapshot-20191217-2245-stable

# Build x265
mkdir -p /tmp/x265_2.7
cd /tmp
wget -qO- "https://get.videolan.org/x265/x265_2.7.tar.gz" | tar -xvz
cd /tmp/x265_2.7/build/linux
cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="/usr/local" -DENABLE_SHARED:bool=on ../../source
make -j$(nproc) install
rm -rf /tmp/x265_2.7

# Build fdk-aac
mkdir -p /tmp/fdk-aac-2.0.2
cd /tmp
wget -qO- "https://sourceforge.net/projects/opencore-amr/files/fdk-aac/fdk-aac-2.0.2.tar.gz" | tar -xvz
cd /tmp/fdk-aac-2.0.2
autoreconf -fiv
./configure --prefix="/usr/local" --enable-shared --disable-static
make -j$(nproc) install
rm -rf /tmp/fdk-aac-2.0.2

# Build FFmpeg
cd /tmp
git clone -b release/4.4 https://git.ffmpeg.org/ffmpeg.git ffmpeg
cd ffmpeg
export PKG_CONFIG_PATH="/usr/local/lib/pkgconfig"
./configure --prefix="/usr/local" --extra-cflags="-I/usr/local/include" --extra-ldflags="-L/usr/local/lib" --extra-libs=-lpthread --extra-libs=-lm --enable-shared --disable-static --enable-libx264 --enable-libx265 --enable-libfdk-aac --enable-gpl --enable-nonfree
make -j$(nproc) install
rm -rf /tmp/ffmpeg
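
# Optional verification (an added sketch): refresh the linker cache for the freshly installed
# shared libraries, then confirm the encoders this build was configured for are actually present.
ldconfig
ffmpeg -hide_banner -encoders | grep -E 'libx264|libx265|libfdk_aac'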

cp /tmp/local-pin-600 /etc/apt/preferences.d

command -v lbzip2
ln -sf $(command -v lbzip2) /usr/local/bin/compressor || ln -sf $(command -v bzip2) /usr/local/bin/compressor

# Install Google Benchmark
mkdir -p /tmp/Gbenchmark
cd /tmp/Gbenchmark
wget -qO- https://github.com/google/benchmark/archive/refs/tags/v1.6.1.tar.gz | tar xz
cmake -Sbenchmark-1.6.1 -Bbuild -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DBENCHMARK_ENABLE_TESTING=OFF -DCMAKE_CXX_STANDARD=14
make -j -C build
cd /tmp/Gbenchmark/build
make install

# Build boost-1.85.0 from source for RPP
# Installing in a non-standard location because the hipFFT and rocFFT test packages pick up the version of
# the installed Boost library and declare a package dependency on that specific version of Boost.
# For example, if this were installed in the standard location, it would declare a dependency on libboost-dev(el) 1.85.0,
# which is not available as a package in any distro.
# Once this is fixed, we can remove the Boost package from the requirements list and install this
# in the standard location.
mkdir -p /tmp/boost-1.85.0
cd /tmp/boost-1.85.0
wget -nv https://sourceforge.net/projects/boost/files/boost/1.85.0/boost_1_85_0.tar.bz2 -O ./boost_1_85_0.tar.bz2
tar -xf boost_1_85_0.tar.bz2 --use-compress-program="/usr/local/bin/compressor"
cd boost_1_85_0
./bootstrap.sh --prefix=${RPP_DEPS_LOCATION} --with-python=python3
./b2 stage -j$(nproc) threading=multi link=shared cxxflags="-std=c++11"
./b2 install threading=multi link=shared --with-system --with-filesystem
./b2 stage -j$(nproc) threading=multi link=static cxxflags="-std=c++11 -fpic" cflags="-fpic"
./b2 install threading=multi link=static --with-system --with-filesystem
rm -rf /tmp/boost-1.85.0
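
# Optional sanity check (assumes RPP_DEPS_LOCATION was exported earlier in this build script):
# make sure the staged Boost libraries actually landed under the non-standard prefix.
ls "${RPP_DEPS_LOCATION}/lib" | grep -i boost || echo "WARNING: no Boost libraries found under ${RPP_DEPS_LOCATION}/lib"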

@@ -7,6 +7,7 @@ bison
bridge-utils
build-essential
bzip2
ccache
check
chrpath
cifs-utils