Compare commits
197 Commits
docs/6.1.1...env_variab
| Author | SHA1 | Date |
|---|---|---|
| | 848159f6c3 | |
| | ddf810a781 | |
| | a7cc71df62 | |
| | 5c4674027b | |
| | f111d5654c | |
| | 7f43dbbbb7 | |
| | 526db1c474 | |
| | d612ae390c | |
| | cca2dc23c0 | |
| | 0a88853ca3 | |
| | 4164cdc606 | |
| | 712e63d0ad | |
| | f2adaebbcd | |
| | 3f74a73220 | |
| | 84e3063e0a | |
| | 06e8d93bf9 | |
| | 55ee1d1b95 | |
| | 97619286df | |
| | f0a0d4e738 | |
| | 517b8645b4 | |
| | 367d6cdf5e | |
| | 4db7ffeb69 | |
| | afb343fce6 | |
| | fcc99a324a | |
| | 6367a53775 | |
| | e19b947c26 | |
| | f181f84b97 | |
| | 1f2d583372 | |
| | f8d46afdd2 | |
| | 9068df3bb7 | |
| | 9adeb56ebd | |
| | 37775f2ff4 | |
| | 9e7a8a93cd | |
| | 8e90fdbc4a | |
| | 325a2fd54c | |
| | a552f9f6b8 | |
| | accb1347ea | |
| | 699b604f00 | |
| | ce08245f4c | |
| | 5c9d071e85 | |
| | 356ad4ab47 | |
| | 57d59bfcc6 | |
| | 791285772d | |
| | 217830fe25 | |
| | f07608bc92 | |
| | 1435634f5c | |
| | ee384ba0e0 | |
| | bb0090882c | |
| | 22e9f6f373 | |
| | d994302df7 | |
| | 9d4eb5eff2 | |
| | 8b95ab0a02 | |
| | e74245fbe4 | |
| | 778c8e2c05 | |
| | 361983fa48 | |
| | 3dff636d40 | |
| | 1d976a1871 | |
| | ebfec1b7c1 | |
| | 66b71ba3c8 | |
| | e903ffa952 | |
| | fe1c2e9529 | |
| | 923141f300 | |
| | c91e15a580 | |
| | d24b3fab61 | |
| | e864aa50ac | |
| | 2531f0aa03 | |
| | 13e14363cc | |
| | 664c047311 | |
| | 78fdcdf48d | |
| | c4181b9245 | |
| | 7a13a6ee86 | |
| | ace708935d | |
| | cff1b2b021 | |
| | d7eacf56e3 | |
| | bddbc6b444 | |
| | 67f04977fb | |
| | f500c32989 | |
| | 3c1d39f251 | |
| | 93f524586b | |
| | b36de1d3d4 | |
| | 627d38412a | |
| | 1be99075e2 | |
| | 05d7992361 | |
| | 98f2e183a2 | |
| | ab1c62464a | |
| | 2e73c56275 | |
| | f8151b6cb5 | |
| | 52bccc1819 | |
| | 2b492056ec | |
| | b12e5c32ca | |
| | 8db9220935 | |
| | 30851e9c85 | |
| | fdd0ed080b | |
| | d3f634ea33 | |
| | 6c73abbaea | |
| | c49877adc9 | |
| | 49404d69f8 | |
| | d17e602769 | |
| | 2fdbc8b475 | |
| | 7d3fb25725 | |
| | 8c3eaa1fda | |
| | acca214a29 | |
| | b7c6671e06 | |
| | 27bd772bbe | |
| | 68c45d30b5 | |
| | 35835c4289 | |
| | 73b7b02c4f | |
| | ba7afa9808 | |
| | ae6eac2823 | |
| | 6eb6a5bd90 | |
| | 55bb127e9a | |
| | e65e9307f5 | |
| | 6494885359 | |
| | 266f502010 | |
| | bf08674992 | |
| | 8826b10b92 | |
| | 17f12a11e7 | |
| | b2f0f0acdf | |
| | a11c0512e1 | |
| | eec71da8dd | |
| | 39891fe185 | |
| | 14ee171649 | |
| | a96ec80cb0 | |
| | e7bff21d3e | |
| | 57506ba947 | |
| | 4b67c8725b | |
| | 6abe5b50a2 | |
| | df864f8f79 | |
| | 7290ce9030 | |
| | d6d18d7cd4 | |
| | 258e504595 | |
| | 156215efcc | |
| | 7c448eec8f | |
| | 30f10e0145 | |
| | 1e55e01af3 | |
| | 9a347aa168 | |
| | 29f9b4ab23 | |
| | 6e99bef8f4 | |
| | fed33835a0 | |
| | f52bc2bc68 | |
| | 205790159d | |
| | 5025a03f79 | |
| | 9679a84a8b | |
| | d34f7d7777 | |
| | 16fca72626 | |
| | 1a6ce7f6e0 | |
| | 35c17fcce5 | |
| | bf19dd1dc8 | |
| | 5fec2e1ca4 | |
| | 527840e502 | |
| | a65db6b47d | |
| | 1975889da1 | |
| | b69b997d69 | |
| | b9c4490f96 | |
| | 7fcb0f19a9 | |
| | 625c18371c | |
| | 52f8a0ad36 | |
| | 9dd6e42122 | |
| | 9d27863954 | |
| | 04561cc60f | |
| | 14a3e80a1b | |
| | ad9cdaa2a9 | |
| | 32334fd826 | |
| | 61d18252ab | |
| | 2d8eba0404 | |
| | cfaa056ae0 | |
| | 6a5defb825 | |
| | 6864f1546e | |
| | 58f543c010 | |
| | 7504e6bc13 | |
| | 7e1a1bc7c2 | |
| | a2574adc73 | |
| | 7207d815d1 | |
| | 5930282993 | |
| | e63ff81549 | |
| | cd575e2926 | |
| | 3a68f43df7 | |
| | a8c7faeae3 | |
| | 892c0957b8 | |
| | 82ed9e9ffd | |
| | 32592f436b | |
| | cd5c6768d7 | |
| | 97129c0972 | |
| | 885ad0da42 | |
| | 80d7feeebc | |
| | 518a2069b3 | |
| | 2160ee6556 | |
| | 657a27758a | |
| | 0ba6bb43ef | |
| | cf53fda864 | |
| | aac6898385 | |
| | d86c23a847 | |
| | 06c960aa97 | |
| | 3edc3e9759 | |
| | 41da494ef0 | |
| | c0fbd1ca5b | |
| | 7f38465770 | |
.azuredevops/ci-builds/aomp.yml (new file, 29 lines)

```yaml
variables:
- group: common
- template: /.azuredevops/variables-global.yml

resources:
  repositories:
  - repository: release_repo
    type: github
    endpoint: ROCm
    name: ROCm/aomp
    ref: aomp-dev
  - repository: llvm-project_repo
    type: github
    endpoint: ROCm
    name: ROCm/llvm-project
    ref: amd-staging
  pipelines:
  - pipeline: rocr-runtime_pipeline
    source: \ROCR-Runtime
    trigger: true
    # this job will only be triggered after successful build sequence of llvm-project and ROCR-Runtime

trigger: none
pr: none

jobs:
- template: ${{ variables.CI_COMPONENT_PATH }}/aomp.yml
  parameters:
    checkoutRepo: release_repo
```
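The `resources.pipelines` block in the new file above is what chains this build behind its upstream components: `trigger: none` and `pr: none` disable the usual CI/PR triggers, so the pipeline only runs when the named upstream pipeline completes successfully. A minimal hedged sketch of the same mechanism (the pipeline name and alias here are illustrative, not from the diff):

```yaml
# Minimal pipeline-completion trigger (illustrative names).
resources:
  pipelines:
  - pipeline: upstream_alias          # local alias used within this pipeline
    source: \Some-Upstream-Pipeline   # upstream pipeline name in Azure DevOps
    trigger: true                     # queue this pipeline when the upstream run succeeds
```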
```diff
@@ -84,8 +84,8 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang
         -DCMAKE_BUILD_TYPE=Release
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm/llvm;$(Agent.BuildDirectory)/rocm
```
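A pattern that recurs throughout the hunks below is the replacement of hard-coded `/opt/rocm` tool paths with paths under `$(Agent.BuildDirectory)/rocm`. `$(Agent.BuildDirectory)` is a predefined Azure Pipelines runtime variable (the agent's per-pipeline working folder); the diffs assume earlier steps stage the ROCm dependency artifacts there. A minimal sketch of how such a staged path would be used (the step itself is illustrative):

```yaml
# Sketch only: assumes a prior step extracted ROCm artifacts
# into $(Agent.BuildDirectory)/rocm on the build agent.
steps:
- script: ls $(Agent.BuildDirectory)/rocm/llvm/bin
  displayName: Show staged ROCm toolchain (illustrative)
```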
```diff
@@ -9,20 +9,29 @@ parameters:
   type: object
   default:
   - software-properties-common
   - python3-pip
   - cmake
   - ninja-build
-  - composablekernel-dev
-  - half
-  - rocrand
-  - rocblas
   - libsqlite3-dev
   - libbz2-dev
   - nlohmann-json3-dev
   - libgtest-dev
   - libdrm-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - rocMLIR
+  - rocRAND
+  - rocBLAS
+  - half
+  - composable_kernel
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - rocprofiler-register
+  - clr
+  - rocminfo
+  - roctracer
 
 jobs:
 - job: MIOpen
@@ -30,8 +39,6 @@ jobs:
   - group: common
   - template: /.azuredevops/variables-global.yml
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -70,9 +77,8 @@ jobs:
     parameters:
       extraBuildFlags: >-
         -DMIOPEN_BACKEND=HIP
-        -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
-        -DCMAKE_PREFIX_PATH="$(Agent.BuildDirectory)/rocm"
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DMIOPEN_ENABLE_AI_KERNEL_TUNING=OFF
         -DMIOPEN_ENABLE_AI_IMMED_MODE_FALLBACK=OFF
         -DCMAKE_BUILD_TYPE=Release
```
```diff
@@ -14,10 +14,6 @@ parameters:
   - wget
   - unzip
   - pkg-config
-  - half
-  - rocblas-dev
-  - miopen-hip-dev
-  - migraphx-dev
   - protobuf-compiler
   - libprotoc-dev
   - ffmpeg
@@ -25,10 +21,6 @@ parameters:
   - libavformat-dev
   - libavutil-dev
   - libswscale-dev
-  - rpp
-  - rpp-dev
-  - rocdecode
-  - rocdecode-dev
   - build-essential
   - libgtk2.0-dev
   - libavcodec-dev
@@ -41,6 +33,7 @@ parameters:
   - libtiff-dev
   - libdc1394-dev
   - libgmp-dev
+  - libopencv-dev
 - name: pipModules
   type: object
   default:
@@ -50,6 +43,21 @@ parameters:
   - google==3.0.0
   - protobuf==3.12.4
   - onnx==1.12.0
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocprofiler-register
+  - half
+  - rocBLAS
+  - MIOpen
+  - AMDMIGraphX
+  - rpp
+  - rocDecode
 
 jobs:
 - job: MIVisionX
@@ -58,8 +66,6 @@ jobs:
   - template: /.azuredevops/variables-global.yml
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -71,11 +77,23 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
         -DCMAKE_BUILD_TYPE=Release
-        -DROCM_PATH=/opt/rocm
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
         -DROCM_DEP_ROCMCORE=ON
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
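The paired `# CI case` / `# manual build case` steps above recur in almost every component file in this diff. They rely on compile-time template expressions: a `- ${{ if ... }}:` entry in a step list is evaluated when the template is expanded, and the nested steps are spliced in only when the condition holds. A minimal hedged sketch of the mechanism in isolation (step contents are illustrative):

```yaml
# Minimal sketch of compile-time conditional step insertion (illustrative names).
parameters:
- name: checkoutRef
  type: string
  default: ''

steps:
- script: echo "always runs"
# Spliced into the step list only when checkoutRef is empty, at template expansion time.
- ${{ if eq(parameters.checkoutRef, '') }}:
  - script: echo "CI build of the default branch"
# Spliced in only when a ref was passed in (manual / tag build).
- ${{ if ne(parameters.checkoutRef, '') }}:
  - script: echo "manual build of ${{ parameters.checkoutRef }}"
```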
```diff
@@ -10,6 +10,13 @@ parameters:
   default:
   - cmake
   - ninja-build
+- name: rocmDependencies
+  type: object
+  default:
+  - clr
+  - llvm-project
+  - rocminfo
+  - ROCR-Runtime
 
 jobs:
 - job: ROCdbgapi
@@ -18,8 +25,6 @@ jobs:
   - template: /.azuredevops/variables-global.yml
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -30,9 +35,22 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
         -DCMAKE_BUILD_TYPE=Release
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -10,22 +10,33 @@ parameters:
   default:
   - cmake
   - ninja-build
-  - rocblas
   - libyaml-cpp-dev
   - libpci-dev
   - libpci3
   - googletest
   - git
+- name: rocmDependencies
+  type: object
+  default:
+  - clr
+  - llvm-project
+  - rocBLAS
+  - rocm-cmake
+  - rocm_smi_lib
+  - rocminfo
+  - rocprofiler-register
+  - ROCR-Runtime
+  - ROCT-Thunk-Interface
 
 jobs:
 - job: ROCmValidationSuite
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
+  - name: HIP_ROCCLR_HOME
+    value: $(Build.BinariesDirectory)/rocm
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -36,11 +47,23 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DROCM_PATH=/opt/rocm
-        -DCMAKE_PREFIX_PATH=/opt/rocm
-        -DCPACK_PACKAGING_INSTALL_PREFIX='$(Build.BinariesDirectory)'
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
+        -DCPACK_PACKAGING_INSTALL_PREFIX=$(Build.BinariesDirectory)
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -5,6 +5,9 @@ parameters:
 - name: checkoutRef
   type: string
   default: ''
+- name: offloadEnabled
+  type: boolean
+  default: false
 - name: aptPackages
   type: object
   default:
@@ -94,18 +97,18 @@ jobs:
       cmakeBuildDir: $(Build.SourcesDirectory)/llvm-project/openmp/build
       installDir: $(Build.BinariesDirectory)/llvm
   # offload does not exist for recent releases, so use CI conditional
-  - ${{ if eq(parameters.checkoutRef, '') }}:
+  - ${{ if eq(parameters.offloadEnabled, true) }}:
     - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
       parameters:
         componentName: llvm-offload
         extraBuildFlags: >-
           -DOPENMP_ENABLE_LIBOMPTARGET=1
-          -DOPENMP_TEST_C_COMPILER==$(Agent.BuildDirectory)/rocm/llvm/bin/clang
-          -DOPENMP_TEST_CXX_COMPILER==$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
-          -DCMAKE_C_COMPILER==$(Agent.BuildDirectory)/rocm/llvm/bin/clang
-          -DCMAKE_CXX_COMPILER==$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
+          -DOPENMP_TEST_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang
+          -DOPENMP_TEST_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
+          -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang
+          -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
           -DLIBOMPTARGET_AMDGCN_GFXLIST=gfx700;gfx701;gfx801;gfx803;gfx900;gfx902;gfx906;gfx908;gfx90a;gfx90c;gfx940;gfx941;gfx942;gfx1030;gfx1031;gfx1035;gfx1036;gfx1100;gfx1101;gfx1102;gfx1103
-          -DLLVM_DIR==$(Agent.BuildDirectory)/rocm/llvm
+          -DLLVM_DIR=$(Agent.BuildDirectory)/rocm/llvm
           -DLLVM_MAIN_INCLUDE_DIR=$(Build.SourcesDirectory)/llvm-project/llvm/include
           -DLIBOMPTARGET_LLVM_INCLUDE_DIRS=$(Build.SourcesDirectory)/llvm-project/llvm/include
           -DCUDA_TOOLKIT_ROOT_DIR=OFF
```
```diff
@@ -11,15 +11,25 @@ parameters:
   - cmake
   - ninja-build
   - git
+  - python3-pip
+  - libdrm-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocprofiler-register
 
 jobs:
 - job: composable_kernel
+  timeoutInMinutes: 210
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
-  pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
+  pool: ${{ variables.ULTRA_BUILD_POOL }}
   workspace:
     clean: all
   steps:
@@ -30,12 +40,25 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_BUILD_TYPE=Release
         -DGPU_TARGETS=gfx1030;gfx1100
         -DINSTANCES_ONLY=ON
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -8,8 +8,17 @@ parameters:
 - name: aptPackages
   type: object
   default:
+  - python3-pip
   - cmake
   - ninja-build
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
 
 jobs:
 - job: half
@@ -18,8 +27,6 @@ jobs:
   - template: /.azuredevops/variables-global.yml
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -30,9 +37,22 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -10,25 +10,33 @@ parameters:
   default:
   - cmake
   - ninja-build
-  - rocblas-dev
-  - rocsparse
-  - rocsolver-dev
   - gfortran
   - googletest
   - git
   - libgtest-dev
   - wget
   - python3-pip
   - libomp-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocprofiler-register
+  - rocBLAS
+  - rocSPARSE
+  - rocSOLVER
+  - aomp
 
 jobs:
 - job: hipBLAS
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
+  - name: LD_LIBRARY_PATH
+    value: '/lib:/usr/lib:/usr/local/lib'
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -51,20 +59,24 @@ jobs:
       targetType: inline
       script: sudo apt install --yes ./aocl-linux-aocc-4.1.0_1_amd64.deb
       workingDirectory: '$(Pipeline.Workspace)'
-  - task: Bash@3
-    displayName: 'ldconfig'
-    inputs:
-      targetType: inline
-      script: sudo ldconfig
-      workingDirectory: '/usr/local/lib'
-  - script: 'ls -1R /usr/local'
-    displayName: 'Artifact listing'
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_PREFIX_PATH=/opt/rocm
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -DHIP_PLATFORM=amd
         -DBUILD_CLIENTS_TESTS=ON
```
```diff
@@ -8,25 +8,44 @@ parameters:
 - name: aptPackages
   type: object
   default:
   - cmake
   - ninja-build
   - python3-venv
   - libmsgpack-dev
-  - hipblas-dev
   - git
   - python3-pip
   - libdrm-dev
+- name: pipModules
+  type: object
+  default:
+  - joblib
+- name: rocmDependencies
+  type: object
+  default:
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocprofiler-register
+  - hipBLAS
 
 jobs:
 - job: hipBLASLt
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
   - name: HIP_ROCCLR_HOME
     value: $(Build.BinariesDirectory)/rocm
+  - name: TENSILE_ROCM_ASSEMBLER_PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
+  - name: CMAKE_CXX_COMPILER
+    value: $(Agent.BuildDirectory)/rocm/bin/hipcc
+  - name: TENSILE_ROCM_OFFLOAD_BUNDLER_PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang-offload-bundler
+  - name: TENSILE_ROCM_PATH
+    value: $(Agent.BuildDirectory)/rocm/bin/hipcc
+  - name: PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin:$(Agent.BuildDirectory)/rocm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -34,21 +53,36 @@ jobs:
     parameters:
       aptPackages: ${{ parameters.aptPackages }}
      pipModules: ${{ parameters.pipModules }}
+  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
+  - script: sudo ln -s $(Agent.BuildDirectory)/rocm /opt/rocm
+    displayName: ROCm symbolic link
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
         -DAMDGPU_TARGETS=gfx90a
         -DTensile_LOGIC=
         -DTensile_CPU_THREADS=
         -DTensile_CODE_OBJECT_VERSION=default
         -DTensile_LIBRARY_FORMAT=msgpack
-        -DCMAKE_PREFIX_PATH="/opt/rocm"
+        -DCMAKE_PREFIX_PATH="$(Agent.BuildDirectory)/rocm"
         -GNinja
-  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
+  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -10,9 +10,17 @@ parameters:
   default:
   - cmake
   - ninja-build
-  - rocprim
   - googletest
   - git
+  - python3-pip
+- name: rocmDependencies
+  type: object
+  default:
+  - clr
+  - llvm-project
+  - rocminfo
+  - rocPRIM
+  - ROCR-Runtime
 
 jobs:
 - job: hipCUB
@@ -20,8 +28,6 @@ jobs:
   - group: common
   - template: /.azuredevops/variables-global.yml
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -32,12 +38,24 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
-        -DCMAKE_PREFIX_PATH="/opt/rocm"
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DBUILD_TEST=ON
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -GNinja
```
```diff
@@ -10,22 +10,32 @@ parameters:
   default:
   - cmake
   - ninja-build
-  - rocrand
-  - hiprand
-  - rocfft
   - libboost-program-options-dev
   - googletest
   - libgtest-dev
   - libfftw3-dev
+  - python3-pip
+  - libomp-14-dev
+# rocm dependencies should match dependencies-rocm.yml
+- name: rocmDependencies
+  type: object
+  default:
+  - rocRAND
+  - hipRAND
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocFFT
+  - aomp
 jobs:
 - job: hipFFT
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
+  - name: HIP_ROCCLR_HOME
+    value: $(Build.BinariesDirectory)/rocm
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -36,16 +46,31 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
+        -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_BUILD_TYPE=Release
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -DUSE_HIP_CLANG=ON
         -DHIP_COMPILER=clang
         -DBUILD_CLIENTS_TESTS=ON
-        -DBUILD_CLIENTS_BENCH=OFF
+        -DBUILD_CLIENTS_BENCHMARKS=OFF
         -DBUILD_CLIENTS_SAMPLES=OFF
+        -L
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -10,15 +10,23 @@ parameters:
   default:
   - cmake
   - ninja-build
-  - rocblas
-  - rocsparse
-  - hipsparse
-  - rocsolver
   - libsuitesparse-dev
   - gfortran
   - git
   - googletest
   - libgtest-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - clr
+  - hipSPARSE
+  - llvm-project
+  - rocBLAS
+  - rocm-cmake
+  - rocminfo
+  - ROCR-Runtime
+  - rocSPARSE
+  - rocSOLVER
 
 jobs:
 - job: hipSOLVER
@@ -27,8 +35,6 @@ jobs:
   - template: /.azuredevops/variables-global.yml
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -39,6 +45,18 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   # build external gtest and lapack
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
@@ -52,10 +70,10 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_PREFIX_PATH="/opt/rocm;$(Pipeline.Workspace)/deps-install"
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Pipeline.Workspace)/deps-install
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -DBUILD_CLIENTS_TESTS=ON
         -DUSE_CUDA=OFF
```
```diff
@@ -13,10 +13,18 @@ parameters:
   - libboost-program-options-dev
   - googletest
   - libfftw3-dev
-  - rocsparse
   - git
   - gfortran
   - libgtest-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - clr
+  - llvm-project
+  - rocminfo
+  - rocprofiler-register
+  - ROCR-Runtime
+  - rocSPARSE
 
 jobs:
 - job: hipSPARSE
@@ -25,8 +33,6 @@ jobs:
   - template: /.azuredevops/variables-global.yml
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -37,15 +43,35 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_PREFIX_PATH="/opt/rocm;/opt/rocm/share/rocm/cmake/"
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/share/rocm/cmake/
         -DBUILD_CLIENTS_TESTS=ON
         -DBUILD_CLIENTS_SAMPLES=OFF
         -DBUILD_CLIENTS_BENCHMARKS=OFF
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
+    parameters:
+      artifactName: hipSPARSE
+      publish: false
+  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml
+    parameters:
+      sourceDir: $(Build.SourcesDirectory)/build/clients
+      contentsString: matrices/**
+  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
+    parameters:
+      artifactName: testMatrices
```
```diff
@@ -8,25 +8,43 @@ parameters:
 - name: aptPackages
   type: object
   default:
   - cmake
   - ninja-build
   - python3-venv
   - libmsgpack-dev
-  - hipsparse-dev
   - git
   - python3-pip
+- name: pipModules
+  type: object
+  default:
+  - joblib
+# rocm dependencies should match dependencies-rocm.yml
+- name: rocmDependencies
+  type: object
+  default:
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocprofiler-register
+  - hipSPARSE
+  - rocBLAS
 
 jobs:
 - job: hipSPARSELt
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
+  - name: HIP_ROCCLR_HOME
+    value: $(Build.BinariesDirectory)/rocm
+  - name: TENSILE_ROCM_ASSEMBLER_PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang
+  - name: CMAKE_CXX_COMPILER
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/hipcc
+  - name: TENSILE_ROCM_OFFLOAD_BUNDLER_PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang-offload-bundler
+  - name: PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin:$(Agent.BuildDirectory)/rocm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -34,21 +52,35 @@ jobs:
     parameters:
       aptPackages: ${{ parameters.aptPackages }}
       pipModules: ${{ parameters.pipModules }}
+  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
         -DAMDGPU_TARGETS=all
         -DTensile_LOGIC=
         -DTensile_CPU_THREADS=
         -DTensile_CODE_OBJECT_VERSION=default
         -DTensile_LIBRARY_FORMAT=msgpack
-        -DCMAKE_PREFIX_PATH="/opt/rocm"
+        -DCMAKE_PREFIX_PATH="$(Agent.BuildDirectory)/rocm"
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -10,12 +10,17 @@ parameters:
   default:
   - cmake
   - ninja-build
-  - composablekernel-dev
   - python3-pip
   - git
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - composable_kernel
 
 jobs:
 - job: hipTensor
@@ -23,8 +28,6 @@ jobs:
   - group: common
   - template: /.azuredevops/variables-global.yml
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -50,12 +53,11 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
-        -DCMAKE_PREFIX_PATH="$(Agent.BuildDirectory)/rocm/llvm"
-        -DROCM_PATH="$(Agent.BuildDirectory)/rocm"
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/llvm
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_BUILD_TYPE=Release
         -DHIPTENSOR_BUILD_TESTS=ON
-        -DAMDGPU_TARGETS=gfx1030;gfx1100
         -GNinja
+        -DAMDGPU_TARGETS=gfx90a
+      multithreadFlag: -- -j32
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -14,6 +14,7 @@ parameters:
   - ninja-build
   - python-is-python3
   - zlib1g-dev
+  - pkg-config
 - name: rocmDependencies
   type: object
   default:
@@ -68,8 +69,6 @@ jobs:
         -DLIBCXXABI_INSTALL_STATIC_LIBRARY=OFF
         -DLLVM_BUILD_DOCS=OFF
         -DLLVM_ENABLE_SPHINX=OFF
-        -DSPHINX_WARNINGS_AS_ERRORS=OFF
-        -DSPHINX_OUTPUT_MAN=OFF
         -DLLVM_ENABLE_ASSERTIONS=OFF
         -DLLVM_ENABLE_Z3_SOLVER=OFF
         -DLLVM_ENABLE_ZLIB=ON
@@ -80,7 +79,6 @@ jobs:
         -DPACKAGE_VENDOR=AMD
         -DCLANG_LINK_FLANG_LEGACY=ON
         -DCMAKE_CXX_STANDARD=17
-        -DFLANG_INCLUDE_DOCS=OFF
         -DROCM_LLVM_BACKWARD_COMPAT_LINK=$(Build.BinariesDirectory)/llvm
         -DROCM_LLVM_BACKWARD_COMPAT_LINK_TARGET=./lib/llvm
         -GNinja
```
```diff
@@ -8,21 +8,37 @@ parameters:
 - name: aptPackages
   type: object
   default:
   - python3-pip
   - cmake
   - libboost-program-options-dev
   - googletest
   - libfftw3-dev
   - git
+  - ninja-build
+  - libstdc++-12-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocm_smi_lib
+  - rocprofiler-register
+  - rocm-core
+  - HIPIFY
+  - aomp
+  - aomp-extras
 
 jobs:
 - job: rccl
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
+  - name: HIP_ROCCLR_HOME
+    value: $(Build.BinariesDirectory)/rocm
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -33,14 +49,29 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
+  - script: chmod +x $(Agent.BuildDirectory)/rocm/bin/hipify-perl
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
+        -DHALF_INCLUDE_DIR=$(Agent.BuildDirectory)/rocm/include
         -DCMAKE_BUILD_TYPE=Release
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
         -DBUILD_TESTS=ON
-        -DCMAKE_PREFIX_PATH="/opt/rocm;/opt/rocm/share/rocm/cmake/"
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/share/rocm/cmake/
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -8,6 +8,7 @@ parameters:
 - name: aptPackages
   type: object
   default:
+  - python3-pip
   - cmake
   - ninja-build
   - git
@@ -17,6 +18,16 @@ parameters:
   - autoconf
   - libtool
   - pkg-config
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocm_smi_lib
+  - amdsmi
 
 jobs:
 - job: rdc
@@ -24,8 +35,6 @@ jobs:
   - group: common
   - template: /.azuredevops/variables-global.yml
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -36,6 +45,18 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   # Build grpc
   - task: Bash@3
     displayName: 'git clone grpc'
@@ -57,6 +78,7 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DGRPC_ROOT="$(Build.SourcesDirectory)/bin"
         -DBUILD_TESTS=ON
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
.azuredevops/components/rocAL.yml (new file, 138 lines)

```yaml
parameters:
- name: checkoutRepo
  type: string
  default: 'self'
- name: checkoutRef
  type: string
  default: ''
- name: aptPackages
  type: object
  default:
  - python3-pip
  - python3-protobuf
  - cmake
  - ninja-build
  - libprotobuf-dev
  - libprotoc-dev
  - protobuf-compiler
  - liblmdb-dev
  - pkg-config
  - ffmpeg
  - libavcodec-dev
  - libavformat-dev
  - libavutil-dev
  - libswscale-dev
  - libturbojpeg-dev
  - libjpeg-turbo-official=3.0.2-20240124
  - libopencv-dev
- name: pipModules
  type: object
  default:
  - numpy
  - opencv-python
  - torch
  - pillow
- name: rocmDependencies
  type: object
  default:
  - rocm-cmake
  - llvm-project
  - ROCR-Runtime
  - clr
  - rocDecode
  - half
  - rpp
  - MIVisionX
  - aomp

jobs:
- job: rocAL
  variables:
  - group: common
  - template: /.azuredevops/variables-global.yml
  pool:
    vmImage: ${{ variables.BASE_BUILD_POOL }}
  workspace:
    clean: all
  steps:
  - task: Bash@3
    displayName: 'Register libjpeg-turbo packages'
    inputs:
      targetType: inline
      script: |
        sudo mkdir --parents --mode=0755 /etc/apt/keyrings
        wget -q -O- https://packagecloud.io/dcommander/libjpeg-turbo/gpgkey | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/libjpeg-turbo.gpg > /dev/null
        echo "deb [signed-by=/etc/apt/trusted.gpg.d/libjpeg-turbo.gpg] https://packagecloud.io/dcommander/libjpeg-turbo/any/ any main" | sudo tee /etc/apt/sources.list.d/libjpeg-turbo.list
        sudo apt update
        apt-cache show libjpeg-turbo-official | grep Version
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
    parameters:
      aptPackages: ${{ parameters.aptPackages }}
      pipModules: ${{ parameters.pipModules }}
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
    parameters:
      checkoutRepo: ${{ parameters.checkoutRepo }}
  - task: Bash@3
    displayName: 'Clone PyBind11'
    inputs:
      targetType: inline
      script: git clone --depth 1 -b v2.11.1 https://github.com/pybind/pybind11
      workingDirectory: '$(Build.SourcesDirectory)'
  - task: Bash@3
    displayName: 'Clone RapidJSON'
    inputs:
      targetType: inline
      script: git clone --depth 1 https://github.com/Tencent/rapidjson.git
      workingDirectory: '$(Build.SourcesDirectory)'
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
    parameters:
      componentName: PyBind11
      cmakeBuildDir: '$(Build.SourcesDirectory)/pybind11/build'
      customInstallPath: false
      installEnabled: false
      extraBuildFlags: >-
        -DDOWNLOAD_CATCH=ON
        -DDOWNLOAD_EIGEN=ON
        -GNinja
  - task: Bash@3
    displayName: 'Install PyBind11'
    inputs:
      targetType: inline
      script: sudo cmake --build . --target install
      workingDirectory: '$(Build.SourcesDirectory)/pybind11/build'
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
    parameters:
      componentName: RapidJSON
      cmakeBuildDir: '$(Build.SourcesDirectory)/rapidjson/build'
      customInstallPath: false
      installEnabled: false
      extraBuildFlags: >-
        -GNinja
  - task: Bash@3
    displayName: 'Install RapidJSON'
    inputs:
      targetType: inline
      script: sudo cmake --build . --target install
      workingDirectory: '$(Build.SourcesDirectory)/rapidjson/build'
  # CI case: download latest default branch build
  - ${{ if eq(parameters.checkoutRef, '') }}:
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
      parameters:
        dependencyList: ${{ parameters.rocmDependencies }}
        dependencySource: staging
  # manual build case: triggered by ROCm/ROCm repo
  - ${{ if ne(parameters.checkoutRef, '') }}:
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
      parameters:
        dependencyList: ${{ parameters.rocmDependencies }}
        dependencySource: tag-builds
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
    parameters:
      extraBuildFlags: >-
        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;/opt/libjpeg-turbo
        -DCMAKE_INSTALL_PREFIX_PYTHON=$Python3_STDARCH
        -DCMAKE_BUILD_TYPE=Release
        -GNinja
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
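The rocAL job above builds PyBind11 and RapidJSON from source before the component itself: clone, configure with CMake, then install with elevated rights. A minimal hedged sketch of that "clone, configure, install" pattern in isolation (repository and paths here are illustrative, reusing only what the file shows):

```yaml
# Sketch of the external-dependency build pattern used in rocAL.yml.
steps:
- script: git clone --depth 1 https://github.com/Tencent/rapidjson.git
  workingDirectory: $(Build.SourcesDirectory)
  displayName: Clone dependency
- script: cmake -S rapidjson -B rapidjson/build -GNinja
  workingDirectory: $(Build.SourcesDirectory)
  displayName: Configure dependency
- script: sudo cmake --build rapidjson/build --target install
  workingDirectory: $(Build.SourcesDirectory)
  displayName: Build and install dependency system-wide
```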
```diff
@@ -15,16 +15,29 @@ parameters:
   - git
   - mpich
   - ninja-build
+- name: rocmDependencies
+  type: object
+  default:
+  - aomp
+  - clr
+  - llvm-project
+  - rocBLAS
+  - rocminfo
+  - rocPRIM
+  - rocprofiler-register
+  - ROCR-Runtime
+  - rocRAND
+  - rocSPARSE
 
 jobs:
 - job: rocALUTION
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
+  - name: HIP_ROCCLR_HOME
+    value: $(Build.BinariesDirectory)/rocm
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -35,13 +48,25 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
-        -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_PREFIX_PATH="/opt/rocm;/opt/rocm/share/rocm/cmake/"
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/share/rocm/cmake/
+        -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/lib/cmake/hip
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -DBUILD_CLIENTS_TESTS=ON
         -DBUILD_CLIENTS_BENCHMARKS=OFF
```
```diff
@@ -18,19 +18,40 @@ parameters:
   - googletest
   - libgtest-dev
   - wget
   - python3-pip
   - libdrm-dev
+- name: pipModules
+  type: object
+  default:
+  - joblib
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocprofiler-register
+  - rocm_smi_lib
+  - rocm-core
+  - aomp
+  - aomp-extras
 
 jobs:
 - job: rocBLAS
   variables:
   - group: common
   - template: /.azuredevops/variables-global.yml
   - name: HIP_ROCCLR_HOME
     value: $(Build.BinariesDirectory)/rocm
+  - name: TENSILE_ROCM_ASSEMBLER_PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang
+  - name: CMAKE_CXX_COMPILER
+    value: $(Agent.BuildDirectory)/rocm/bin/hipcc
+  - name: TENSILE_ROCM_OFFLOAD_BUNDLER_PATH
+    value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang-offload-bundler
   pool: ${{ variables.MEDIUM_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
@@ -42,23 +63,60 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - task: Bash@3
+      displayName: 'Download AOCL'
+      inputs:
+        targetType: inline
+        script: wget -nv https://download.amd.com/developer/eula/aocl/aocl-4-2/aocl-linux-gcc-4.2.0_1_amd64.deb
+        workingDirectory: '$(Pipeline.Workspace)'
+    - task: Bash@3
+      displayName: 'Install AOCL'
+      inputs:
+        targetType: inline
+        script: sudo apt install --yes ./aocl-linux-gcc-4.2.0_1_amd64.deb
+        workingDirectory: '$(Pipeline.Workspace)'
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - task: Bash@3
+      displayName: 'Download AOCL'
+      inputs:
+        targetType: inline
+        script: wget -nv https://download.amd.com/developer/eula/aocl/aocl-4-1/aocl-linux-aocc-4.1.0_1_amd64.deb
+        workingDirectory: '$(Pipeline.Workspace)'
+    - task: Bash@3
+      displayName: 'Install AOCL'
+      inputs:
+        targetType: inline
+        script: sudo apt install --yes ./aocl-linux-aocc-4.1.0_1_amd64.deb
+        workingDirectory: '$(Pipeline.Workspace)'
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
+  - script: echo $PATH
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
         -DCMAKE_TOOLCHAIN_FILE=toolchain-linux.cmake
-        -DCMAKE_PREFIX_PATH="/opt/rocm;$(Pipeline.Workspace)/deps-install"
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm/llvm;$(Agent.BuildDirectory)/rocm;$(Pipeline.Workspace)/deps-install
         -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-        -DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
+        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
         -DAMDGPU_TARGETS=gfx1030;gfx1100
         -DTensile_CODE_OBJECT_VERSION=default
         -DTensile_LOGIC=asm_full
         -DTensile_SEPARATE_ARCHITECTURES=ON
         -DTensile_LAZY_LIBRARY_LOADING=ON
         -DTensile_LIBRARY_FORMAT=msgpack
         -DTENSILE_VENV_UPGRADE_PIP=ON
         -DBUILD_CLIENTS_TESTS=ON
         -DBUILD_CLIENTS_BENCHMARKS=OFF
         -DBUILD_CLIENTS_SAMPLES=OFF
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
```diff
@@ -8,6 +8,7 @@ parameters:
 - name: aptPackages
   type: object
   default:
+  - python3-pip
   - cmake
   - ninja-build
   - pkg-config
@@ -18,6 +19,16 @@ parameters:
   - libstdc++-12-dev
   - libva-dev
   - mesa-amdgpu-va-drivers
+  - libdrm-dev
+- name: rocmDependencies
+  type: object
+  default:
+  - rocm-cmake
+  - llvm-project
+  - ROCR-Runtime
+  - clr
+  - rocminfo
+  - rocm-core
 
 jobs:
 - job: rocDecode
@@ -26,11 +37,21 @@ jobs:
   - template: /.azuredevops/variables-global.yml
   pool:
     vmImage: ${{ variables.BASE_BUILD_POOL }}
-  container:
-    image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
   workspace:
     clean: all
   steps:
+  # Since mesa-amdgpu-multimedia-devel is not directly available from apt, register it
+  - task: Bash@3
+    displayName: 'Register ROCm packages'
+    inputs:
+      targetType: inline
+      script: |
+        sudo mkdir --parents --mode=0755 /etc/apt/keyrings
+        wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
+        echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${{ variables.KEYRING_VERSION }}/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
+        echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${{ variables.KEYRING_VERSION }} jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
+        echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
+        sudo apt update
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
     parameters:
       aptPackages: ${{ parameters.aptPackages }}
@@ -38,10 +59,24 @@ jobs:
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
     parameters:
       checkoutRepo: ${{ parameters.checkoutRepo }}
+  # CI case: download latest default branch build
+  - ${{ if eq(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: staging
+  # manual build case: triggered by ROCm/ROCm repo
+  - ${{ if ne(parameters.checkoutRef, '') }}:
+    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
+      parameters:
+        dependencyList: ${{ parameters.rocmDependencies }}
+        dependencySource: tag-builds
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
     parameters:
       extraBuildFlags: >-
+        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
+        -DROCM_PATH=$(Agent.BuildDirectory)/rocm
+        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
         -DCMAKE_BUILD_TYPE=Release
         -L
         -GNinja
   - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
```
@@ -10,20 +10,31 @@ parameters:
|
||||
default:
|
||||
- cmake
|
||||
- ninja-build
|
||||
- rocrand
|
||||
- hiprand
|
||||
- libboost-program-options-dev
|
||||
- libgtest-dev
|
||||
- libfftw3-dev
|
||||
|
||||
- python3-pip
|
||||
# rocm dependencies should match dependencies-rocm.yml
|
||||
- name: rocmDependencies
|
||||
type: object
|
||||
default:
|
||||
- llvm-project
|
||||
- ROCR-Runtime
|
||||
- clr
|
||||
- rocminfo
|
||||
- rocprofiler-register
|
||||
- hipRAND
|
||||
- rocRAND
|
||||
- rocm-cmake
|
||||
- aomp
|
||||
jobs:
|
||||
- job: rocFFT
|
||||
variables:
|
||||
- group: common
|
||||
- template: /.azuredevops/variables-global.yml
|
||||
- name: HIP_ROCCLR_HOME
|
||||
value: $(Build.BinariesDirectory)/rocm
|
||||
pool: ${{ variables.MEDIUM_BUILD_POOL }}
|
||||
container:
|
||||
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
|
||||
workspace:
|
||||
clean: all
|
||||
steps:
|
||||
@@ -34,12 +45,24 @@ jobs:
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
|
||||
parameters:
|
||||
checkoutRepo: ${{ parameters.checkoutRepo }}
|
||||
# CI case: download latest default branch build
|
||||
- ${{ if eq(parameters.checkoutRef, '') }}:
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
|
||||
parameters:
|
||||
dependencyList: ${{ parameters.rocmDependencies }}
|
||||
dependencySource: staging
|
||||
# manual build case: triggered by ROCm/ROCm repo
|
||||
- ${{ if ne(parameters.checkoutRef, '') }}:
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
|
||||
parameters:
|
||||
dependencyList: ${{ parameters.rocmDependencies }}
|
||||
dependencySource: tag-builds
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
|
||||
parameters:
|
||||
extraBuildFlags: >-
|
||||
-DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
|
||||
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
|
||||
-DCMAKE_PREFIX_PATH=/opt/rocm
|
||||
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
|
||||
-DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
|
||||
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
|
||||
-DCMAKE_BUILD_TYPE=Release
|
||||
-DAMDGPU_TARGETS=gfx1030;gfx1100
|
||||
-DUSE_HIP_CLANG=ON
|
||||
|
||||
@@ -10,6 +10,13 @@ parameters:
|
||||
default:
|
||||
- cmake
|
||||
- ninja-build
|
||||
- git
|
||||
- python3-pip
|
||||
- name: rocmDependencies
|
||||
type: object
|
||||
default:
|
||||
- llvm-project
|
||||
- rocm-cmake
|
||||
|
||||
jobs:
|
||||
- job: rocMLIR
|
||||
@@ -17,8 +24,6 @@ jobs:
|
||||
- group: common
|
||||
- template: /.azuredevops/variables-global.yml
|
||||
pool: ${{ variables.MEDIUM_BUILD_POOL }}
|
||||
container:
|
||||
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
|
||||
workspace:
|
||||
clean: all
|
||||
steps:
|
||||
@@ -29,13 +34,25 @@ jobs:
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
|
||||
parameters:
|
||||
checkoutRepo: ${{ parameters.checkoutRepo }}
|
||||
# CI case: download latest default branch build
|
||||
- ${{ if eq(parameters.checkoutRef, '') }}:
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
|
||||
parameters:
|
||||
dependencyList: ${{ parameters.rocmDependencies }}
|
||||
dependencySource: staging
|
||||
# manual build case: triggered by ROCm/ROCm repo
|
||||
- ${{ if ne(parameters.checkoutRef, '') }}:
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
|
||||
parameters:
|
||||
dependencyList: ${{ parameters.rocmDependencies }}
|
||||
dependencySource: tag-builds
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
|
||||
parameters:
|
||||
extraBuildFlags: >-
|
||||
-DCMAKE_BUILD_TYPE=Release
|
||||
-DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
|
||||
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
|
||||
-DCMAKE_PREFIX_PATH=/opt/rocm
|
||||
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang++
|
||||
-DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/clang
|
||||
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
|
||||
-DBUILD_FAT_LIBROCKCOMPILER=1
|
||||
-GNinja
|
||||
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
|
||||
|
||||
@@ -12,6 +12,15 @@ parameters:
- ninja-build
- libgtest-dev
- git
- python3-pip
- name: rocmDependencies
type: object
default:
- rocm-cmake
- llvm-project
- ROCR-Runtime
- clr
- rocminfo

jobs:
- job: rocPRIM
@@ -19,8 +28,6 @@ jobs:
- group: common
- template: /.azuredevops/variables-global.yml
pool: ${{ variables.MEDIUM_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -34,12 +41,24 @@ jobs:
# ${{ }} expressions are resolved at compile time,
# so this next step is skipped entirely until we
# define explicit aptPackages to install
# (see the expression sketch after this hunk)
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DBUILD_BENCHMARK=ON
-DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DAMDGPU_TARGETS=gfx1030;gfx1100
-DBUILD_TEST=ON
-GNinja

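The compile-time comment in the hunk above is worth unpacking. A minimal sketch of the distinction between the two Azure Pipelines expression syntaxes, with an invented parameter and steps that are not part of this diff:

```yaml
# ${{ }} template expressions are expanded when the pipeline is compiled,
# before any agent picks up the job, so a false compile-time `if` removes
# the guarded step from the job entirely rather than skipping it at runtime.
parameters:
- name: aptPackages
  type: object
  default: []  # empty, so the guarded step below is compiled away

steps:
- ${{ if gt(length(parameters.aptPackages), 0) }}:
  - script: echo "compiled in only when aptPackages is non-empty"
- script: echo "macro variables like $(Build.BuildId) resolve at runtime"
```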
@@ -10,15 +10,25 @@ parameters:
default:
- cmake
- ninja-build
- rocblas
- rocsparse
- hipsparse
- libsuitesparse-dev
- gfortran
- libfmt-dev
- git
- googletest
- libgtest-dev
- python3-pip
- name: rocmDependencies
type: object
default:
- rocm-cmake
- llvm-project
- ROCR-Runtime
- clr
- rocminfo
- rocBLAS
- rocPRIM
- rocSPARSE
- hipSPARSE

jobs:
- job: rocSOLVER
@@ -26,8 +36,6 @@ jobs:
- group: common
- template: /.azuredevops/variables-global.yml
pool: ${{ variables.MEDIUM_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -44,6 +52,18 @@ jobs:
targetType: inline
script: git clone --depth 1 --branch v3.9.1 https://github.com/Reference-LAPACK/lapack
workingDirectory: '$(Build.SourcesDirectory)'
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
componentName: lapack
@@ -59,11 +79,10 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_PREFIX_PATH="/opt/rocm;$(Pipeline.Workspace)/deps-install"
-DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Pipeline.Workspace)/deps-install
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
-DAMDGPU_TARGETS=gfx1030;gfx1100
-DUSE_CUDA=OFF
-DBUILD_CLIENTS_TESTS=ON
-DBUILD_CLIENTS_BENCHMARKS=OFF
-DBUILD_CLIENTS_SAMPLES=OFF

@@ -8,6 +8,7 @@ parameters:
- name: aptPackages
type: object
default:
- python3-pip
- cmake
- ninja-build
- libboost-program-options-dev
@@ -15,17 +16,28 @@ parameters:
- libfftw3-dev
- git
- gfortran
- rocprim-dev
- libgtest-dev
- libdrm-dev
- name: rocmDependencies
type: object
default:
- rocm-cmake
- llvm-project
- ROCR-Runtime
- clr
- rocBLAS
- rocminfo
- rocPRIM
- rocprofiler-register

jobs:
- job: rocSPARSE
variables:
- group: common
- template: /.azuredevops/variables-global.yml
- name: HIP_ROCCLR_HOME
value: $(Build.BinariesDirectory)/rocm
pool: ${{ variables.MEDIUM_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -36,16 +48,40 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc
-DCMAKE_C_COMPILER=/opt/rocm/bin/hipcc
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
-DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DROCM_PATH=$(Agent.BuildDirectory)/rocm
-DCMAKE_BUILD_TYPE=Release
-DAMDGPU_TARGETS=gfx1030;gfx1100
-DBUILD_CLIENTS_SAMPLES=OFF
-DBUILD_CLIENTS_TESTS=ON
-DBUILD_CLIENTS_BENCHMARKS=OFF
-DCMAKE_MODULE_PATH="/opt/rocm/lib/cmake/hip;/opt/rocm/hip/cmake"
-DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip;$(Agent.BuildDirectory)/rocm/hip/cmake
-GNinja
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
parameters:
artifactName: rocSPARSE
publish: false
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml
parameters:
sourceDir: $(Build.SourcesDirectory)/build/clients
contentsString: matrices/**
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
parameters:
artifactName: testMatrices

@@ -10,12 +10,20 @@ parameters:
default:
- cmake
- ninja-build
- hiprand
- rocprim-dev
- libboost-program-options-dev
- googletest
- libfftw3-dev
- git
- python3-pip
- name: rocmDependencies
type: object
default:
- clr
- hipRAND
- llvm-project
- rocminfo
- rocPRIM
- ROCR-Runtime

jobs:
- job: rocThrust
@@ -23,8 +31,6 @@ jobs:
- group: common
- template: /.azuredevops/variables-global.yml
pool: ${{ variables.MEDIUM_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -35,14 +41,25 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-GNinja
-DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
-DROCM_PATH=/opt/rocm
-DCMAKE_PREFIX_PATH=/opt/rocm
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DROCM_PATH=$(Agent.BuildDirectory)/rocm
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DAMDGPU_TARGETS=gfx1030;gfx1100
-DBUILD_TEST=ON
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -8,6 +8,7 @@ parameters:
- name: aptPackages
type: object
default:
- python3-pip
- cmake
- ninja-build
- libboost-program-options-dev
@@ -15,7 +16,18 @@ parameters:
- googletest
- libfftw3-dev
- git
- rocblas
- libomp-dev
- name: rocmDependencies
type: object
default:
- rocm-cmake
- llvm-project
- ROCR-Runtime
- clr
- rocminfo
- rocBLAS
- aomp
- rocm_smi_lib

jobs:
- job: rocWMMA
@@ -23,8 +35,6 @@ jobs:
- group: common
- template: /.azuredevops/variables-global.yml
pool: ${{ variables.MEDIUM_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -35,11 +45,23 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_CXX_COMPILER=/opt/rocm/bin/amdclang++
-DCMAKE_C_COMPILER=/opt/rocm/bin/amdclang
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DCMAKE_BUILD_TYPE=Release
-DROCWMMA_BUILD_TESTS=ON
-DROCWMMA_BUILD_SAMPLES=OFF

@@ -5,6 +5,30 @@ parameters:
- name: checkoutRef
type: string
default: ''
- name: aptPackages
type: object
default:
- libglfw3-dev
- name: rocmDependencies
type: object
default:
- AMDMIGraphX
- clr
- hipBLAS
- hipCUB
- HIPIFY
- hipRAND
- hipSOLVER
- hipSPARSE
- llvm-project
- rocBLAS
- rocPRIM
- rocprofiler-register
- ROCR-Runtime
- rocRAND
- rocSOLVER
- rocSPARSE
- rocThrust

jobs:
- job: rocm_examples
@@ -20,5 +44,28 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
# https://github.com/ROCm/HIP/issues/2203
extraBuildFlags: >-
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DROCM_ROOT=$(Agent.BuildDirectory)/rocm
-DCMAKE_HIP_ARCHITECTURES=gfx1030;gfx1100
-DCMAKE_EXE_LINKER_FLAGS=-fgpu-rdc
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -10,21 +10,33 @@ parameters:
default:
- cmake
- ninja-build
- python3-pip
- name: pipModules
type: object
default:
- CppHeaderParser
- argparse
- name: rocmDependencies
type: object
default:
- clr
- llvm-project
- rocminfo
- rocprofiler-register
- ROCR-Runtime
- ROCT-Thunk-Interface

jobs:
- job: rocm_bandwidth_test
variables:
- group: common
- template: /.azuredevops/variables-global.yml
- name: ROCR_INC_DIR
value: $(Agent.BuildDirectory)/rocm
- name: ROCR_LIB_DIR
value: $(Agent.BuildDirectory)/rocm
pool:
vmImage: ${{ variables.BASE_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -36,11 +48,23 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_BUILD_TYPE=release
-DCMAKE_MODULE_PATH="$(Build.SourcesDirectory)/cmake_modules"
-DCMAKE_PREFIX_PATH=/opt/rocm
-DCMAKE_MODULE_PATH=$(Build.SourcesDirectory)/cmake_modules
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/include;$(Agent.BuildDirectory)/rocm/include/hsa
-GNinja
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -24,4 +24,5 @@ jobs:
parameters:
extraBuildFlags: >-
-DBUILD_TESTS=ON
-DROCM_DEP_ROCMCORE=ON
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -21,4 +21,17 @@ jobs:
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
componentName: rocprofiler-register
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
componentName: rocprofiler-register-tests
extraBuildFlags: >-
-DCMAKE_PREFIX_PATH=$(Build.BinariesDirectory)
cmakeBuildDir: 'tests/build'
installEnabled: false
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
parameters:
componentName: rocprofiler-register
testDir: 'tests/build'
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -10,12 +10,13 @@ parameters:
default:
- cmake
- libgtest-dev
- libdrm-dev
- libdw-dev
- libsystemd-dev
- libelf-dev
- libnuma-dev
- libpciaccess-dev
- rocm-llvm-dev
- python3-pip
- name: pipModules
type: object
default:
@@ -26,15 +27,31 @@ parameters:
- lxml
- barectf
- pandas
- name: rocmDependencies
type: object
default:
- clr
- llvm-project
- ROCdbgapi
- rocm-cmake
- rocm-core
- rocm_smi_lib
- rocminfo
- ROCR-Runtime
- rocprofiler-register
- ROCT-Thunk-Interface
- roctracer

jobs:
- job: rocprofiler
variables:
- group: common
- template: /.azuredevops/variables-global.yml
- name: HIP_ROCCLR_HOME
value: $(Agent.BuildDirectory)/rocm
- name: ROCM_PATH
value: $(Agent.BuildDirectory)/rocm
pool: ${{ variables.MEDIUM_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -46,11 +63,46 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# Manually download aqlprofile, hard-coded 6.2.0 version
- task: Bash@3
displayName: 'Download aqlprofile'
inputs:
targetType: inline
script: wget -nv https://repo.radeon.com/rocm/misc/aqlprofile/ubuntu-22.04/hsa-amd-aqlprofile_1.0.0.60200.60200-crdnnh.14213~22.04_amd64.deb
workingDirectory: '$(Pipeline.Workspace)'
- task: Bash@3
displayName: 'Extract aqlprofile'
inputs:
targetType: inline
script: |
mkdir hsa-amd-aqlprofile
dpkg-deb -R hsa-amd-aqlprofile_1.0.0.60200.60200-crdnnh.14213~22.04_amd64.deb hsa-amd-aqlprofile
workingDirectory: '$(Pipeline.Workspace)'
- task: Bash@3
displayName: 'Move aqlprofile'
inputs:
targetType: inline
script: |
mkdir -p $(Agent.BuildDirectory)/rocm
cp -R hsa-amd-aqlprofile/opt/rocm-6.2.0-14213/* $(Agent.BuildDirectory)/rocm
workingDirectory: '$(Pipeline.Workspace)'
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_MODULE_PATH="$(Build.SourcesDirectory)/cmake_modules;/opt/rocm/lib/cmake"
-DCMAKE_PREFIX_PATH="/opt/rocm"
-DCMAKE_MODULE_PATH=$(Build.SourcesDirectory)/cmake_modules;$(Agent.BuildDirectory)/rocm/lib/cmake;$(Agent.BuildDirectory)/rocm/lib/cmake/hip
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DENABLE_LDCONFIG=OFF
-DUSE_PROF_API=1
-DGPU_TARGETS=gfx1030;gfx1100

@@ -12,6 +12,16 @@ parameters:
- ninja-build
- libelf-dev
- libdw-dev
- name: rocmDependencies
type: object
default:
- rocm-cmake
- clr
- llvm-project
- ROCdbgapi
- rocminfo
- ROCR-Runtime
- rocprofiler-register

jobs:
- job: rocr_debug_agent
@@ -20,8 +30,6 @@ jobs:
- template: /.azuredevops/variables-global.yml
pool:
vmImage: ${{ variables.BASE_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -32,11 +40,24 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_BUILD_TYPE=Release
-DROCM_PATH=/opt/rocm
-DCMAKE_MODULE_PATH=/opt/rocm/lib/cmake
-DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake;$(Agent.BuildDirectory)/rocm/lib/cmake/hip
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DROCM_PATH=$(Agent.BuildDirectory)/rocm
-GNinja
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -67,10 +67,4 @@ jobs:
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DGPU_TARGETS=gfx1030;gfx1100
-GNinja
# - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
# - task: Bash@3
# displayName: 'Tests'
# inputs:
# targetType: inline
# script: ./run.sh
# workingDirectory: build
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -9,10 +9,18 @@ parameters:
type: object
default:
- cmake
- libomp-dev # needed to pass flag step
- ninja-build
- clang
- name: rocmDependencies
type: object
default:
- aomp # needed to pass build step
- clr
- half
- libomp-dev
- llvm-project
- rocminfo
- ROCR-Runtime

jobs:
- job: rpp
@@ -21,8 +29,6 @@ jobs:
- template: /.azuredevops/variables-global.yml
pool:
vmImage: ${{ variables.BASE_BUILD_POOL }}
container:
image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
workspace:
clean: all
steps:
@@ -33,13 +39,27 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
parameters:
checkoutRepo: ${{ parameters.checkoutRepo }}
# CI case: download latest default branch build
- ${{ if eq(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: staging
# manual build case: triggered by ROCm/ROCm repo
- ${{ if ne(parameters.checkoutRef, '') }}:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
parameters:
dependencyList: ${{ parameters.rocmDependencies }}
dependencySource: tag-builds
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
-DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/amdclang
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
-DROCM_PATH=$(Agent.BuildDirectory)/rocm
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm
-DHALF_INCLUDE_DIRS=$(Agent.BuildDirectory)/rocm/include
-DCMAKE_BUILD_TYPE=Release
-DBUILD_CLIENTS=ON
-DAMDGPU_TARGETS=gfx1030;gfx1100
-GNinja
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml

@@ -23,7 +23,7 @@ trigger: none
pr: none

jobs:
- template: ${{ variables.CI_COMPONENT_PATH }}/rocgdb.yml
- template: ${{ variables.CI_COMPONENT_PATH }}/ROCgdb.yml
parameters:
checkoutRepo: release_repo
checkoutRef: ${{ parameters.checkoutRef }}
.azuredevops/tag-builds/rocAL.yml (new file)
@@ -0,0 +1,29 @@
variables:
- group: common
- template: /.azuredevops/variables-global.yml

parameters:
- name: checkoutRef
type: string
default: refs/tags/$(LATEST_RELEASE_TAG)

resources:
repositories:
- repository: pipelines_repo
type: github
endpoint: ROCm
name: ROCm/ROCm
- repository: release_repo
type: github
endpoint: ROCm
name: ROCm/rocAL
ref: ${{ parameters.checkoutRef }}

trigger: none
pr: none

jobs:
- template: ${{ variables.CI_COMPONENT_PATH }}/rocAL.yml
parameters:
checkoutRepo: release_repo
checkoutRef: ${{ parameters.checkoutRef }}
.azuredevops/tag-builds/rocm-examples.yml (new file)
@@ -0,0 +1,29 @@
variables:
- group: common
- template: /.azuredevops/variables-global.yml

parameters:
- name: checkoutRef
type: string
default: refs/tags/$(LATEST_RELEASE_TAG)

resources:
repositories:
- repository: pipelines_repo
type: github
endpoint: ROCm
name: ROCm/ROCm
- repository: release_repo
type: github
endpoint: ROCm
name: ROCm/rocm-examples
ref: ${{ parameters.checkoutRef }}

trigger: none
pr: none

jobs:
- template: ${{ variables.CI_COMPONENT_PATH }}/rocm-examples.yml
parameters:
checkoutRepo: release_repo
checkoutRef: ${{ parameters.checkoutRef }}
@@ -12,6 +12,7 @@ parameters:
- name: defaultBranchList
type: object
default:
amdsmi: develop
aomp: aomp-dev
aomp-extras: aomp-dev
AMDMIGraphX: develop
@@ -20,13 +21,20 @@ parameters:
half: master
HIP: develop
hipBLAS: develop
hipCUB: develop
hipRAND: develop
hipSOLVER: develop
hipSPARSE: develop
llvm-project: amd-staging
MIOpen: develop
MIVisionX: develop
rdc: develop
rocBLAS: develop
ROCdbgapi : amd-master
rocDecode: develop
rocFFT: develop
rocm-cmake: develop
rocm_smi_lib: develop
rocminfo: master
rocMLIR: develop
rocPRIM: develop
@@ -36,6 +44,8 @@ parameters:
rocSOLVER: develop
rocSPARSE: develop
ROCT-Thunk-Interface: master
rocThrust: develop
roctracer: amd-master
rpp: master
- name: componentsFailureOkay
type: object

@@ -5,6 +5,9 @@ parameters:
- name: extraBuildFlags
type: string
default: ''
- name: multithreadFlag
type: string
default: ''
- name: cmakeBuildDir
type: string
default: 'build'
@@ -17,6 +20,12 @@ parameters:
- name: installDir
type: string
default: '$(Build.BinariesDirectory)'
- name: customInstallPath
type: boolean
default: true
- name: installEnabled
type: boolean
default: true

steps:
# create workingDirectory if it does not exist and change into it
@@ -25,19 +34,23 @@ steps:
displayName: '${{parameters.componentName }} CMake Flags'
inputs:
workingDirectory: ${{ parameters.cmakeBuildDir }}
cmakeArgs: -DCMAKE_INSTALL_PREFIX=${{ parameters.installDir }} ${{ parameters.extraBuildFlags }} ..
${{ if eq(parameters.customInstallPath, true) }}:
cmakeArgs: -DCMAKE_INSTALL_PREFIX=${{ parameters.installDir }} ${{ parameters.extraBuildFlags }} ..
${{ else }}:
cmakeArgs: ${{ parameters.extraBuildFlags }} ..
# equivalent to running make $cmakeTargetDir from $cmakeBuildDir
# i.e., cd $cmakeBuildDir; make $cmakeTargetDir
- task: CMake@1
displayName: '${{parameters.componentName }} Build'
inputs:
workingDirectory: ${{ parameters.cmakeBuildDir }}
cmakeArgs: '--build ${{ parameters.cmakeTargetDir }}'
cmakeArgs: '--build ${{ parameters.cmakeTargetDir }} ${{ parameters.multithreadFlag }}'
retryCountOnTaskFailure: 10
# equivalent to running make $cmakeTarget from $cmakeBuildDir
# e.g., make install
- task: CMake@1
displayName: '${{parameters.componentName }} ${{ parameters.cmakeTarget }}'
inputs:
workingDirectory: ${{ parameters.cmakeBuildDir }}
cmakeArgs: '--build ${{ parameters.cmakeTargetDir }} --target ${{ parameters.cmakeTarget }}'
- ${{ if eq(parameters.installEnabled, true) }}:
- task: CMake@1
displayName: '${{parameters.componentName }} ${{ parameters.cmakeTarget }}'
inputs:
workingDirectory: ${{ parameters.cmakeBuildDir }}
cmakeArgs: '--build ${{ parameters.cmakeTargetDir }} --target ${{ parameters.cmakeTarget }}'

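The three parameters added to `build-cmake.yml` above change how the configure, build, and install steps render. A hypothetical call sketch (the component name and flag values are examples, not taken from this diff):

```yaml
# multithreadFlag is appended to the CMake --build arguments,
# installEnabled gates the final install task, and customInstallPath
# controls whether -DCMAKE_INSTALL_PREFIX is injected at configure time.
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
  parameters:
    componentName: example
    extraBuildFlags: '-DCMAKE_BUILD_TYPE=Release -GNinja'
    multithreadFlag: '--parallel 4'
    installEnabled: false
    customInstallPath: false
```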
.azuredevops/templates/steps/dependencies-cmake-latest.yml (new file)
@@ -0,0 +1,10 @@
# replace cmake from apt install with newest version using snap install
steps:
- task: Bash@3
displayName: update cmake
inputs:
targetType: inline
script: |
sudo apt purge cmake
sudo snap install cmake --classic
hash -r
@@ -12,23 +12,31 @@ steps:
displayName: 'sudo apt-get update'
inputs:
targetType: inline
script: sudo apt-get update
script: sudo apt-get --yes update
env:
DEBIAN_FRONTEND: noninteractive
- task: Bash@3
displayName: 'sudo apt-get upgrade'
inputs:
targetType: inline
script: sudo apt-get update
script: sudo apt-get --yes upgrade
env:
DEBIAN_FRONTEND: noninteractive
- task: Bash@3
displayName: 'sudo apt-get fix'
inputs:
targetType: inline
script: sudo apt --yes --fix-broken install
env:
DEBIAN_FRONTEND: noninteractive
- ${{ if gt(length(parameters.aptPackages), 0) }}:
- task: Bash@3
displayName: 'sudo apt-get install ...'
inputs:
targetType: inline
script: sudo apt-get --yes install ${{ join(' ', parameters.aptPackages) }}
env:
DEBIAN_FRONTEND: noninteractive
- ${{ if gt(length(parameters.pipModules), 0) }}:
- task: Bash@3
displayName: 'pip install ...'

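For a concrete sense of the guarded install step above, a hypothetical caller of `dependencies-other.yml` (the package list is invented for illustration):

```yaml
# The object parameter is flattened by join() at compile time, so the
# guarded install step's inline script renders as:
#   sudo apt-get --yes install cmake ninja-build git
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
  parameters:
    aptPackages:
      - cmake
      - ninja-build
      - git
```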
@@ -23,6 +23,7 @@ parameters:
- name: stagingPipelineIdentifiers
type: object
default:
amdsmi: $(amdsmi-pipeline-id)
aomp: $(aomp-pipeline-id)
aomp-extras: $(aomp-extras-pipeline-id)
AMDMIGraphX: $(amdmigraphx-pipeline-id)
@@ -30,13 +31,22 @@ parameters:
composable_kernel: $(composable-kernel-pipeline-id)
half: $(half-pipeline-id)
hipBLAS: $(hipblas-pipeline-id)
hipCUB: $(hipcub-pipeline-id)
HIPIFY: $(hipify-pipeline-id)
hipRAND: $(hiprand-pipeline-id)
hipSOLVER: $(hipsolver-pipeline-id)
hipSPARSE: $(hipsparse-pipeline-id)
llvm-project: $(llvm-project-pipeline-id)
MIOpen: $(miopen-pipeline-id)
MIVisionX: $(mivisionx-pipeline-id)
rdc: $(rdc-pipeline-id)
rocBLAS: $(rocblas-pipeline-id)
rocFFT: $(rotfft-pipeline-id)
ROCdbgapi : $(rocdbgapi-pipeline-id)
rocDecode: $(rocdecode-pipeline-id)
rocFFT: $(rocfft-pipeline-id)
rocm-cmake: $(rocm-cmake-pipeline-id)
rocm-core: $(rocm-core-pipeline-id)
rocm_smi_lib: $(rocm-smi-lib-pipeline-id)
rocminfo: $(rocminfo-pipeline-id)
rocMLIR: $(rocmlir-pipeline-id)
rocPRIM: $(rocprim-pipeline-id)
@@ -46,10 +56,13 @@ parameters:
rocSOLVER: $(rocsolver-pipeline-id)
rocSPARSE: $(rocsparse-pipeline-id)
ROCT-Thunk-Interface: $(roct-thunk-interface-pipeline-id)
rocThrust: $(rocthrust-pipeline-id)
roctracer: $(roctracer-pipeline-id)
rpp: $(rpp-pipeline-id)
- name: taggedPipelineIdentifiers
type: object
default:
amdsmi: $(amdsmi-tagged-pipeline-id)
aomp: $(aomp-tagged-pipeline-id)
aomp-extras: $(aomp-extras-tagged-pipeline-id)
AMDMIGraphX: $(amdmigraphx-tagged-pipeline-id)
@@ -57,13 +70,22 @@ parameters:
composable_kernel: $(composable-kernel-tagged-pipeline-id)
half: $(half-tagged-pipeline-id)
hipBLAS: $(hipblas-tagged-pipeline-id)
hipCUB: $(hipcub-tagged-pipeline-id)
HIPIFY: $(hipify-tagged-pipeline-id)
hipRAND: $(hiprand-tagged-pipeline-id)
hipSOLVER: $(hipsolver-tagged-pipeline-id)
hipSPARSE: $(hipsparse-tagged-pipeline-id)
llvm-project: $(llvm-project-tagged-pipeline-id)
MIOpen: $(miopen-tagged-pipeline-id)
MIVisionX: $(mivisionx-tagged-pipeline-id)
rdc: $(rdc-tagged-pipeline-id)
rocBLAS: $(rocblas-tagged-pipeline-id)
rocFFT: $(rotfft-tagged-pipeline-id)
ROCdbgapi : $(rocdbgapi-tagged-pipeline-id)
rocDecode: $(rocdecode-tagged-pipeline-id)
rocFFT: $(rocfft-tagged-pipeline-id)
rocm-cmake: $(rocm-cmake-tagged-pipeline-id)
rocm-core: $(rocm-core-tagged-pipeline-id)
rocm_smi_lib: $(rocm-smi-lib-tagged-pipeline-id)
rocminfo: $(rocminfo-tagged-pipeline-id)
rocMLIR: $(rocmlir-tagged-pipeline-id)
rocPRIM: $(rocprim-tagged-pipeline-id)
@@ -73,6 +95,8 @@ parameters:
rocSOLVER: $(rocsolver-tagged-pipeline-id)
rocSPARSE: $(rocsparse-tagged-pipeline-id)
ROCT-Thunk-Interface: $(roct-thunk-interface-tagged-pipeline-id)
rocThrust: $(rocthrust-tagged-pipeline-id)
roctracer: $(roctracer-tagged-pipeline-id)
rpp: $(rpp-tagged-pipeline-id)
# set to true if you're calling this template file multiple times in the same pipeline
# only leave the last call false to optimize the sequence

@@ -27,3 +27,5 @@ variables:
value: rocm/dev-ubuntu-22.04
- name: LATEST_DOCKER_VERSION
value: 6.1
- name: KEYRING_VERSION
value: 6.1

.gitignore (vendored)
@@ -16,4 +16,4 @@ _readthedocs/
docs/CHANGELOG.md
docs/contribute/index.md
docs/about/release-notes.md
docs/about/CHANGELOG.md
docs/about/changelog.md

@@ -3,16 +3,20 @@

version: 2

build:
os: ubuntu-22.04
tools:
python: "3.10"

sphinx:
configuration: docs/conf.py

formats: [htmlzip]

python:
install:
- requirements: docs/sphinx/requirements.txt

formats: [htmlzip, epub]
build:
os: ubuntu-22.04
tools:
python: "3.10"
apt_packages:
- "doxygen"
- "gfortran" # For pre-processing fortran sources
- "graphviz" # For dot graphs in doxygen

@@ -15,6 +15,7 @@ AOMP
APIC
APIs
APU
AQL
ASIC
ASICs
ASan
@@ -62,6 +63,7 @@ CommonMark
Concretized
Conda
ConnectX
DENORM
DGEMM
DKMS
DL
@@ -70,6 +72,7 @@ DNN
DNNL
DPM
DRI
DRM
DW
DWORD
Dask
@@ -85,6 +88,7 @@ ELMo
ENDPGM
EPYC
ESXi
FFFFFFF
FFT
FFTs
FFmpeg
@@ -122,6 +126,7 @@ GenAI
GenZ
GitHub
Gitpod
HBCC
HBM
HCA
HIPCC
@@ -132,6 +137,7 @@ HPCG
HPE
HPL
HSA
HW
HWE
Haswell
Higgs
@@ -157,11 +163,16 @@ Intra
Ioffe
JSON
Jupyter
KBytes
KERNARG
KFD
KiB
KMD
KVM
Keras
Kernarg
Khronos
Ki
LAPACK
LCLK
LDS
@@ -180,6 +191,7 @@ MiB
MIGraphX
MIOpen
MIOpenGEMM
MIPMAP
MIVisionX
MLM
MMA
@@ -222,6 +234,7 @@ NousResearch's
NumPy
OAM
OAMs
OBJFILE
OCP
OEM
OFED
@@ -237,6 +250,7 @@ OpenCV
OpenFabrics
OpenGL
OpenMP
OpenMPI
OpenSSL
OpenVX
PCI
@@ -261,6 +275,7 @@ RCCL
RDC
RDMA
RDNA
RGP
RHEL
ROC
ROCProfiler
@@ -274,6 +289,7 @@ ROCmCC
ROCmSoftwarePlatform
ROCmValidationSuite
ROCr
RPATH
RST
RW
Radeon
@@ -302,10 +318,12 @@ SMEM
SMI
SMT
SPI
SQTT
SQs
SRAM
SRAMECC
SVD
SVM
SWE
SerDes
Shlens
@@ -343,6 +361,8 @@ UIF
USM
UTCL
UTIL
UNBUNDLER
USWC
Uncached
Unhandled
VALU
@@ -357,6 +377,8 @@ VSIX
VSkipped
Vanhoucke
Vulkan
WERROR
WG
WGP
WGPs
WX
@@ -388,6 +410,8 @@ allocator
allocators
amdgpu
api
arg
args
atmi
atomics
autogenerated
@@ -400,6 +424,7 @@ bfloat
bilinear
bitsandbytes
blit
bool
boson
bosons
buildable
@@ -411,6 +436,10 @@ centos
centric
changelog
chiplet
clBuildProgram
clCompileProgram
clLinkProgram
clr
cmake
cmd
coalescable
@@ -426,6 +455,7 @@ convolutional
convolves
cpp
csn
cstring
cuBLAS
cuFFT
cuLIB
@@ -443,6 +473,7 @@ deallocation
denoise
denoised
denoises
denorm
denormalize
deserializers
detections
@@ -457,6 +488,7 @@ embeddings
enablement
endpgm
encodings
enqueue
env
epilog
etcetera
@@ -480,7 +512,9 @@ heterogenous
hipBLAS
hipBLASLt
hipCUB
hipConfig
hipFFT
hipHostMalloc
hipLIB
hipRAND
hipSOLVER
@@ -489,12 +523,15 @@ hipSPARSELt
hipTensor
hipamd
hipblas
hipcc
hipcub
hipfft
hipfort
hipify
hiprtc
hipsolver
hipsparse
hpc
hpp
hsa
hsakmt
@@ -509,6 +546,7 @@ initializer
inlining
installable
interprocedural
interprocess
intra
invariants
invocating
@@ -526,9 +564,12 @@ localscratch
logits
lossy
macOS
malloc
matchers
mem
microarchitecture
migraphx
mipmap
miopen
miopengemm
mivisionx
@@ -539,6 +580,8 @@ mvffr
namespace
namespaces
numref
nvcc
nvidia
ocl
opencl
opencv
@@ -559,6 +602,7 @@ prebuilt
precompiled
prefetch
prefetchable
prepinned
preprocess
preprocessed
preprocessing
@@ -589,6 +633,7 @@ rocFFT
rocLIB
rocMLIR
rocPRIM
rocProfiler
rocRAND
rocSOLVER
rocSPARSE
@@ -609,11 +654,13 @@ rocsolver
rocsparse
rocthrust
roctracer
rpath
runtime
runtimes
sL
scalability
scalable
sdma
sendmsg
serializers
shader
@@ -624,13 +671,17 @@ smi
softmax
spack
src
stderr
stochastically
strided
stubing
suballocaitons
subdirectory
subexpression
subfolder
subfolders
supercomputing
td
tensorfloat
th
tokenization
@@ -647,6 +698,8 @@ tqdm
tracebacks
txt
uarch
uint
unbundler
uncached
uncorrectable
uninstallation
@@ -677,12 +730,14 @@ wavefronts
whitespaces
workgroup
workgroups
workitems
writeback
writebacks
wrreq
wzo
xargs
xf
xz
yaml
ysvmadyb
zypper
zypper

CHANGELOG.md (1120 changed lines, diff not shown)
LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved.
Copyright (c) 2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md
@@ -56,12 +56,103 @@ cd ~/ROCm/

**Note:** Using this sample code will cause the repo tool to download the open source code associated with the specified ROCm release. Ensure that you have ssh-keys configured on your machine for your GitHub ID prior to the download as explained at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh).

### Building the ROCm source code
## Building the ROCm source code

Each ROCm component repository contains directions for building that component, such as the rocSPARSE documentation [Installation and Building for Linux](https://rocm.docs.amd.com/projects/rocSPARSE/en/latest/install/Linux_Install_Guide.html). Refer to the specific component documentation for instructions on building the repository.

Each release of the ROCm software supports specific hardware and software configurations. Refer to [System requirements (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/system-requirements.html) for the current supported hardware and OS.

## Build ROCm from source

The build will use as many processors as it can find in order to build in parallel. Some of the compiles can consume as much as 10 GB of RAM, so make sure you have plenty of swap space!

By default, the ROCm build will compile for all supported GPU architectures and will take approximately 500 CPU hours.
The build time is reduced significantly if we limit the GPU architecture(s) to build against by using the GPU_ARCHS environment variable, as mentioned below.

```bash
# --------------------------------------
# Step 1: Clone source code
# --------------------------------------

mkdir -p ~/WORKSPACE/ # or any other folder name
cd ~/WORKSPACE/
export ROCM_VERSION=6.1.0 # or 6.1.1, 6.1.2
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.1.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync

# --------------------------------------
# Step 2: Prepare build environment
# --------------------------------------

# Option 1: Start a docker container
# Pull the required base docker images:
# Ubuntu20.04, built from ROCm/tools/rocm-build/docker/ubuntu20/Dockerfile
docker pull rocm/rocm-build-ubuntu-20.04:6.1
# Ubuntu22.04, built from ROCm/tools/rocm-build/docker/ubuntu22/Dockerfile
docker pull rocm/rocm-build-ubuntu-22.04:6.1

# Start the docker container and mount the source code folder:
docker run -ti \
  -e ROCM_VERSION=${ROCM_VERSION} \
  -e CCACHE_DIR=$HOME/.ccache \
  -e CCACHE_ENABLED=true \
  -e DOCK_WORK_FOLD=/src \
  -w /src \
  -v $PWD:/src \
  -v /etc/passwd:/etc/passwd \
  -v /etc/shadow:/etc/shadow \
  -v ${HOME}/.ccache:${HOME}/.ccache \
  -u $(id -u):$(id -g) \
  <replace_with_required_ubuntu_base_docker_image> bash

# Option 2: Install required packages onto the host machine
# For an ubuntu20.04 system
cd ROCm/tools/rocm-build/docker/ubuntu20
bash install-prerequisites.sh
# For an ubuntu22.04 system
cd ROCm/tools/rocm-build/docker/ubuntu22
bash install-prerequisites.sh

# --------------------------------------
# Step 3: Run build command line
# --------------------------------------

# Select GPU targets before building:
# When GPU_ARCHS is not set, the default GPU targets supported by ROCm 6.1 will be used.
# To build against a subset of GFX architectures, use the env variable below.
# Supported MI300 targets: gfx940, gfx941, gfx942.
export GPU_ARCHS="gfx942" # Example
export GPU_ARCHS="gfx940;gfx941;gfx942" # Example

# Pick and run build commands in the docker container:
# Build rocm-dev packages
make -f ROCm/tools/rocm-build/ROCm.mk -j ${NPROC:-$(nproc)} rocm-dev
# Build all ROCm packages
make -f ROCm/tools/rocm-build/ROCm.mk -j ${NPROC:-$(nproc)} all
# List all ROCm components to find required components
make -f ROCm/tools/rocm-build/ROCm.mk list_components
# Build a single ROCm package
make -f ROCm/tools/rocm-build/ROCm.mk T_rocblas

# Find built packages on ubuntu20.04:
out/ubuntu-20.04/20.04/deb/
# Find built packages on ubuntu22.04:
out/ubuntu-22.04/22.04/deb/

# Find build logs on ubuntu20.04:
out/ubuntu-20.04/20.04/logs/
# Find build logs on ubuntu22.04:
out/ubuntu-22.04/22.04/logs/
# All logs pertaining to failed components end with the .errors extension.
out/ubuntu-22.04/22.04/logs/rocblas.errors # Example
# All logs pertaining to components still building end with the .inprogress extension.
out/ubuntu-22.04/22.04/logs/rocblas.inprogress # Example
# All logs pertaining to passed components use the component names.
out/ubuntu-22.04/22.04/logs/rocblas # Example
```

Note: [Overview for ROCm.mk](tools/rocm-build/README.md)

## ROCm documentation

This repository contains the [manifest file](https://gerrit.googlesource.com/git-repo/+/HEAD/docs/manifest-format.md)

242
RELEASE.md
@@ -1,4 +1,6 @@
|
||||
# ROCm 6.1.1 release notes
|
||||
# ROCm 6.1.2 release notes
|
||||
<!-- Do not edit this file! This file is autogenerated with -->
|
||||
<!-- tools/autotag/tag_script.py -->
|
||||
|
||||
<!-- Disable lints since this is an auto-generated file. -->
|
||||
<!-- markdownlint-disable blanks-around-headers -->
|
||||
@@ -9,153 +11,137 @@
|
||||
|
||||
<!-- spellcheck-disable -->
|
||||
|
||||
ROCm™ 6.1.1 introduces minor fixes and improvements to some tools and libraries.
|
||||
ROCm 6.1.2 includes enhancements to SMI tools and improvements to some libraries.
|
||||
|
||||
## OS support
|
||||
### OS support
|
||||
|
||||
ROCm 6.1.1 has been tested against a pre-release version of Ubuntu 22.04.5 (kernel: 5.15 [GA], 6.8 [HWE]).
|
||||
ROCm 6.1.2 has been tested against a pre-release version of Ubuntu 22.04.5 (kernel: 5.15 [GA], 6.8 [HWE]).
|
||||
|
||||
## AMD SMI
|
||||
### AMD SMI
|
||||
|
||||
AMD SMI for ROCm 6.1.1
|
||||
|
||||
### Additions
|
||||
|
||||
- Added deferred error correctable counts to `amd-smi metric -ecc -ecc-blocks`.
|
||||
|
||||
### Changes
|
||||
|
||||
- Updated the output of `amd-smi metric --ecc-blocks` to show counters available from blocks.
|
||||
- Updated the output of `amd-smi metric --clock` to reflect each engine.
|
||||
- Updated the output of `amd-smi topology --json` to align with output reported by host and guest systems.

### Fixes

- Fixed `amd-smi metric --clock`'s clock lock and deep sleep status.
- Fixed an issue that would cause an error when resetting non-AMD GPUs.
- Fixed `amd-smi metric --pcie` and `amdsmi_get_pcie_info()` when using RDNA3 (Navi 32 and Navi 31) hardware to prevent "UNKNOWN" reports.
- Fixed the output results of `amd-smi process` when getting processes running on a device.

### Removals

- Removed the `amdsmi_get_gpu_process_info` API from the Python library. It was removed from the C library in an earlier release.

### Known issues

- `amd-smi bad-pages` can result in a `ValueError: Null pointer access` error when using some PMU firmware versions.

```{note}
See the [detailed changelog](https://github.com/ROCm/amdsmi/blob/docs/6.1.1/CHANGELOG.md) with code samples for more information.
```

## HIPCC

HIPCC for ROCm 6.1.1

### Changes

- **Upcoming:** a future release will enable use of the compiled binaries `hipcc.bin` and `hipconfig.bin` by default. No action is needed by users. You can continue calling the high-level Perl scripts `hipcc` and `hipconfig`. `hipcc.bin` and `hipconfig.bin` will be invoked by the high-level Perl scripts. To revert to the previous behavior and invoke `hipcc.pl` and `hipconfig.pl`, set the `HIP_USE_PERL_SCRIPTS` environment variable to `1` (see the sketch after this list).
- **Upcoming:** a subsequent release will remove the high-level Perl scripts `hipcc` and `hipconfig`. That release will remove the `HIP_USE_PERL_SCRIPTS` environment variable and rename `hipcc.bin` and `hipconfig.bin` to `hipcc` and `hipconfig` respectively. No action is needed by users. To revert to the previous behavior, invoke `hipcc.pl` and `hipconfig.pl` explicitly.
- **Upcoming:** a subsequent release will remove `hipcc.pl` and `hipconfig.pl`.
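
Until those changes land, opting back in to the Perl wrappers is a one-line setting (a sketch using the variable described above):

```shell
export HIP_USE_PERL_SCRIPTS=1
hipcc --version   # dispatches to hipcc.pl while the variable is set
```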

## ROCm SMI

ROCm SMI for ROCm 6.1.1

### Additions

* Added the capability to unlock the mutex when a process is dead, with related debug output.
* Added the `Partition ID` field to the `rocm-smi` CLI.
* Added `NODE`, `GUID`, and `GFX Version` fields to the CLI.
* Documentation now includes C++ and Python tutorials, API guides, and reference material.

### Changes

* Some `rocm-smi` fields now display `N/A` instead of `unknown/unsupported` for consistency.
* Changed stacked ID formatting in the `rocm-smi` CLI to make it easier to spot identifiers.

### Fixes

* Fixed a HIP and ROCm SMI mismatch on GPU bus assignments.
* Fixed memory leaks caused by not closing directories and by creating map nodes instead of using `.at()`.
* Fixed initializing calls which reuse `rocmsmi.initializeRsmi()` bindings in the `rocmsmi` Python API.
* Fixed an issue causing `rsmi_dev_activity_metric_get` gfx/memory to not update with GPU activity.

### Known issues

- ROCm SMI reports GPU utilization incorrectly for RDNA3 GPUs in some situations. See the issue on [GitHub](https://github.com/ROCm/ROCm/issues/3112).

```{note}
See the [detailed ROCm SMI changelog](https://github.com/ROCm/rocm_smi_lib/blob/docs/6.1.1/CHANGELOG.md) with code samples for more information.
```

## Library changes in ROCm 6.1.1

| Library | Version |
| ----------- | -------------------------------------------------------------------------- |
| AMDMIGraphX | [2.9](https://github.com/ROCm/AMDMIGraphX/releases/tag/rocm-6.1.1) |
| hipBLAS | [2.1.0](https://github.com/ROCm/hipBLAS/releases/tag/rocm-6.1.1) |
| hipBLASLt | [0.7.0](https://github.com/ROCm/hipBLASLt/releases/tag/rocm-6.1.1) |
| hipCUB | [3.1.0](https://github.com/ROCm/hipCUB/releases/tag/rocm-6.1.1) |
| hipFFT | [1.0.14](https://github.com/ROCm/hipFFT/releases/tag/rocm-6.1.1) |
| hipRAND | [2.10.17](https://github.com/ROCm/hipRAND/releases/tag/rocm-6.1.1) |
| hipSOLVER | 2.1.0 ⇒ [2.1.1](https://github.com/ROCm/hipSOLVER/releases/tag/rocm-6.1.1) |
| hipSPARSE | [3.0.1](https://github.com/ROCm/hipSPARSE/releases/tag/rocm-6.1.1) |
| hipSPARSELt | [0.2.0](https://github.com/ROCm/hipSPARSELt/releases/tag/rocm-6.1.1) |
| hipTensor | [1.2.0](https://github.com/ROCm/hipTensor/releases/tag/rocm-6.1.1) |
| MIOpen | [3.1.0](https://github.com/ROCm/MIOpen/releases/tag/rocm-6.1.1) |
| MIVisionX | [2.5.0](https://github.com/ROCm/MIVisionX/releases/tag/rocm-6.1.1) |
| rccl | [2.18.6](https://github.com/ROCm/rccl/releases/tag/rocm-6.1.1) |
| rocALUTION | [3.1.1](https://github.com/ROCm/rocALUTION/releases/tag/rocm-6.1.1) |
| rocBLAS | [4.1.0](https://github.com/ROCm/rocBLAS/releases/tag/rocm-6.1.1) |
| rocDecode | [0.5.0](https://github.com/ROCm/rocDecode/releases/tag/rocm-6.1.1) |
| rocFFT | 1.0.26 ⇒ [1.0.27](https://github.com/ROCm/rocFFT/releases/tag/rocm-6.1.1) |
| rocm-cmake | [0.12.0](https://github.com/ROCm/rocm-cmake/releases/tag/rocm-6.1.1) |
| rocPRIM | [3.1.0](https://github.com/ROCm/rocPRIM/releases/tag/rocm-6.1.1) |
| rocRAND | [3.0.1](https://github.com/ROCm/rocRAND/releases/tag/rocm-6.1.1) |
| rocSOLVER | [3.25.0](https://github.com/ROCm/rocSOLVER/releases/tag/rocm-6.1.1) |
| rocSPARSE | [3.1.2](https://github.com/ROCm/rocSPARSE/releases/tag/rocm-6.1.1) |
| rocThrust | [3.0.1](https://github.com/ROCm/rocThrust/releases/tag/rocm-6.1.1) |
| rocWMMA | [1.4.0](https://github.com/ROCm/rocWMMA/releases/tag/rocm-6.1.1) |
| rpp | [1.5.0](https://github.com/ROCm/rpp/releases/tag/rocm-6.1.1) |
| Tensile | [4.40.0](https://github.com/ROCm/Tensile/releases/tag/rocm-6.1.1) |

### hipBLASLt 0.7.0

hipBLASLt 0.7.0 for ROCm 6.1.1
AMD SMI for ROCm 6.1.2

#### Additions

- Added `hipblasltExtSoftmax` extension API.
- Added `hipblasltExtLayerNorm` extension API.
- Added `hipblasltExtAMax` extension API.
- Added `GemmTuning` extension parameter to set split-k by user.
- Added support for mixed-precision datatype: fp16/fp8 in with fp16 out.
* Added process isolation and clean shader APIs and CLI commands.
  * `amdsmi_get_gpu_process_isolation()`
  * `amdsmi_set_gpu_process_isolation()`
  * `amdsmi_set_gpu_clear_sram_data()`
* Added the `MIN_POWER` metric to output provided by `amd-smi static --limit` (example below).
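
The new field appears in the static power readout, for example (illustrative; output varies by device):

```shell
amd-smi static --limit   # output now includes the MIN_POWER metric
```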

#### Deprecations
#### Optimizations

- **Upcoming**: the `algoGetHeuristic()` ext API for GroupGemm will be deprecated in a future release of hipBLASLt.

### hipSOLVER 2.1.1

hipSOLVER 2.1.1 for ROCm 6.1.1
* Updated the `amd-smi monitor --pcie` output to prevent delays with the `monitor` command.

#### Changes

- By default, `BUILD_WITH_SPARSE` is now set to OFF on Microsoft Windows.
* Updated `amdsmi_get_power_cap_info` to return values in uW instead of W.
* Updated Python library return types for `amdsmi_get_gpu_memory_reserved_pages` and `amdsmi_get_gpu_bad_page_info`.
* Updated the output of `amd-smi metric --ecc-blocks` to show counters available from blocks.

#### Fixes

- Fixed the benchmark client build when `BUILD_WITH_SPARSE` is OFF.
* `amdsmi_get_gpu_board_info()` no longer returns junk character strings.
* `amd-smi metric --power` now correctly details power output for RDNA3, RDNA2, and MI1x devices.
* Fixed the `amdsmitstReadWrite.TestPowerCapReadWrite` test for RDNA3, RDNA2, and MI100 devices.
* Fixed an issue with the `amdsmi_get_gpu_memory_reserved_pages` and `amdsmi_get_gpu_bad_page_info` Python interface calls.

### rocFFT 1.0.27
#### Removals

rocFFT 1.0.27 for ROCm 6.1.1
* Removed the `amdsmi_get_gpu_process_info` API from the Python library. It was removed from the C library in an earlier release.

```{note}
See the AMD SMI [detailed changelog](https://github.com/ROCm/amdsmi/blob/rocm-6.1.x/CHANGELOG.md) with code samples for more information.
```

### ROCm SMI

ROCm SMI for ROCm 6.1.2

#### Additions

- Enable multi-GPU testing on systems without direct GPU-interconnects.
* Added the ring hang event to the `amdsmi_evt_notification_type_t` enum.

#### Fixes

- Fixed a kernel launch failure on execution of very large odd-length real-complex transforms.
* Fixed an issue causing ROCm SMI to incorrectly report GPU utilization for RDNA3 GPUs. See the issue on [GitHub](https://github.com/ROCm/ROCm/issues/3112).
* Fixed the parsing of `pp_od_clk_voltage` in `get_od_clk_volt_info` to work better with MI-series hardware.

## Library changes in ROCm 6.1.2

| Library | Version |
|---------|---------|
| AMDMIGraphX | [2.9](https://github.com/ROCm/AMDMIGraphX/releases/tag/rocm-6.1.2) |
| composable_kernel | [0.2.0](https://github.com/ROCm/composable_kernel/releases/tag/rocm-6.1.2) |
| hipBLAS | [2.1.0](https://github.com/ROCm/hipBLAS/releases/tag/rocm-6.1.2) |
| hipBLASLt | [0.7.0](https://github.com/ROCm/hipBLASLt/releases/tag/rocm-6.1.2) |
| hipCUB | [3.1.0](https://github.com/ROCm/hipCUB/releases/tag/rocm-6.1.2) |
| hipFFT | [1.0.14](https://github.com/ROCm/hipFFT/releases/tag/rocm-6.1.2) |
| hipRAND | [2.10.17](https://github.com/ROCm/hipRAND/releases/tag/rocm-6.1.2) |
| hipSOLVER | [2.1.1](https://github.com/ROCm/hipSOLVER/releases/tag/rocm-6.1.2) |
| hipSPARSE | [3.0.1](https://github.com/ROCm/hipSPARSE/releases/tag/rocm-6.1.2) |
| hipSPARSELt | [0.2.0](https://github.com/ROCm/hipSPARSELt/releases/tag/rocm-6.1.2) |
| hipTensor | [1.2.0](https://github.com/ROCm/hipTensor/releases/tag/rocm-6.1.2) |
| MIOpen | [3.1.0](https://github.com/ROCm/MIOpen/releases/tag/rocm-6.1.2) |
| MIVisionX | [2.5.0](https://github.com/ROCm/MIVisionX/releases/tag/rocm-6.1.2) |
| rccl | [2.18.6](https://github.com/ROCm/rccl/releases/tag/rocm-6.1.2) |
| rocALUTION | [3.1.1](https://github.com/ROCm/rocALUTION/releases/tag/rocm-6.1.2) |
| rocBLAS | 4.1.0 ⇒ [4.1.2](https://github.com/ROCm/rocBLAS/releases/tag/rocm-6.1.2) |
| rocDecode | 0.5.0 ⇒ [0.6.0](https://github.com/ROCm/rocDecode/releases/tag/rocm-6.1.2) |
| rocFFT | [1.0.27](https://github.com/ROCm/rocFFT/releases/tag/rocm-6.1.2) |
| rocm-cmake | [0.12.0](https://github.com/ROCm/rocm-cmake/releases/tag/rocm-6.1.2) |
| rocPRIM | [3.1.0](https://github.com/ROCm/rocPRIM/releases/tag/rocm-6.1.2) |
| rocRAND | [3.0.1](https://github.com/ROCm/rocRAND/releases/tag/rocm-6.1.2) |
| rocSOLVER | [3.25.0](https://github.com/ROCm/rocSOLVER/releases/tag/rocm-6.1.2) |
| rocSPARSE | [3.1.2](https://github.com/ROCm/rocSPARSE/releases/tag/rocm-6.1.2) |
| rocThrust | [3.0.1](https://github.com/ROCm/rocThrust/releases/tag/rocm-6.1.2) |
| rocWMMA | [1.4.0](https://github.com/ROCm/rocWMMA/releases/tag/rocm-6.1.2) |
| rpp | [1.5.0](https://github.com/ROCm/rpp/releases/tag/rocm-6.1.2) |
| Tensile | [4.40.0](https://github.com/ROCm/Tensile/releases/tag/rocm-6.1.2) |

### RCCL

RCCL 2.18.6 for ROCm 6.1.2

#### Changes

* Reduced `NCCL_TOPO_MAX_NODES` to limit stack usage and avoid stack overflow.

### rocBLAS

rocBLAS 4.1.2 for ROCm 6.1.2

#### Optimizations

* Tuned BBS TN and TT operations on the CDNA3 architecture.

#### Fixes

* Fixed an issue related to obtaining solutions for BF16 TT operations.

### rocDecode

rocDecode 0.6.0 for ROCm 6.1.2

#### Additions

* Added support for FFmpeg v5.x.

#### Optimizations

* Updated error checking in the `rocDecode-setup.py` script.

#### Changes

* Updated core dependencies.
* Updated to support the use of public LibVA headers.

#### Fixes

* Fixed some package dependencies.

## Upcoming changes

* A future release will enable the use of the HIPCC compiled binaries `hipcc.bin` and `hipconfig.bin` by default. No action is needed by users; you may continue calling the high-level Perl scripts `hipcc` and `hipconfig`. `hipcc.bin` and `hipconfig.bin` will be invoked by the high-level Perl scripts. To revert to the previous behavior and invoke `hipcc.pl` and `hipconfig.pl`, set the `HIP_USE_PERL_SCRIPTS` environment variable to `1`.
* A subsequent release will remove the high-level HIPCC Perl scripts `hipcc` and `hipconfig`. That release will remove the `HIP_USE_PERL_SCRIPTS` environment variable and rename `hipcc.bin` and `hipconfig.bin` to `hipcc` and `hipconfig` respectively. No action is needed by users. To revert to the previous behavior, invoke `hipcc.pl` and `hipconfig.pl` explicitly.
* A subsequent release will remove `hipcc.pl` and `hipconfig.pl` from HIPCC.

@@ -1,12 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest>
  <remote name="rocm-org" fetch="https://github.com/ROCm/" />
  <default revision="refs/tags/rocm-6.1.1"
  <default revision="refs/tags/rocm-6.1.2"
           remote="rocm-org"
           sync-c="true"
           sync-j="4" />
  <!--list of projects for ROCm-->
  <project path="ROCm-OpenCL-Runtime/api/opencl/khronos/icd" name="OpenCL-ICD-Loader" remote="KhronosGroup" />
  <project name="ROCK-Kernel-Driver" />
  <project name="ROCR-Runtime" />
  <project name="ROCT-Thunk-Interface" />
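
A manifest like this is consumed with the `repo` tool. A minimal sketch of initializing and syncing a tree from it (the manifest repository URL here is an assumption for illustration; use whichever repository actually hosts this `default.xml`):

```shell
repo init -u https://github.com/ROCm/ROCm.git -b refs/tags/rocm-6.1.2
repo sync -j4   # matches the sync-j="4" default above
```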

@@ -77,8 +77,7 @@ Obtain the value of `gpu-arch` by running the following command:

[//]: # (dated link below, needs updating)

See the complete list of compiler command-line references [here](https://github.com/ROCm/llvm-project/blob/amd-stg-open/clang/docs/CommandGuide/clang.rst).
See the complete list of [compiler command-line references](https://github.com/ROCm/llvm-project/blob/amd-staging/openmp/docs/CommandLineArgumentReference.rst).

### Using `rocprof` with OpenMP

@@ -17,7 +17,7 @@ following section.

## ROCm component licenses

ROCm is released by Advanced Micro Devices, Inc. and is licensed per component separately.
ROCm is released by Advanced Micro Devices, Inc. (AMD) and is licensed per component separately.
The following table is a list of ROCm components with links to their respective license
terms. These components may include third party components subject to
additional licenses. Please review individual repositories for more information.
@@ -25,66 +25,71 @@ additional licenses. Please review individual repositories for more information.

<!-- spellcheck-disable -->

| Component | License |
|:---------------------|:-------------------------|
| [AMDMIGraphX](https://github.com/ROCm/AMDMIGraphX/) | [MIT](https://github.com/ROCm/AMDMIGraphX/blob/develop/LICENSE) |
| [HIPCC](https://github.com/ROCm/HIPCC/blob/develop/LICENSE.txt) | [MIT](https://github.com/ROCm/HIPCC/blob/develop/LICENSE.txt) |
| [HIPIFY](https://github.com/ROCm/HIPIFY/) | [MIT](https://github.com/ROCm/HIPIFY/blob/amd-staging/LICENSE.txt) |
| [HIP](https://github.com/ROCm/HIP/) | [MIT](https://github.com/ROCm/HIP/blob/develop/LICENSE.txt) |
| [MIOpenGEMM](https://github.com/ROCm/MIOpenGEMM/) | [MIT](https://github.com/ROCm/MIOpenGEMM/blob/master/LICENSE.txt) |
| [MIOpen](https://github.com/ROCm/MIOpen/) | [MIT](https://github.com/ROCm/MIOpen/blob/master/LICENSE.txt) |
| [MIVisionX](https://github.com/ROCm/MIVisionX/) | [MIT](https://github.com/ROCm/MIVisionX/blob/master/LICENSE.txt) |
| [RCP](https://github.com/GPUOpen-Tools/radeon_compute_profiler/) | [MIT](https://github.com/GPUOpen-Tools/radeon_compute_profiler/blob/master/LICENSE) |
| [ROCK-Kernel-Driver](https://github.com/ROCm/ROCK-Kernel-Driver/) | [GPL 2.0 WITH Linux-syscall-note](https://github.com/ROCm/ROCK-Kernel-Driver/blob/master/COPYING) |
| [ROCR-Runtime](https://github.com/ROCm/ROCR-Runtime/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCR-Runtime/blob/master/LICENSE.txt) |
| [ROCT-Thunk-Interface](https://github.com/ROCm/ROCT-Thunk-Interface/) | [MIT](https://github.com/ROCm/ROCT-Thunk-Interface/blob/master/LICENSE.md) |
| [ROCclr](https://github.com/ROCm/ROCclr/) | [MIT](https://github.com/ROCm/ROCclr/blob/develop/LICENSE.txt) |
| [ROCdbgapi](https://github.com/ROCm/ROCdbgapi/) | [MIT](https://github.com/ROCm/ROCdbgapi/blob/amd-master/LICENSE.txt) |
| [ROCgdb](https://github.com/ROCm/ROCgdb/) | [GNU General Public License v2.0](https://github.com/ROCm/ROCgdb/blob/amd-master/COPYING) |
| [ROCm-CompilerSupport](https://github.com/ROCm/ROCm-CompilerSupport/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCm-CompilerSupport/blob/amd-stg-open/LICENSE.txt) |
| [ROCm-Device-Libs](https://github.com/ROCm/ROCm-Device-Libs/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCm-Device-Libs/blob/amd-stg-open/LICENSE.TXT) |
| [ROCm-OpenCL-Runtime/api/opencl/khronos/icd](https://github.com/KhronosGroup/OpenCL-ICD-Loader/) | [Apache 2.0](https://github.com/KhronosGroup/OpenCL-ICD-Loader/blob/main/LICENSE) |
| [ROCm-OpenCL-Runtime](https://github.com/ROCm/ROCm-OpenCL-Runtime/) | [MIT](https://github.com/ROCm/ROCm-OpenCL-Runtime/blob/develop/LICENSE.txt) |
| [ROCmValidationSuite](https://github.com/ROCm/ROCmValidationSuite/) | [MIT](https://github.com/ROCm/ROCmValidationSuite/blob/master/LICENSE) |
| [HIPCC](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/hipcc) | [MIT](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/hipcc/LICENSE.txt) |
| [HIPIFY](https://github.com/ROCm/HIPIFY/) | [MIT](https://github.com/ROCm/HIPIFY/blob/amd-staging/LICENSE.txt) |
| [AMDMIGraphX](https://github.com/ROCm/AMDMIGraphX/) | [MIT](https://github.com/ROCm/AMDMIGraphX/blob/develop/LICENSE) |
| [MIOpen](https://github.com/ROCm/MIOpen/) | [MIT](https://github.com/ROCm/MIOpen/blob/develop/LICENSE.txt) |
| [MIVisionX](https://github.com/ROCm/MIVisionX/) | [MIT](https://github.com/ROCm/MIVisionX/blob/develop/LICENSE.txt) |
| [AMD Common Language Runtime (CLR)](https://github.com/ROCm/clr) | [MIT](https://github.com/ROCm/clr/blob/develop/LICENCE) |
| [ROCm-Core](https://github.com/ROCm/rocm-core) | [MIT](https://github.com/ROCm/rocm-core/blob/master/copyright) |
| [hipamd](https://github.com/ROCm/clr/tree/develop/hipamd) | [MIT](https://github.com/ROCm/clr/blob/develop/hipamd/LICENSE.txt) |
| [ROCm-OpenCL-Runtime](https://github.com/ROCm/clr/tree/develop/opencl) | [MIT](https://github.com/ROCm/clr/blob/develop/opencl/LICENSE.txt) |
| [Tensile](https://github.com/ROCm/Tensile/) | [MIT](https://github.com/ROCm/Tensile/blob/develop/LICENSE.md) |
| [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) |
| [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) |
| [atmi](https://github.com/ROCm/atmi/) | [MIT](https://github.com/ROCm/atmi/blob/master/LICENSE.txt) |
| [llvm-project](https://github.com/ROCm/llvm-project/) | [Apache](https://github.com/ROCm/llvm-project/blob/amd-staging/LICENSE.TXT) |
| [llvm-project/flang](https://github.com/ROCm/llvm-project/tree/amd-staging/flang) | [Apache 2.0](https://github.com/ROCm/llvm-project/blob/amd-staging/flang/LICENSE.TXT) |
| [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) |
| [ROCm-Device-Libs](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/device-libs) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/device-libs/LICENSE.TXT) |
| [clang-ocl](https://github.com/ROCm/clang-ocl/) | [MIT](https://github.com/ROCm/clang-ocl/blob/master/LICENSE) |
| [flang](https://github.com/ROCm/flang/) | [Apache 2.0](https://github.com/ROCm/flang/blob/master/LICENSE.txt) |
| [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/master/LICENSE.txt) |
| [ROCK-Kernel-Driver](https://github.com/ROCm/ROCK-Kernel-Driver/) | [GPL 2.0 WITH Linux-syscall-note](https://github.com/ROCm/ROCK-Kernel-Driver/blob/master/COPYING) |
| [ROCT-Thunk-Interface](https://github.com/ROCm/ROCT-Thunk-Interface/) | [MIT](https://github.com/ROCm/ROCT-Thunk-Interface/blob/master/LICENSE.md) |
| [ROCR-Runtime](https://github.com/ROCm/ROCR-Runtime/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCR-Runtime/blob/master/LICENSE.txt) |
| [ROCR Debug Agent](https://github.com/ROCm/rocr_debug_agent/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocr_debug_agent/blob/amd-staging/LICENSE.txt) |
| [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) |
| [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) |
| [hipBLAS](https://github.com/ROCm/hipBLAS/) | [MIT](https://github.com/ROCm/hipBLAS/blob/develop/LICENSE.md) |
| [hipBLASLt](https://github.com/ROCm/hipBLASLt/) | [MIT](https://github.com/ROCm/hipBLASLt/blob/develop/LICENSE.md) |
| [hipCUB](https://github.com/ROCm/hipCUB/) | [Custom](https://github.com/ROCm/hipCUB/blob/develop/LICENSE.txt) |
| [hipFFT](https://github.com/ROCm/hipFFT/) | [MIT](https://github.com/ROCm/hipFFT/blob/develop/LICENSE.md) |
| [hipFORT](https://github.com/ROCm/hipfort/) | [MIT](https://github.com/ROCm/hipfort/blob/develop/LICENSE) |
| [hipRAND](https://github.com/ROCm/hipRAND/) | [MIT](https://github.com/ROCm/hipRAND/blob/develop/LICENSE.txt) |
| [hipSOLVER](https://github.com/ROCm/hipSOLVER/) | [MIT](https://github.com/ROCm/hipSOLVER/blob/develop/LICENSE.md) |
| [hipSPARSELt](https://github.com/ROCm/hipSPARSELt/) | [MIT](https://github.com/ROCm/hipSPARSELt/blob/develop/LICENSE.md) |
| [hipSPARSE](https://github.com/ROCm/hipSPARSE/) | [MIT](https://github.com/ROCm/hipSPARSE/blob/develop/LICENSE.md) |
| [hipTensor](https://github.com/ROCm/hipTensor) | [MIT](https://github.com/ROCm/hipTensor/blob/develop/LICENSE) |
| [hipamd](https://github.com/ROCm/hipamd/) | [MIT](https://github.com/ROCm/hipamd/blob/develop/LICENSE.txt) |
| [hipfort](https://github.com/ROCm/hipfort/) | [MIT](https://github.com/ROCm/hipfort/blob/master/LICENSE) |
| [llvm-project](https://github.com/ROCm/llvm-project/) | [Apache](https://github.com/ROCm/llvm-project/blob/main/LICENSE.TXT) |
| [rccl](https://github.com/ROCm/rccl/) | [Custom](https://github.com/ROCm/rccl/blob/develop/LICENSE.txt) |
| [rdc](https://github.com/ROCm/rdc/) | [MIT](https://github.com/ROCm/rdc/blob/master/LICENSE) |
| [rocAL](https://github.com/ROCm/rocAL) | [MIT](https://github.com/ROCm/rocAL/blob/develop/LICENSE.txt) |
| [rocALUTION](https://github.com/ROCm/rocALUTION/) | [MIT](https://github.com/ROCm/rocALUTION/blob/develop/LICENSE.md) |
| [rocBLAS](https://github.com/ROCm/rocBLAS/) | [MIT](https://github.com/ROCm/rocBLAS/blob/develop/LICENSE.md) |
| [rocDecode](https://github.com/ROCm/rocDecode) | [MIT](https://github.com/ROCm/rocDecode/blob/develop/LICENSE) |
| [rocFFT](https://github.com/ROCm/rocFFT/) | [MIT](https://github.com/ROCm/rocFFT/blob/develop/LICENSE.md) |
| [rocPRIM](https://github.com/ROCm/rocPRIM/) | [MIT](https://github.com/ROCm/rocPRIM/blob/develop/LICENSE.txt) |
| [ROCm Performance Primitives (RPP)](https://github.com/ROCm/rpp) | [MIT](https://github.com/ROCm/rpp/blob/develop/LICENSE) |
| [rocRAND](https://github.com/ROCm/rocRAND/) | [MIT](https://github.com/ROCm/rocRAND/blob/develop/LICENSE.txt) |
| [rocSOLVER](https://github.com/ROCm/rocSOLVER/) | [BSD-2-Clause](https://github.com/ROCm/rocSOLVER/blob/develop/LICENSE.md) |
| [rocSPARSE](https://github.com/ROCm/rocSPARSE/) | [MIT](https://github.com/ROCm/rocSPARSE/blob/develop/LICENSE.md) |
| [rocThrust](https://github.com/ROCm/rocThrust/) | [Apache 2.0](https://github.com/ROCm/rocThrust/blob/develop/LICENSE) |
| [rocWMMA](https://github.com/ROCm/rocWMMA/) | [MIT](https://github.com/ROCm/rocWMMA/blob/develop/LICENSE.md) |
| [rocm-cmake](https://github.com/ROCm/rocm-cmake/) | [MIT](https://github.com/ROCm/rocm-cmake/blob/develop/LICENSE) |
| [rocm_bandwidth_test](https://github.com/ROCm/rocm_bandwidth_test/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
| [rocm_smi_lib](https://github.com/ROCm/rocm_smi_lib/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm_smi_lib/blob/master/License.txt) |
| [rocminfo](https://github.com/ROCm/rocminfo/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocminfo/blob/master/License.txt) |
| [rocprofiler](https://github.com/ROCm/rocprofiler/) | [MIT](https://github.com/ROCm/rocprofiler/blob/amd-master/LICENSE) |
| [rocr_debug_agent](https://github.com/ROCm/rocr_debug_agent/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocr_debug_agent/blob/master/LICENSE.txt) |
| [roctracer](https://github.com/ROCm/roctracer/) | [MIT](https://github.com/ROCm/roctracer/blob/amd-master/LICENSE) |
| rocm-llvm-alt | [AMD Proprietary License](https://www.amd.com/en/support/amd-software-eula) |
| [ROCm Communication Collectives Library (RCCL)](https://github.com/ROCm/rccl/) | [Custom](https://github.com/ROCm/rccl/blob/develop/LICENSE.txt) |
| [ROCm Data Center (RDC)](https://github.com/ROCm/rdc/) | [MIT](https://github.com/ROCm/rdc/blob/develop/LICENSE) |
| [ROCm CMake](https://github.com/ROCm/rocm-cmake/) | [MIT](https://github.com/ROCm/rocm-cmake/blob/develop/LICENSE) |
| [ROCdbgapi](https://github.com/ROCm/ROCdbgapi/) | [MIT](https://github.com/ROCm/ROCdbgapi/blob/amd-staging/LICENSE.txt) |
| [ROCgdb](https://github.com/ROCm/ROCgdb/) | [GNU General Public License v2.0](https://github.com/ROCm/ROCgdb/blob/amd-master/COPYING) |
| [ROCm SMI Lib](https://github.com/ROCm/rocm_smi_lib/) | [MIT](https://github.com/ROCm/rocm_smi_lib/blob/develop/License.txt) |
| [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/develop/LICENSE) |
| [rocminfo](https://github.com/ROCm/rocminfo/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocminfo/blob/amd-staging/License.txt) |
| [ROCProfiler](https://github.com/ROCm/rocprofiler/) | [MIT](https://github.com/ROCm/rocprofiler/blob/amd-master/LICENSE) |
| [ROCTracer](https://github.com/ROCm/roctracer/) | [MIT](https://github.com/ROCm/roctracer/blob/amd-master/LICENSE) |
| [ROCm Bandwidth Test](https://github.com/ROCm/rocm_bandwidth_test/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
| [TransferBench](https://github.com/ROCm/TransferBench) | [MIT](https://github.com/ROCm/TransferBench/blob/develop/LICENSE.md) |
| [ROCmValidationSuite](https://github.com/ROCm/ROCmValidationSuite/) | [MIT](https://github.com/ROCm/ROCmValidationSuite/blob/master/LICENSE) |
| hsa-amd-aqlprofile | [AMD Software EULA](https://www.amd.com/en/legal/eula/amd-software-eula.html) |

Open sourced ROCm components are released via public GitHub
repositories, packages on https://repo.radeon.com and other distribution channels.
Proprietary products are only available on https://repo.radeon.com. Currently, only
one component of ROCm, rocm-llvm-alt, is governed by a proprietary license.
repositories, packages on [https://repo.radeon.com](https://repo.radeon.com) and other distribution channels.
Proprietary products are only available on [https://repo.radeon.com](https://repo.radeon.com). Currently, only
one component of ROCm, `rocm-llvm-alt`, is governed by a proprietary license.
Proprietary components are organized in a proprietary subdirectory in the package
repositories to distinguish them from open sourced packages.

@@ -92,7 +97,7 @@ repositories to distinguish from open sourced packages.
The following additional terms and conditions apply to your use of ROCm technical documentation.
```

©2023 Advanced Micro Devices, Inc. All rights reserved.
©2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.

The information presented in this document is for informational purposes only
and may contain technical inaccuracies, omissions, and typographical errors. The
@@ -125,8 +130,8 @@ companies.

:::{attention}
AQL Profiler and AOCC CPU optimization are both provided in binary form, each
subject to the license agreement enclosed in the directory for the binary and is
available here: `/opt/rocm/share/doc/rocm-llvm-alt/EULA`. By using, installing,
subject to the license agreement enclosed in the directory for the binary available
in `/opt/rocm/share/doc/hsa-amd-aqlprofile/EULA`. By using, installing,
copying or distributing AQL Profiler and/or AOCC CPU Optimizations, you agree to
the terms and conditions of this license agreement. If you do not agree to the
terms of this agreement, do not install, copy or use the AQL Profiler and/or the
@@ -134,9 +139,8 @@ AOCC CPU Optimizations.
:::

For the rest of the ROCm packages, you can find the licensing information at the
following location: `/opt/rocm/share/doc/<component-name>/`
following location: `/opt/rocm/share/doc/<component-name>/` or in the locations
specified in the preceding table.

For example, you can fetch the licensing information of the `_amd_comgr_`
component (Code Object Manager) from the `amd_comgr` folder. A file named
`LICENSE.txt` contains the license details at:
`/opt/rocm-5.4.3/share/doc/amd_comgr/LICENSE.txt`
For example, you can fetch the licensing information of the `amd_comgr`
component (Code Object Manager) from the `/opt/rocm/share/doc/amd_comgr/LICENSE.txt` file.
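
On an installed system, reading a component's license is a one-liner, for example:

```shell
cat /opt/rocm/share/doc/amd_comgr/LICENSE.txt
```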

@@ -17,10 +17,11 @@ Use this matrix to view the ROCm compatibility across successive major and minor

:doc:`Operating Systems <rocm-install-on-linux:reference/system-requirements>`, "Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3"
,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
,"RHEL 9.3, 9.2","RHEL 9.3, 9.2"
,"RHEL 9.4 [#red-hat94]_, 9.3, 9.2","RHEL 9.3, 9.2"
,"RHEL 8.9, 8.8","RHEL 8.9, 8.8"
,"SLES 15 SP5, SP4","SLES 15 SP5, SP4"
,CentOS 7.9,CentOS 7.9
,"Oracle Linux 8.9 [#oracle89]_"
,,
:doc:`GFX Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3
,CDNA2,CDNA2
@@ -36,9 +37,9 @@ Use this matrix to view the ROCm compatibility across successive major and minor
,,
ECOSYSTEM SUPPORT:,,
:doc:`PyTorch <rocm-install-on-linux:how-to/3rd-party/pytorch-install>`,"2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`Tensorflow <rocm-install-on-linux:how-to/3rd-party/tensorflow-install>`,"2.15, 2.14, 2.13","2.14, 2.13, 2.12"
:doc:`TensorFlow <rocm-install-on-linux:how-to/3rd-party/tensorflow-install>`,"2.15, 2.14, 2.13","2.14, 2.13, 2.12"
:doc:`JAX <rocm-install-on-linux:how-to/3rd-party/jax-install>`,0.4.26,0.4.26
`ONNX-RT <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.14.1
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.14.1
,,
3RD PARTY COMMUNICATION LIBS:,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.2.0,>=1.2.0
@@ -52,12 +53,12 @@ Use this matrix to view the ROCm compatibility across successive major and minor
:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.9.0,2.8.0
:doc:`MIOpen <miopen:index>`,3.1.0,3.0.0
:doc:`MIVisionX <mivisionx:doxygen/html/index>`,2.5.0,2.5.0
:doc:`MIVisionX <mivisionx:index>`,2.5.0,2.5.0
:doc:`rocDecode <rocdecode:index>`,0.5.0,N/A
:doc:`RPP <rpp:index>`,1.5.0,1.4.0
:doc:`ROCm Performance Primitives (RPP) <rpp:index>`,1.5.0,1.4.0
,,
COMMUNICATION:,,
:doc:`rccl <rccl:index>`,2.18.6,2.18.3
:doc:`RCCL <rccl:index>`,2.18.6,2.18.3
,,
MATH LIBS:,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0
@@ -86,7 +87,7 @@ Use this matrix to view the ROCm compatibility across successive major and minor
,,
SUPPORT LIBS:,,
`hipother <https://github.com/ROCm/hipother>`_,6.1.40091,6.0.32830
`rocm-cmake <https://github.com/ROCm/rocm-cmake>`_,0.12.0,0.11.0
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.12.0,0.11.0
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.1.0,6.0.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,20240125.3.30,20231016.2.245
,,
@@ -94,20 +95,19 @@ Use this matrix to view the ROCm compatibility across successive major and minor
:doc:`AMD SMI <amdsmi:index>`,24.4.1,23.4.2
:doc:`HIPIFY <hipify:index>`,17.0.0,17.0.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.71.0,0.71.0
`ROCdebug-Agent <https://github.com/ROCm/rocr_debug_agent>`_,2.0.3,2.0.3
:doc:`rocGDB <rocgdb:index>`,14.1.0,13.2.0
:doc:`rocProfiler <rocprofiler:profiler_home_page>`,2.0.60100,2.0.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60100,2.0.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.3.0,N/A
:doc:`rocTracer <roctracer:index>`,4.1.60100,4.1.0
`rocm_bandwidth_test <https://github.com/ROCm/rocm_bandwidth_test>`_,1.4.0,1.4.0
:doc:`ROCTracer <roctracer:index>`,4.1.60100,4.1.0
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0
:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0
`rocminfo <https://github.com/ROCm/rocminfo>`_,1.0.0,1.0.0
:doc:`ROCm SMI Lib <rocm_smi_lib:index>`,7.0.0,6.0.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,14.1.0,13.2.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.0.0,6.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,rocm-6.1.0,rocm-6.0.0
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3
:doc:`TransferBench <transferbench:index>`,1.48,1.46
,,
COMPILERS:,,
`AOMP <https://github.com/ROCm/aomp>`_,17.60.0,17.60.0
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,0.5.0,0.5.0
`Flang <https://github.com/ROCm/flang>`_,17.0.0.24103,17.0.0.23483
`llvm-project <https://github.com/ROCm/llvm-project>`_,17.0.0.24103,17.0.0.23483
@@ -116,11 +116,13 @@ Use this matrix to view the ROCm compatibility across successive major and minor
RUNTIMES:,,
:doc:`HIP <hip:index>`,6.1.40091,6.0.32830
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0
`ROCR Runtime <https://github.com/ROCm/ROCR-Runtime>`_,1.13.0,1.12.0
:doc:`ROCR-Runtime <rocr-runtime:index>`,1.13.0,1.12.0


.. rubric:: Footnotes

.. [#] **For ROCm 6.1** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.3 & 8.9 and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4.

.. [#red-hat94] **For ROCm 6.1** - RHEL 9.4 is supported only on AMD Instinct MI300A.
.. [#oracle89] **For ROCm 6.1.1** - Oracle Linux is supported only on AMD Instinct MI300X.
.. [#] **For ROCm 6.1** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.4, RHEL 9.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4.
.. [#] **For ROCm 6.0** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9 and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3.

@@ -13,7 +13,9 @@ This document provides documentation on using ROCm ASan.

For information about LLVM ASan, see the [LLVM documentation](https://clang.llvm.org/docs/AddressSanitizer.html).

**Note:** The beta release of LLVM ASan for ROCm is currently tested and validated on Ubuntu 20.04.
:::{note}
The beta release of LLVM ASan for ROCm is currently tested and validated on Ubuntu 20.04.
:::

## Compiling for ASan

@@ -34,9 +36,13 @@ Recommendations for doing this are:

Other architectures are allowed, but their device code will not be instrumented and a warning will be emitted.

**Note:** It is not an error to compile some files without ASan instrumentation, but doing so reduces the ability of the process to detect addressing errors. However, if the main program "`a.out`" does not directly depend on the ASan runtime (`libclang_rt.asan-x86_64.so`) after the build completes (check by running `ldd` (List Dynamic Dependencies) or `readelf`), the application will immediately report an error at runtime as described in the next section.
:::{tip}
It is not an error to compile some files without ASan instrumentation, but doing so reduces the ability of the process to detect addressing errors. However, if the main program "`a.out`" does not directly depend on the ASan runtime (`libclang_rt.asan-x86_64.so`) after the build completes (check by running `ldd` (List Dynamic Dependencies) or `readelf`), the application will immediately report an error at runtime as described in the next section.
:::

**Note:** When compiling OpenMP programs with ASan instrumentation, it is currently necessary to set the environment variable `LIBRARY_PATH` to `/opt/rocm-<version>/lib/llvm/lib/asan:/opt/rocm-<version>/lib/asan`. At runtime, it may be necessary to add `/opt/rocm-<version>/lib/llvm/lib/asan` to `LD_LIBRARY_PATH`.
:::{note}
When compiling OpenMP programs with ASan instrumentation, it is currently necessary to set the environment variable `LIBRARY_PATH` to `/opt/rocm-<version>/lib/llvm/lib/asan:/opt/rocm-<version>/lib/asan`. At runtime, it may be necessary to add `/opt/rocm-<version>/lib/llvm/lib/asan` to `LD_LIBRARY_PATH`.
:::
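
In practice, that means exporting both variables before building and running (paths copied from the note above; substitute your installed ROCm version):

```shell
export LIBRARY_PATH=/opt/rocm-<version>/lib/llvm/lib/asan:/opt/rocm-<version>/lib/asan
export LD_LIBRARY_PATH=/opt/rocm-<version>/lib/llvm/lib/asan:$LD_LIBRARY_PATH
```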

### About compilation time

@@ -92,15 +98,23 @@ If it does not appear, when executed the application will quickly output an ASan

There is an environment variable, `ASAN_OPTIONS`, that can be used to adjust the runtime behavior of the ASan runtime itself. There are more than a hundred "flags" that can be adjusted (see an old list at [flags](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)) but the default settings are correct and should be used in most cases. Note that these options only affect the host ASan runtime. The device runtime currently supports only the default settings for the few relevant options.

There are two `ASAN_OPTIONS` flags of particular note.
There are three `ASAN_OPTIONS` flags of note.

* `halt_on_error=0/1 default 1`.

  This tells the ASan runtime to halt the application immediately after detecting and reporting an addressing error. The default makes sense because the application has entered the realm of undefined behavior. If the developer wishes to have the application continue anyway, this option can be set to zero. However, the application and libraries should then be compiled with the additional option `-fsanitize-recover=address`. Note that the ROCm optional ASan instrumented libraries are not compiled with this option, and if an error is detected within one of them while `halt_on_error` is set to 0, more undefined behavior will occur.

* `detect_leaks=0/1 default 1`.

  This option directs the ASan runtime to enable the [Leak Sanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) (LSan). Unfortunately, for heterogeneous applications, this default will result in significant output from the leak sanitizer when the application exits due to allocations made by the language runtime which are not considered to be leaks. This output can be avoided by adding `detect_leaks=0` to the `ASAN_OPTIONS`, or alternatively by producing an LSan suppression file (syntax described [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer)) and activating it with the environment variable `LSAN_OPTIONS=suppressions=/path/to/suppression/file`. When using a suppression file, a suppression report is printed by default. The suppression report can be disabled by using the `LSAN_OPTIONS` flag `print_suppressions=0`.

  This option directs the ASan runtime to enable the [Leak Sanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) (LSan). For heterogeneous applications, this default results in significant output from the leak sanitizer when the application exits due to allocations made by the language runtime which are not considered to be leaks. This output can be avoided by adding `detect_leaks=0` to the `ASAN_OPTIONS`, or alternatively by producing an LSan suppression file (syntax described [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer)) and activating it with the environment variable `LSAN_OPTIONS=suppressions=/path/to/suppression/file`. When using a suppression file, a suppression report is printed by default. The suppression report can be disabled by using the `LSAN_OPTIONS` flag `print_suppressions=0`.

* `quarantine_size_mb=N default 256`

  This option defines the number of megabytes (MB) `N` of memory that the ASan runtime will hold after it is `freed` to detect use-after-free situations. This memory is unavailable for other purposes. The default of 256 MB may be too small to detect some use-after-free situations, especially given that the large size of many GPU memory allocations may push `freed` allocations out of quarantine before the attempted use.

:::{note}
Setting the value of `quarantine_size_mb` larger may enable more problematic uses to be detected, but at the cost of reducing memory available for other purposes.
:::
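
These flags can be combined in a single colon-separated `ASAN_OPTIONS` string. For example (values are illustrative, not recommendations):

```shell
export ASAN_OPTIONS="halt_on_error=0:detect_leaks=0:quarantine_size_mb=512"
./a.out
```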

## Runtime overhead

@@ -186,7 +200,7 @@ or

currently may include one or two surprising CPU-side tracebacks mentioning `hostcall`. This is due to how `malloc` and `free` are implemented for GPU code, and these call stacks can be ignored.

### Running with `rocgdb`
## Running ASan with `rocgdb`

`rocgdb` can be used to further investigate ASan detected errors, with some preparation.

@@ -238,7 +252,7 @@ $ rocgdb <path to application>
(gdb) c
```

### Using ASan with a short HIP application
## Using ASan with a short HIP application

Consider the following simple and short demo of using the Address Sanitizer with a HIP application:

@@ -402,7 +416,7 @@ Shadow byte legend (one shadow byte represents 8 application bytes):
==2817==ABORTING
```

### Known issues with using GPU sanitizer
## Known issues with using GPU sanitizer

* Red zones must have limited size. It is possible for an invalid access to completely miss a red zone and not be detected.

38 docs/conf.py
@@ -23,7 +23,7 @@ for template in templates:

shutil.copy2('../RELEASE.md','./about/release-notes.md')
# Keep capitalization due to similar linking on GitHub's markdown preview.
shutil.copy2('../CHANGELOG.md','./about/CHANGELOG.md')
shutil.copy2('../CHANGELOG.md','./about/changelog.md')

latex_engine = "xelatex"
latex_elements = {
@@ -38,8 +38,8 @@ latex_elements = {
project = "ROCm Documentation"
author = "Advanced Micro Devices, Inc."
copyright = "Copyright (c) 2024 Advanced Micro Devices, Inc. All rights reserved."
version = "6.1.1"
release = "6.1.1"
version = "6.1.2"
release = "6.1.2"
setting_all_article_info = True
all_article_info_os = ["linux", "windows"]
all_article_info_author = ""
@@ -49,42 +49,18 @@ article_pages = [
    {
        "file":"about/release-notes",
        "os":["linux", "windows"],
        "date":"2024-01-31"
        "date":"2024-06-04"
    },
    {
        "file":"about/CHANGELOG",
        "file":"about/changelog",
        "os":["linux", "windows"],
        "date":"2024-01-31"
        "date":"2024-06-04"
    },

    {"file":"install/windows/install-quick", "os":["windows"]},
    {"file":"install/linux/install-quick", "os":["linux"]},

    {"file":"install/linux/install", "os":["linux"]},
    {"file":"install/linux/install-options", "os":["linux"]},
    {"file":"install/linux/prerequisites", "os":["linux"]},

    {"file":"install/docker", "os":["linux"]},
    {"file":"install/magma-install", "os":["linux"]},
    {"file":"install/pytorch-install", "os":["linux"]},
    {"file":"install/tensorflow-install", "os":["linux"]},

    {"file":"install/windows/install", "os":["windows"]},
    {"file":"install/windows/prerequisites", "os":["windows"]},
    {"file":"install/windows/cli/index", "os":["windows"]},
    {"file":"install/windows/gui/index", "os":["windows"]},

    {"file":"about/compatibility/docker-image-support-matrix", "os":["linux"]},
    {"file":"about/compatibility/user-kernel-space-compat-matrix", "os":["linux"]},

    {"file":"reference/library-index", "os":["linux"]},

    {"file":"how-to/deep-learning-rocm", "os":["linux"]},
    {"file":"how-to/gpu-enabled-mpi", "os":["linux"]},
    {"file":"how-to/system-debugging", "os":["linux"]},
    {"file":"how-to/tuning-guides", "os":["linux", "windows"]},

    {"file":"rocm-a-z", "os":["linux", "windows"]},
]

exclude_patterns = ['temp']
@@ -108,5 +84,5 @@ html_theme_options = {
}

redirects = {
    "reference/openmp/openmp": "../../about/compatibility/openmp.html"
}
Before Width: | Height: | Size: 108 KiB After Width: | Height: | Size: 95 KiB |
|
After Width: | Height: | Size: 44 KiB |
BIN
docs/data/how-to/llm-fine-tuning-optimization/ck-comparisons.jpg
Normal file
|
After Width: | Height: | Size: 112 KiB |
BIN
docs/data/how-to/llm-fine-tuning-optimization/ck-compilation.jpg
Normal file
|
After Width: | Height: | Size: 188 KiB |
|
After Width: | Height: | Size: 138 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 27 KiB |
|
After Width: | Height: | Size: 86 KiB |
|
After Width: | Height: | Size: 49 KiB |
BIN
docs/data/how-to/llm-fine-tuning-optimization/compute-unit.png
Normal file
|
After Width: | Height: | Size: 45 KiB |
BIN
docs/data/how-to/llm-fine-tuning-optimization/occupancy-vgpr.png
Normal file
|
After Width: | Height: | Size: 288 KiB |
|
After Width: | Height: | Size: 153 KiB |
|
After Width: | Height: | Size: 219 KiB |
|
After Width: | Height: | Size: 80 KiB |
|
After Width: | Height: | Size: 73 KiB |
BIN
docs/data/how-to/llm-fine-tuning-optimization/tunableop.png
Normal file
|
After Width: | Height: | Size: 28 KiB |
|
After Width: | Height: | Size: 43 KiB |
BIN
docs/data/how-to/llm-fine-tuning-optimization/weight-update.png
Normal file
|
After Width: | Height: | Size: 25 KiB |
BIN
docs/data/how-to/rocm-for-hpc/hpc-stack-2024_6_20.png
Normal file
|
After Width: | Height: | Size: 187 KiB |

@@ -8,44 +8,14 @@ Installing deep learning frameworks for ROCm

ROCm provides a comprehensive ecosystem for deep learning development, including
:ref:`libraries <artificial-intelligence-apis>` for optimized deep learning operations and ROCm-aware versions of popular
deep learning frameworks and libraries such as PyTorch, TensorFlow, JAX, and MAGMA. ROCm works closely with these
deep learning frameworks and libraries such as PyTorch, TensorFlow, and JAX. ROCm works closely with these
frameworks to ensure that framework-specific optimizations take advantage of AMD accelerator and GPU architectures.

The following guides cover installation processes for ROCm-aware deep learning frameworks.

.. grid::

   .. grid-item::
      :columns: 3

      :doc:`PyTorch for ROCm <rocm-install-on-linux:how-to/3rd-party/pytorch-install>`

   .. grid-item::
      :columns: 3

      :doc:`TensorFlow for ROCm <rocm-install-on-linux:how-to/3rd-party/tensorflow-install>`

   .. grid-item::
      :columns: 3

   .. grid-item::
      :columns: 3

   .. grid-item::
      :columns: 3

      :doc:`JAX for ROCm <rocm-install-on-linux:how-to/3rd-party/jax-install>`

   .. grid-item::
      :columns: 3

      :doc:`MAGMA for ROCm <rocm-install-on-linux:how-to/3rd-party/magma-install>`

   .. grid-item::
      :columns: 3

   .. grid-item::
      :columns: 3

* :doc:`PyTorch for ROCm <rocm-install-on-linux:how-to/3rd-party/pytorch-install>`
* :doc:`TensorFlow for ROCm <rocm-install-on-linux:how-to/3rd-party/tensorflow-install>`
* :doc:`JAX for ROCm <rocm-install-on-linux:how-to/3rd-party/jax-install>`

The following chart steps through typical installation workflows for installing deep learning frameworks for ROCm.

@@ -60,6 +30,9 @@ Find information on version compatibility and framework release notes in :doc:`T

For guidance on installing ROCm itself, refer to :doc:`ROCm installation for Linux <rocm-install-on-linux:index>`.

Learn how to use your ROCm deep learning environment for training, fine-tuning, and inference through the following guides.
Learn how to use your ROCm deep learning environment for training, fine-tuning, inference, and performance optimization
through the following guides.

* :doc:`rocm-for-ai/index`

* :doc:`llm-fine-tuning-optimization/index`

@@ -0,0 +1,20 @@
.. meta::
   :description: How to fine-tune LLMs with ROCm
   :keywords: ROCm, LLM, fine-tuning, inference, usage, tutorial

*************************
Fine-tuning and inference
*************************

Fine-tuning using ROCm involves leveraging AMD's GPU-accelerated :doc:`libraries <rocm:reference/api-libraries>` and
:doc:`tools <rocm:reference/rocm-tools>` to optimize and train deep learning models. ROCm provides a comprehensive
ecosystem for deep learning development, including open-source libraries for optimized deep learning operations and
ROCm-aware versions of :doc:`deep learning frameworks <../deep-learning-rocm>` such as PyTorch, TensorFlow, and JAX.

Single-accelerator systems, such as a machine equipped with a single accelerator or GPU, are commonly used for
smaller-scale deep learning tasks, including fine-tuning pre-trained models and running inference on moderately
sized datasets. See :doc:`single-gpu-fine-tuning-and-inference`.

Multi-accelerator systems, on the other hand, consist of multiple accelerators working in parallel. These systems are
typically used in LLMs and other large-scale deep learning tasks where performance, scalability, and the handling of
massive datasets are crucial. See :doc:`multi-gpu-fine-tuning-and-inference`.

37 docs/how-to/llm-fine-tuning-optimization/index.rst (new file)
@@ -0,0 +1,37 @@
.. meta::
   :description: How to fine-tune LLMs with ROCm
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial

*******************************************
Fine-tuning LLMs and inference optimization
*******************************************

ROCm empowers the fine-tuning and optimization of large language models, making them accessible and efficient for
specialized tasks. ROCm supports the broader AI ecosystem to ensure seamless integration with open frameworks,
models, and tools.

For more information, see `What is ROCm? <https://rocm.docs.amd.com/en/latest/what-is-rocm.html>`_

Throughout the following topics, this guide discusses the goals and :ref:`challenges of fine-tuning a large language
model <fine-tuning-llms-concept-challenge>` like Llama 2. Then, it introduces :ref:`common methods of optimizing your
fine-tuning <fine-tuning-llms-concept-optimizations>` using techniques like LoRA with libraries like PEFT. In the
sections that follow, you'll find practical guides on libraries and tools to accelerate your fine-tuning.

- :doc:`Conceptual overview of fine-tuning LLMs <overview>`

- :doc:`Fine-tuning and inference <fine-tuning-and-inference>` using a
  :doc:`single-accelerator <single-gpu-fine-tuning-and-inference>` or
  :doc:`multi-accelerator <multi-gpu-fine-tuning-and-inference>` system.

- :doc:`Model quantization <model-quantization>`

- :doc:`Model acceleration libraries <model-acceleration-libraries>`

- :doc:`LLM inference frameworks <llm-inference-frameworks>`

- :doc:`Optimizing with Composable Kernel <optimizing-with-composable-kernel>`

- :doc:`Optimizing Triton kernels <optimizing-triton-kernel>`

- :doc:`Profiling and debugging <profiling-and-debugging>`
@@ -0,0 +1,211 @@
|
||||
.. meta::
|
||||
:description: How to fine-tune LLMs with ROCm
|
||||
:keywords: ROCm, LLM, fine-tuning, usage, tutorial, inference, vLLM, TGI, text generation inference
|
||||
|
||||
************************
|
||||
LLM inference frameworks
|
||||
************************
|
||||
|
||||
This section discusses how to implement `vLLM <https://docs.vllm.ai/en/latest>`_ and `Hugging Face TGI
|
||||
<https://huggingface.co/docs/text-generation-inference/en/index>`_ using
|
||||
:doc:`single-accelerator <single-gpu-fine-tuning-and-inference>` and
|
||||
:doc:`multi-accelerator <multi-gpu-fine-tuning-and-inference>` systems.
|
||||
|
||||
.. _fine-tuning-llms-vllm:
|
||||
|
||||
vLLM inference
|
||||
==============
|
||||
|
||||
vLLM is renowned for its paged attention algorithm that can reduce memory consumption and increase throughput thanks to
|
||||
its paging scheme. Instead of allocating GPU high-bandwidth memory (HBM) for the maximum output token lengths of the
|
||||
models, the paged attention of vLLM allocates GPU HBM dynamically for its actual decoding lengths. This paged attention
|
||||
is also effective when multiple requests share the same key and value contents for a large value of beam search or
|
||||
multiple parallel requests.
|
||||
|
||||
vLLM also incorporates many modern LLM acceleration and quantization algorithms, such as Flash Attention, HIP and CUDA
|
||||
graphs, tensor parallel multi-GPU, GPTQ, AWQ, and token speculation.
|
||||
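Once vLLM is installed (see below), you can also run its offline inference API directly as a quick sanity check. This
is a minimal sketch; the model name and prompt are placeholders.

.. code-block:: python

from vllm import LLM, SamplingParams

# Load a Hugging Face or local model; float16 fits most Instinct accelerators.
llm = LLM(model="NousResearch/Llama-2-7b-chat-hf", dtype="float16")
params = SamplingParams(temperature=0.0, max_tokens=80)
outputs = llm.generate(["What is AMD Instinct?"], params)
print(outputs[0].outputs[0].text)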
|
||||
Installing vLLM
|
||||
---------------
|
||||
|
||||
.. _fine-tuning-llms-vllm-rocm-docker-image:
|
||||
|
||||
1. Run the following commands to build a Docker image ``vllm-rocm``.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
git clone https://github.com/vllm-project/vllm.git
|
||||
cd vllm
|
||||
docker build -f Dockerfile.rocm -t vllm-rocm .
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: vLLM on a single-accelerator system
|
||||
:sync: single
|
||||
|
||||
2. To use vLLM as an API server to serve inference requests, first start a container using the :ref:`vllm-rocm
Docker image <fine-tuning-llms-vllm-rocm-docker-image>`.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
docker run -it \
|
||||
--network=host \
|
||||
--group-add=video \
|
||||
--ipc=host \
|
||||
--cap-add=SYS_PTRACE \
|
||||
--security-opt seccomp=unconfined \
|
||||
--device /dev/kfd \
|
||||
--device /dev/dri \
|
||||
-v <path/to/model>:/app/model \
|
||||
vllm-rocm \
|
||||
bash
|
||||
|
||||
3. Inside the container, start the API server to run on a single accelerator on port 8000 using the following command.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
python -m vllm.entrypoints.api_server --model /app/model --dtype float16 --port 8000 &
|
||||
|
||||
The following log message displayed in your command line indicates that the server is listening for requests.
|
||||
|
||||
.. image:: ../../data/how-to/llm-fine-tuning-optimization/vllm-single-gpu-log.png
|
||||
:alt: vLLM API server log message
|
||||
:align: center
|
||||
|
||||
4. To test, send it a curl request containing a prompt.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
curl http://localhost:8000/generate -H "Content-Type: application/json" -d '{"prompt": "What is AMD Instinct?", "max_tokens": 80, "temperature": 0.0 }'
|
||||
|
||||
You should receive a response like the following.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
{"text":["What is AMD Instinct?\nAmd Instinct is a brand new line of high-performance computing (HPC) processors from Advanced Micro Devices (AMD). These processors are designed to deliver unparalleled performance for HPC workloads, including scientific simulations, data analytics, and machine learning.\nThe Instinct lineup includes a range of processors, from the entry-level Inst"]}
|
||||
|
||||
.. tab-item:: vLLM on a multi-accelerator system
|
||||
:sync: multi
|
||||
|
||||
2. To use vLLM as an API server to serve inference requests, first start a container using the :ref:`vllm-rocm
|
||||
Docker image <fine-tuning-llms-vllm-rocm-docker-image>`.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
docker run -it \
|
||||
--network=host \
|
||||
--group-add=video \
|
||||
--ipc=host \
|
||||
--cap-add=SYS_PTRACE \
|
||||
--security-opt seccomp=unconfined \
|
||||
--device /dev/kfd \
|
||||
--device /dev/dri \
|
||||
-v <path/to/model>:/app/model \
|
||||
vllm-rocm \
|
||||
bash
|
||||
|
||||
|
||||
3. To run the API server on multiple GPUs, use the ``-tp`` or ``--tensor-parallel-size`` parameter. For example, to use two
|
||||
GPUs, start the API server using the following command.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
python -m vllm.entrypoints.api_server --model /app/model --dtype float16 -tp 2 --port 8000 &
|
||||
|
||||
4. To run multiple instances of the API server, specify different ports for each server, and use ``ROCR_VISIBLE_DEVICES`` to
|
||||
isolate each instance to a different accelerator.
|
||||
|
||||
For example, to run two API servers, one on port 8000 using GPUs 0 and 1 and one on port 8001 using GPUs 2 and 3, use
commands like the following.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
ROCR_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.api_server --model /data/llama-2-7b-chat-hf --dtype float16 -tp 2 --port 8000 &
ROCR_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.api_server --model /data/llama-2-7b-chat-hf --dtype float16 -tp 2 --port 8001 &
|
||||
|
||||
5. To test, send it a curl request containing a prompt.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
curl http://localhost:8000/generate -H "Content-Type: application/json" -d '{"prompt": "What is AMD Instinct?", "max_tokens": 80, "temperature": 0.0 }'
|
||||
|
||||
You should receive a response like the following.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
{"text":["What is AMD Instinct?\nAmd Instinct is a brand new line of high-performance computing (HPC) processors from Advanced Micro Devices (AMD). These processors are designed to deliver unparalleled performance for HPC workloads, including scientific simulations, data analytics, and machine learning.\nThe Instinct lineup includes a range of processors, from the entry-level Inst"]}
|
||||
|
||||
.. _fine-tuning-llms-tgi:
|
||||
|
||||
Hugging Face TGI
|
||||
================
|
||||
|
||||
Text Generation Inference (TGI) is an LLM serving framework from Hugging Face. It also supports the majority of
high-performance LLM acceleration algorithms such as Flash Attention, Paged Attention, CUDA/HIP graph, tensor
parallel multi-GPU, GPTQ, AWQ, and token speculation.
|
||||
|
||||
.. tip::
|
||||
|
||||
In addition to LLM serving capability, TGI also provides the `Text Generation Inference benchmarking tool
|
||||
<https://github.com/huggingface/text-generation-inference/blob/main/benchmark/README.md>`_.
|
||||
|
||||
Install TGI
|
||||
-----------
|
||||
|
||||
1. Launch the TGI Docker container on the host machine.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
docker run --name tgi --rm -it \
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
--device=/dev/kfd --device=/dev/dri --group-add video \
--ipc=host --shm-size 256g --net host -v $PWD:/data \
--entrypoint "/bin/bash" \
--env HUGGINGFACE_HUB_CACHE=/data \
ghcr.io/huggingface/text-generation-inference:latest-rocm
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: TGI on a single-accelerator system
|
||||
:sync: single
|
||||
|
||||
2. Inside the container, launch a model using the TGI server on a single accelerator.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export ROCM_USE_FLASH_ATTN_V2_TRITON=True
|
||||
text-generation-launcher --model-id NousResearch/Meta-Llama-3-70B --dtype float16 --port 8000 &
|
||||
|
||||
3. To test, send it a curl request containing a prompt.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
curl http://localhost:8000/generate_stream -X POST -d '{"inputs":"What is AMD Instinct?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'
|
||||
|
||||
You should receive a response like the following.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
data:{"index":20,"token":{"id":304,"text":" in","logprob":-1.2822266,"special":false},"generated_text":" AMD Instinct is a new family of data center GPUs designed to accelerate the most demanding workloads in","details":null}
|
||||
|
||||
.. tab-item:: TGI on a multi-accelerator system
:sync: multi
|
||||
|
||||
2. Inside the container, launch a model using the TGI server on multiple accelerators (four in this case).
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export ROCM_USE_FLASH_ATTN_V2_TRITON=True
|
||||
text-generation-launcher --model-id NousResearch/Meta-Llama-3-8B --dtype float16 --port 8000 --num-shard 4 &
|
||||
|
||||
3. To test, send it a curl request containing a prompt.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
curl http://localhost:8000/generate_stream -X POST -d '{"inputs":"What is AMD Instinct?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'
|
||||
|
||||
You should receive a response like the following.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
data:{"index":20,"token":{"id":304,"text":" in","logprob":-1.2773438,"special":false},"generated_text":" AMD Instinct is a new family of data center GPUs designed to accelerate the most demanding workloads in","details":null}
|
||||
@@ -0,0 +1,251 @@
|
||||
.. meta::
|
||||
:description: How to fine-tune LLMs with ROCm
|
||||
:keywords: ROCm, LLM, fine-tuning, usage, tutorial, Flash Attention, Hugging Face, xFormers, vLLM, PyTorch
|
||||
|
||||
****************************
|
||||
Model acceleration libraries
|
||||
****************************
|
||||
|
||||
This section discusses model acceleration techniques and libraries to improve memory efficiency and performance.
|
||||
|
||||
Flash Attention 2
|
||||
=================
|
||||
|
||||
Flash Attention is a technique designed to reduce memory movements between GPU SRAM and high-bandwidth memory (HBM). By
|
||||
using a tiling approach, Flash Attention 2 improves memory locality in the nested loops of query, key, and value
|
||||
computations within the Attention modules of LLMs. These modules include Multi-Head Attention (MHA), Group-Query
|
||||
Attention (GQA), and Multi-Query Attention (MQA). This reduction in memory movements significantly decreases the
|
||||
time-to-first-token (TTFT) latency for large batch sizes and long prompt sequences, thereby enhancing overall
|
||||
performance.
|
||||
|
||||
.. image:: ../../data/how-to/llm-fine-tuning-optimization/attention-module.png
|
||||
:alt: Attention module of a large language module utilizing tiling
|
||||
:align: center
|
||||
|
||||
Installing Flash Attention 2
|
||||
----------------------------
|
||||
|
||||
ROCm provides two different implementations of Flash Attention 2 modules. They can be deployed interchangeably:
|
||||
|
||||
* ROCm `Composable Kernel <https://github.com/ROCm/composable_kernel/tree/develop/example/01_gemm>`_
|
||||
(CK) Flash Attention 2
|
||||
|
||||
* `OpenAI Triton <https://triton-lang.org/main/index.html>`_ Flash Attention 2
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: CK Flash Attention 2
|
||||
|
||||
To install CK Flash Attention 2, use the following commands.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Install from source
|
||||
git clone https://github.com/ROCm/flash-attention.git
|
||||
cd flash-attention/
|
||||
GPU_ARCHS=gfx942 python setup.py install #MI300 series
|
||||
|
||||
Hugging Face Transformers can easily deploy the CK Flash Attention 2 module by passing the argument
``attn_implementation="flash_attention_2"`` to the ``from_pretrained`` method.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
||||
model_name = "NousResearch/Meta-Llama-3-8B"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name, torch_dtype=torch.float16, use_fast=False)
|
||||
inputs = tokenizer('Today is', return_tensors='pt').to(device)
|
||||
|
||||
model_eager = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, attn_implementation="eager").cuda(device)
|
||||
model_ckFAv2 = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, attn_implementation="flash_attention_2").cuda(device)
|
||||
|
||||
print("eager GQA: ", tokenizer.decode(model_eager.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))
|
||||
print("ckFAv2 GQA: ", tokenizer.decode(model_ckFAv2.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))
|
||||
|
||||
# eager GQA: Today is the day of the Lord, and we are the
|
||||
# ckFAv2 GQA: Today is the day of the Lord, and we are the
|
||||
|
||||
.. tab-item:: Triton Flash Attention 2
|
||||
|
||||
The Triton Flash Attention 2 module is implemented in Python and uses OpenAI’s JIT compiler. This module has been
|
||||
upstreamed into the vLLM serving toolkit, discussed in :doc:`llm-inference-frameworks`.
|
||||
|
||||
1. To install Triton Flash Attention 2 and run the benchmark, use the following commands.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Install from the source
|
||||
pip uninstall pytorch-triton-rocm triton -y
|
||||
git clone https://github.com/ROCm/triton.git
|
||||
cd triton/python
|
||||
GPU_ARCHS=gfx942 python setup.py install #MI300 series
|
||||
pip install matplotlib pandas
|
||||
|
||||
2. To test, run the Triton Flash Attention 2 performance benchmark.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Test the Triton FA v2 kernel (the script is part of the cloned ROCm/triton repo;
# run it from the triton/python directory)
python perf-kernels/flash-attention.py
# Sample results
|
||||
fused-attention-fwd-d128:
|
||||
BATCH HQ HK N_CTX_Q N_CTX_K TFLOPS
|
||||
0 16.0 16.0 16.0 1024.0 1024.0 287.528411
|
||||
1 8.0 16.0 16.0 2048.0 2048.0 287.490806
|
||||
2 4.0 16.0 16.0 4096.0 4096.0 345.966031
|
||||
3 2.0 16.0 16.0 8192.0 8192.0 361.369510
|
||||
4 1.0 16.0 16.0 16384.0 16384.0 356.873720
|
||||
5 2.0 48.0 48.0 1024.0 1024.0 216.916235
|
||||
6 2.0 48.0 48.0 2048.0 1024.0 271.027578
|
||||
7 2.0 48.0 48.0 4096.0 8192.0 337.367372
|
||||
8 2.0 48.0 48.0 8192.0 4096.0 363.481649
|
||||
9 2.0 48.0 48.0 16384.0 8192.0 375.013622
|
||||
10 8.0 16.0 16.0 1989.0 15344.0 321.791333
|
||||
11 4.0 16.0 16.0 4097.0 163.0 122.104888
|
||||
12 2.0 16.0 16.0 8122.0 2159.0 337.060283
|
||||
13 1.0 16.0 16.0 16281.0 7.0 5.234012
|
||||
14 2.0 48.0 48.0 1021.0 1020.0 214.657425
|
||||
15 2.0 48.0 48.0 2001.0 2048.0 314.429118
|
||||
16 2.0 48.0 48.0 3996.0 9639.0 330.411368
|
||||
17 2.0 48.0 48.0 8181.0 1021.0 324.614980
|
||||
|
||||
xFormers
|
||||
========
|
||||
|
||||
xFormers also improves the performance of attention modules. Its attention implementation performs very similarly to
Flash Attention 2, thanks to its similar tiling of query, key, and value; it is widely used for LLMs and for Stable
Diffusion models with the Hugging Face Diffusers library.
|
||||
|
||||
Installing CK xFormers
|
||||
----------------------
|
||||
|
||||
Use the following commands to install CK xFormers.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Install from source
|
||||
git clone https://github.com/ROCm/xformers.git
|
||||
cd xformers/
|
||||
git submodule update --init --recursive
|
||||
PYTORCH_ROCM_ARCH=gfx942 python setup.py install #Instinct MI300-series
|
||||
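To verify that the build can see your accelerator, you can exercise the memory-efficient attention operator (a
minimal sketch; the tensor shapes are arbitrary). ``python -m xformers.info`` also prints a feature report from the
shell.

.. code-block:: python

import torch
import xformers.ops as xops

# Query, key, and value in (batch, seq_len, heads, head_dim) layout.
q = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
k = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
v = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)

out = xops.memory_efficient_attention(q, k, v)
print(out.shape)  # torch.Size([1, 128, 8, 64])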
|
||||
PyTorch built-in acceleration
|
||||
=============================
|
||||
|
||||
`PyTorch compilation
|
||||
mode <https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html>`__
|
||||
synthesizes the model into a graph and then lowers it to prime
|
||||
operators. These operators are compiled using TorchInductor, which uses
|
||||
OpenAI Triton as a building block for GPU acceleration. One advantage of
|
||||
PyTorch compilation mode is that its GPU kernels are written in Python,
|
||||
making modifying and extending them easier. PyTorch compilation mode
|
||||
often delivers higher performance, as model operations are fused before
|
||||
runtime, which allows for easy deployment of high-performance kernels.
|
||||
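As a minimal sketch of the workflow (the function and shapes are arbitrary), wrapping a callable in ``torch.compile``
is enough to route it through TorchInductor and its Triton-generated kernels:

.. code-block:: python

import torch

def fused_pointwise(x):
    # TorchInductor fuses these element-wise ops into a single Triton kernel.
    return torch.sin(x) + torch.cos(x) * x

compiled = torch.compile(fused_pointwise)
x = torch.randn(8192, device="cuda")
print(torch.allclose(compiled(x), fused_pointwise(x), atol=1e-5))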
|
||||
PyTorch compilation
|
||||
-------------------
|
||||
|
||||
To utilize the PyTorch compilation mode, specific layers of the model
|
||||
must be explicitly assigned as compilation targets. In the case of LLM,
|
||||
where autoregressive token decoding generates dynamically changing
|
||||
key/value sizes, limiting the key/value size to a static dimension,
|
||||
``max_cache_length``, is necessary to utilize the performance benefits
|
||||
of the PyTorch compilation.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Sample script to run LLM with the static key-value cache and PyTorch compilation
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, StaticCache
|
||||
import torch
|
||||
from typing import Optional
|
||||
import os
|
||||
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
model_name = "NousResearch/Meta-Llama-3-8B"
|
||||
prompts = []
|
||||
|
||||
for b in range(1):
    prompts.append("New York City is where ")
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device).eval()
|
||||
inputs = tokenizer(prompts, return_tensors="pt").to(model.device)
|
||||
|
||||
def decode_one_tokens(model, cur_token, input_pos, cache_position):
    logits = model(cur_token, position_ids=input_pos, cache_position=cache_position, return_dict=False, use_cache=True)[0]
    new_token = torch.argmax(logits[:, -1], dim=-1)[:, None]
    return new_token
|
||||
|
||||
batch_size, seq_length = inputs["input_ids"].shape
|
||||
|
||||
# Static key-value cache
|
||||
max_cache_length = 1024
|
||||
max_new_tokens = 10
|
||||
model._setup_cache(StaticCache, batch_size, max_cache_len=max_cache_length)
|
||||
cache_position = torch.arange(seq_length, device=device)
|
||||
generated_ids = torch.zeros(batch_size, seq_length + max_new_tokens + 1, dtype=torch.int, device=device)
|
||||
generated_ids[:, cache_position] = inputs["input_ids"].to(device).to(torch.int)
|
||||
|
||||
logits = model(**inputs, cache_position=cache_position, return_dict=False, use_cache=True)[0]
|
||||
next_token = torch.argmax(logits[:, -1], dim=-1)[:, None]
|
||||
|
||||
# torch compilation
|
||||
decode_one_tokens = torch.compile(decode_one_tokens, mode="max-autotune-no-cudagraphs", fullgraph=True)
|
||||
|
||||
generated_ids[:, seq_length] = next_token[:, 0]
|
||||
cache_position = torch.tensor([seq_length + 1], device=device)
|
||||
|
||||
with torch.no_grad():
    for _ in range(1, max_new_tokens):
        with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
            next_token = decode_one_tokens(model, next_token.clone(), None, cache_position)
        generated_ids[:, cache_position] = next_token.int()
        cache_position += 1
|
||||
|
||||
.. _fine-tuning-llms-pytorch-tunableop:
|
||||
|
||||
PyTorch TunableOp
|
||||
------------------
|
||||
|
||||
ROCm PyTorch (2.2.0 and later) allows users to use high-performance ROCm
|
||||
GEMM kernel libraries through PyTorch's built-in TunableOp options.
|
||||
This enables users to automatically pick up the best-performing GEMM
|
||||
kernels from :doc:`rocBLAS <rocblas:index>` and :doc:`hipBLASLt <hipblaslt:index>` libraries during runtime.
|
||||
|
||||
During warm-up runs or offline profiling steps, users can create a GEMM table
that enumerates the kernel information. During the model's run, calls to
``torch.nn.functional.linear(input, weight, bias=None)`` are substituted with the best-performing kernel from the
GEMM table. The `TunableOp README <https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/cuda/tunable/README.md>`_
describes the options.
|
||||
|
||||
.. code-block:: shell

# To turn on TunableOp, simply set this environment variable
export PYTORCH_TUNABLEOP_ENABLED=1

.. code-block:: python

import torch
import torch.nn as nn
import torch.nn.functional as F
A = torch.rand(100, 20, device="cuda")
W = torch.rand(200, 20, device="cuda")
Out = F.linear(A, W)
print(Out.size())

.. code-block:: text

# tunableop_results0.csv
Validator,PT_VERSION,2.4.0
Validator,ROCM_VERSION,6.1.0.0-82-5fabb4c
Validator,HIPBLASLT_VERSION,0.7.0-1549b021
Validator,GCN_ARCH_NAME,gfx942:sramecc+:xnack-
Validator,ROCBLAS_VERSION,4.1.0-cefa4a9b-dirty
GemmTunableOp_float_TN,tn_200_100_20,Gemm_Rocblas_32323,0.00669595
|
||||
|
||||
.. image:: ../../data/how-to/llm-fine-tuning-optimization/tunableop.png
|
||||
:alt: GEMM and TunableOp
|
||||
:align: center
|
||||
|
||||
Learn more about optimizing kernels with TunableOp in
|
||||
:ref:`Optimizing Triton kernels <fine-tuning-llms-triton-tunableop>`.
|
||||
259
docs/how-to/llm-fine-tuning-optimization/model-quantization.rst
Normal file
@@ -0,0 +1,259 @@
|
||||
.. meta::
|
||||
:description: How to fine-tune LLMs with ROCm
|
||||
:keywords: ROCm, LLM, fine-tuning, usage, tutorial, quantization, GPTQ, transformers, bitsandbytes
|
||||
|
||||
*****************************
|
||||
Model quantization techniques
|
||||
*****************************
|
||||
|
||||
Quantization reduces the model size compared to its native full-precision version, making it easier to fit large models
|
||||
onto accelerators or GPUs with limited memory usage. This section explains how to perform LLM quantization using GPTQ
|
||||
and bitsandbytes on AMD Instinct hardware.
|
||||
|
||||
.. _fine-tune-llms-gptq:
|
||||
|
||||
GPTQ
|
||||
====
|
||||
|
||||
GPTQ is a post-training quantization technique where each row of the weight matrix is quantized independently to find a
|
||||
version of the weights that minimizes error. These weights are quantized to ``int4`` but are restored to ``fp16`` on the
|
||||
fly during inference. This can reduce memory usage by roughly a factor of four. A speedup in inference is expected because
|
||||
inference of GPTQ models uses a lower bit width, which takes less time to communicate.
|
||||
|
||||
Before setting up the GPTQ configuration in Transformers, ensure the `AutoGPTQ <https://github.com/AutoGPTQ/AutoGPTQ>`_ library
|
||||
is installed.
|
||||
|
||||
Installing AutoGPTQ
|
||||
-------------------
|
||||
|
||||
The AutoGPTQ library implements the GPTQ algorithm.
|
||||
|
||||
#. Use the following command to install the latest stable release of AutoGPTQ from pip.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# This will install a pre-built wheel for a specific ROCm version.
|
||||
|
||||
pip install auto-gptq --no-build-isolation --extra-index-url https://huggingface.github.io/autogptq-index/whl/rocm573/
|
||||
|
||||
Or, install AutoGPTQ from source for the appropriate ROCm version (for example, ROCm 6.1).
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Clone the source code.
|
||||
git clone https://github.com/AutoGPTQ/AutoGPTQ.git
|
||||
cd AutoGPTQ
|
||||
|
||||
# Speed up the compilation by specifying PYTORCH_ROCM_ARCH for the target device.
PYTORCH_ROCM_ARCH=gfx942 ROCM_VERSION=6.1 pip install .
|
||||
|
||||
#. Run ``pip show auto-gptq`` to print information for the installed ``auto-gptq`` package. Its output should look like
|
||||
this:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
Name: auto-gptq
|
||||
Version: 0.8.0.dev0+rocm6.1
|
||||
...
|
||||
|
||||
Using GPTQ with AutoGPTQ
|
||||
------------------------
|
||||
|
||||
#. Run the following code snippet.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import AutoTokenizer, TextGenerationPipeline
|
||||
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
|
||||
base_model_name = "NousResearch/Llama-2-7b-hf"
|
||||
quantized_model_name = "llama-2-7b-hf-gptq"
|
||||
tokenizer = AutoTokenizer.from_pretrained(base_model_name, use_fast=True)
|
||||
examples = [
|
||||
tokenizer(
|
||||
"auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
|
||||
)
|
||||
]
|
||||
print(examples)
|
||||
|
||||
The resulting examples should be a list of dictionaries whose keys are ``input_ids`` and ``attention_mask``.
|
||||
|
||||
#. Set up the quantization configuration using the following snippet.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
quantize_config = BaseQuantizeConfig(
|
||||
bits=4, # quantize model to 4-bit
|
||||
group_size=128, # it is recommended to set the value to 128
|
||||
desc_act=False,
|
||||
)
|
||||
|
||||
#. Load the non-quantized model using the AutoGPTQ class and run the quantization.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Import auto_gptq class.
|
||||
from auto_gptq import AutoGPTQForCausalLM
|
||||
|
||||
# Load non-quantized model.
|
||||
base_model = AutoGPTQForCausalLM.from_pretrained(base_model_name, quantize_config, device_map = "auto")
|
||||
base_model.quantize(examples)
|
||||
|
||||
# Save quantized model.
|
||||
base_model.save_quantized(quantized_model_name)
|
||||
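To check the result, you can load the quantized weights back with ``from_quantized`` and run a short generation.
This is a minimal sketch that reuses the tokenizer and names from the steps above; the prompt is arbitrary.

.. code-block:: python

from auto_gptq import AutoGPTQForCausalLM

# Load the quantized model saved in the previous step.
quantized_model = AutoGPTQForCausalLM.from_quantized(quantized_model_name, device="cuda:0")

inputs = tokenizer("auto-gptq is", return_tensors="pt").to("cuda:0")
print(tokenizer.decode(quantized_model.generate(**inputs, max_new_tokens=20)[0]))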
|
||||
Using GPTQ with Hugging Face Transformers
|
||||
------------------------------------------
|
||||
|
||||
#. To perform a GPTQ quantization using Hugging Face Transformers, create a ``GPTQConfig`` instance and set the
number of bits to quantize to and a dataset to calibrate the weights.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
|
||||
|
||||
base_model_name = " NousResearch/Llama-2-7b-hf"
|
||||
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
|
||||
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
|
||||
|
||||
#. Load a model to quantize using ``AutoModelForCausalLM`` and pass the
``gptq_config`` to its ``from_pretrained`` method. Set ``device_map="auto"`` to
automatically offload the model to available GPU resources.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
quantized_model = AutoModelForCausalLM.from_pretrained(
|
||||
base_model_name,
|
||||
device_map="auto",
|
||||
quantization_config=gptq_config)
|
||||
|
||||
#. Once the model is quantized, you can push the model and tokenizer to the Hugging Face Hub for easy sharing and access.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
quantized_model.push_to_hub("llama-2-7b-hf-gptq")
|
||||
tokenizer.push_to_hub("llama-2-7b-hf-gptq")
|
||||
|
||||
Or, you can save the model locally using the following snippet.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
quantized_model.save_pretrained("llama-2-7b-gptq")
|
||||
tokenizer.save_pretrained("llama-2-7b-gptq")
|
||||
|
||||
ExLlama-v2 support
|
||||
------------------
|
||||
|
||||
ExLlama is a Python/C++/CUDA implementation of the Llama model that is
|
||||
designed for faster inference with 4-bit GPTQ weights. The ExLlama
|
||||
kernel is activated by default when users create a ``GPTQConfig`` object. To
|
||||
boost inference speed even further on Instinct accelerators, use the ExLlama-v2
|
||||
kernels by configuring the ``exllama_config`` parameter as the following.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import AutoModelForCausalLM, GPTQConfig
|
||||
|
||||
base_model_name = "NousResearch/Llama-2-7b-hf"
|
||||
gptq_config = GPTQConfig(bits=4, dataset="c4", exllama_config={"version":2})
|
||||
quantized_model = AutoModelForCausalLM.from_pretrained(
|
||||
base_model_name,
|
||||
device_map="auto",
|
||||
quantization_config=gptq_config)
|
||||
bitsandbytes
|
||||
============
|
||||
|
||||
The `ROCm-aware bitsandbytes <https://github.com/ROCm/bitsandbytes>`_ library is
|
||||
a lightweight Python wrapper around CUDA custom functions, in particular 8-bit optimizer, matrix multiplication, and
|
||||
8-bit and 4-bit quantization functions. The library includes quantization primitives for 8-bit and 4-bit operations
|
||||
through ``bitsandbytes.nn.Linear8bitLt`` and ``bitsandbytes.nn.Linear4bit`` and 8-bit optimizers through the
|
||||
``bitsandbytes.optim`` module. These modules are supported on AMD Instinct accelerators.
|
||||
|
||||
Installing bitsandbytes
|
||||
-----------------------
|
||||
|
||||
#. To install bitsandbytes for ROCm 6.0 (and later), use the following commands.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Clone the github repo
|
||||
git clone --recurse https://github.com/ROCm/bitsandbytes.git
|
||||
cd bitsandbytes
|
||||
git checkout rocm_enabled
|
||||
|
||||
# Install dependencies
|
||||
pip install -r requirements-dev.txt
|
||||
|
||||
# Use -DBNB_ROCM_ARCH to specify target GPU arch
|
||||
cmake -DBNB_ROCM_ARCH="gfx942" -DCOMPUTE_BACKEND=hip -S .
|
||||
|
||||
# Install
|
||||
python setup.py install
|
||||
|
||||
#. Run ``pip show bitsandbytes`` to show information about the installed bitsandbytes package. Its output should
|
||||
look like the following.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
Name: bitsandbytes
|
||||
Version: 0.44.0.dev0
|
||||
...
|
||||
|
||||
Using bitsandbytes primitives
|
||||
-----------------------------
|
||||
|
||||
To get started with bitsandbytes primitives, use the following code as reference.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import bitsandbytes as bnb
|
||||
|
||||
# Use Int8 Matrix Multiplication
|
||||
bnb.matmul(..., threshold=6.0)
|
||||
|
||||
# Use bitsandbytes 8-bit Optimizers
|
||||
adam = bnb.optim.Adam8bit(model.parameters(), lr=0.001, betas=(0.9, 0.995))
|
||||
|
||||
Using bitsandbytes with Hugging Face Transformers
|
||||
-------------------------------------------------
|
||||
|
||||
To load a Transformers model in 4-bit, set ``load_in_4bit=True`` in ``BitsAndBytesConfig``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
|
||||
|
||||
base_model_name = "NousResearch/Llama-2-7b-hf"
|
||||
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
|
||||
bnb_model_4bit = AutoModelForCausalLM.from_pretrained(
|
||||
base_model_name,
|
||||
device_map="auto",
|
||||
quantization_config=quantization_config)
|
||||
|
||||
# Check the memory footprint with get_memory_footprint method
|
||||
print(bnb_model_4bit.get_memory_footprint())
|
||||
|
||||
To load a model in 8-bit for inference, use the ``load_in_8bit`` option.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base_model_name = "NousResearch/Llama-2-7b-hf"

tokenizer = AutoTokenizer.from_pretrained(base_model_name)
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
|
||||
bnb_model_8bit = AutoModelForCausalLM.from_pretrained(
|
||||
base_model_name,
|
||||
device_map="auto",
|
||||
quantization_config=quantization_config)
|
||||
|
||||
prompt = "What is a large language model?"
|
||||
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
|
||||
generated_ids = bnb_model_8bit.generate(**inputs)
|
||||
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
|
||||
|
||||
@@ -0,0 +1,236 @@
|
||||
.. meta::
|
||||
:description: Model fine-tuning and inference on a multi-GPU system
|
||||
:keywords: ROCm, LLM, fine-tuning, usage, tutorial, multi-GPU, distributed, inference
|
||||
|
||||
*****************************************************
|
||||
Fine-tuning and inference using multiple accelerators
|
||||
*****************************************************
|
||||
|
||||
This section explains how to fine-tune a model on a multi-accelerator system. See
|
||||
:doc:`Single-accelerator fine-tuning <single-gpu-fine-tuning-and-inference>` for a single accelerator or GPU setup.
|
||||
|
||||
.. _fine-tuning-llms-multi-gpu-env:
|
||||
|
||||
Environment setup
|
||||
=================
|
||||
|
||||
This section was tested using the following hardware and software environment.
|
||||
|
||||
.. list-table::
|
||||
:stub-columns: 1
|
||||
|
||||
* - Hardware
|
||||
- 4 AMD Instinct MI300X accelerators
|
||||
|
||||
* - Software
|
||||
- ROCm 6.1, Ubuntu 22.04, PyTorch 2.1.2, Python 3.10
|
||||
|
||||
* - Libraries
|
||||
- ``transformers`` ``datasets`` ``accelerate`` ``huggingface-hub`` ``peft`` ``trl`` ``scipy``
|
||||
|
||||
* - Base model
|
||||
- ``meta-llama/Llama-2-7b-chat-hf``
|
||||
|
||||
.. _fine-tuning-llms-multi-gpu-env-setup:
|
||||
|
||||
Setting up the base implementation environment
|
||||
----------------------------------------------
|
||||
|
||||
#. Install PyTorch for ROCm. Refer to the
|
||||
:doc:`PyTorch installation guide <rocm-install-on-linux:how-to/3rd-party/pytorch-install>`. For consistent
|
||||
installation, it’s recommended to use official ROCm prebuilt Docker images with the framework pre-installed.
|
||||
|
||||
#. In the Docker container, check the availability of ROCm-capable accelerators using the following command.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
rocm-smi --showproductname
|
||||
|
||||
#. Check that your accelerators are available to PyTorch.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import torch
|
||||
print("Is a ROCm-GPU detected? ", torch.cuda.is_available())
|
||||
print("How many ROCm-GPUs are detected? ", torch.cuda.device_count())
|
||||
|
||||
If successful, your output should look like this:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
>>> print("Is a ROCm-GPU detected? ", torch.cuda.is_available())
|
||||
Is a ROCm-GPU detected? True
|
||||
>>> print("How many ROCm-GPUs are detected? ", torch.cuda.device_count())
|
||||
How many ROCm-GPUs are detected? 4
|
||||
|
||||
.. tip::
|
||||
|
||||
During training and inference, you can check the memory usage by running the ``rocm-smi`` command in your terminal.
|
||||
This tool shows which accelerators or GPUs are involved.
|
||||
|
||||
|
||||
.. _fine-tuning-llms-multi-gpu-hugging-face-accelerate:
|
||||
|
||||
Hugging Face Accelerate for fine-tuning and inference
|
||||
===========================================================
|
||||
|
||||
`Hugging Face Accelerate <https://huggingface.co/docs/accelerate/en/index>`_ is a library that simplifies turning raw
|
||||
PyTorch code for a single accelerator into code for multiple accelerators for LLM fine-tuning and inference. It is
|
||||
integrated with `Transformers <https://huggingface.co/docs/transformers/en/index>`_, allowing you to scale your PyTorch
|
||||
code while maintaining performance and flexibility.
|
||||
|
||||
As a brief example of model fine-tuning and inference using multiple GPUs, let's use Transformers and load in the Llama
|
||||
2 7B model.
|
||||
|
||||
Here, let's reuse the code in :ref:`Single-accelerator fine-tuning <fine-tuning-llms-single-gpu-download-model-dataset>`
|
||||
to load the base model and tokenizer.
|
||||
|
||||
Now, it's important to adjust how you load the model. Add the ``device_map`` parameter to your base model configuration.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
...
|
||||
base_model_name = "meta-llama/Llama-2-7b-chat-hf"
|
||||
|
||||
# Load base model to GPU memory
|
||||
base_model = AutoModelForCausalLM.from_pretrained(
|
||||
base_model_name,
|
||||
device_map = "auto"
|
||||
trust_remote_code = True)
|
||||
...
|
||||
# Run training
|
||||
sft_trainer.train()
|
||||
|
||||
.. note::
|
||||
|
||||
You can let Accelerate handle the device map computation by setting ``device_map`` to one of the supported options
|
||||
(``"auto"``, ``"balanced"``, ``"balanced_low_0"``, ``"sequential"``).
|
||||
|
||||
It's recommended to set the ``device_map`` parameter to ``"auto"`` to allow Accelerate to automatically and
|
||||
efficiently allocate the model given the available resources (4 accelerators in this case).
|
||||
|
||||
When you have more GPU memory available than the model size, here is the difference between each ``device_map``
|
||||
option:
|
||||
|
||||
* ``"auto"`` and ``"balanced"`` evenly split the model on all available GPUs, making it possible for you to use a
|
||||
batch size greater than 1.
|
||||
|
||||
* ``"balanced_low_0"`` evenly splits the model on all GPUs except the first
|
||||
one, and only puts on GPU 0 what does not fit on the others. This
|
||||
option is great when you need to use GPU 0 for some processing of the
|
||||
outputs, like when using the generate function for Transformers
|
||||
models.
|
||||
|
||||
* ``"sequential"`` will fit what it can on GPU 0, then move on GPU 1 and so forth. Not all GPUs might be used.
|
||||
|
||||
After loading the model in this way, the model is fully ready to use the resources available to it.
|
||||
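To see how Accelerate actually distributed the model, you can inspect the ``hf_device_map`` attribute that
Transformers populates when a model is loaded with ``device_map``:

.. code-block:: python

# Maps each module or layer name to the device it was placed on.
print(base_model.hf_device_map)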
|
||||
.. _fine-tuning-llms-multi-gpu-torchtune:
|
||||
|
||||
torchtune for fine-tuning and inference
|
||||
=============================================
|
||||
|
||||
`torchtune <https://pytorch.org/torchtune/main/>`_ is a PyTorch-native library for easy single and multi-accelerator or
|
||||
GPU model fine-tuning and inference with LLMs.
|
||||
|
||||
#. Install torchtune using pip.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Install torchtune with PyTorch release 2.2.2+
|
||||
pip install torchtune
|
||||
|
||||
# To confirm that the package is installed correctly
|
||||
tune --help
|
||||
|
||||
The output should look like this:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
usage: tune [-h] {download,ls,cp,run,validate} ...
|
||||
|
||||
Welcome to the TorchTune CLI!
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
|
||||
subcommands:
|
||||
{download,ls,cp,run,validate}
|
||||
|
||||
#. torchtune recipes are designed around easily composable components and workable training loops, with minimal abstraction
|
||||
getting in the way of fine-tuning. Run ``tune ls`` to show built-in torchtune configuration recipes.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
RECIPE CONFIG
|
||||
full_finetune_single_device llama2/7B_full_low_memory
|
||||
llama3/8B_full_single_device
|
||||
mistral/7B_full_low_memory
|
||||
full_finetune_distributed llama2/7B_full
|
||||
llama2/13B_full
|
||||
llama3/8B_full
|
||||
mistral/7B_full
|
||||
gemma/2B_full
|
||||
lora_finetune_single_device llama2/7B_lora_single_device
|
||||
llama2/7B_qlora_single_device
|
||||
llama3/8B_lora_single_device
|
||||
llama3/8B_qlora_single_device
|
||||
llama2/13B_qlora_single_device
|
||||
mistral/7B_lora_single_device
|
||||
|
||||
The ``RECIPE`` column shows the easy-to-use and workable fine-tuning and inference recipes for popular fine-tuning
|
||||
techniques (such as LoRA). The ``CONFIG`` column lists the YAML configurations for easily configuring training,
|
||||
evaluation, quantization, or inference recipes.
|
||||
|
||||
The following snippet shows the structure of a model's YAML configuration file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# Model arguments
|
||||
model:
|
||||
_component_: torchtune.models.llama2.lora_llama2_7b
|
||||
lora_attn_modules: ['q_proj', 'v_proj']
|
||||
apply_lora_to_mlp: False
|
||||
apply_lora_to_output: False
|
||||
lora_rank: 8
|
||||
lora_alpha: 16
|
||||
|
||||
tokenizer:
|
||||
_component_: torchtune.models.llama2.llama2_tokenizer
|
||||
path: /tmp/Llama-2-7b-hf/tokenizer.model
|
||||
|
||||
# Dataset and sampler
|
||||
dataset:
|
||||
_component_: torchtune.datasets.alpaca_cleaned_dataset
|
||||
train_on_input: True
|
||||
|
||||
#. This configuration file defines the fine-tuning base model path, data set, hyper-parameters for optimizer and scheduler,
|
||||
and training data type. To download the base model for fine-tuning, run the following command:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
tune download meta-llama/Llama-2-7b-hf --output-dir /tmp/Llama-2-7b-hf --hf-token <ACCESS_TOKEN>
|
||||
|
||||
The output directory argument for ``--output-dir`` should match the model path specified in the YAML config file.
|
||||
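If you want to adjust hyperparameters before launching, ``tune cp`` (one of the CLI subcommands listed above) copies
a built-in config to a local file that you can edit; the destination filename here is just an example.

.. code-block:: shell

tune cp llama2/7B_lora ./custom_7B_lora.yaml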
|
||||
#. To launch ``lora_finetune_distributed`` on four devices, run the following
|
||||
command:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
tune run --nnodes 1 --nproc_per_node 4 lora_finetune_distributed --config llama2/7B_lora
|
||||
|
||||
If successful, you should see something like the following output:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
INFO:torchtune.utils.logging:FSDP is enabled. Instantiating Model on CPU for Rank 0 ...
|
||||
INFO:torchtune.utils.logging:Model instantiation took 7.32 secs
|
||||
INFO:torchtune.utils.logging:Memory Stats after model init:
|
||||
{'peak_memory_active': 9.478172672, 'peak_memory_alloc': 8.953868288, 'peak_memory_reserved': 11.112808448}
|
||||
INFO:torchtune.utils.logging:Optimizer and loss are initialized.
|
||||
INFO:torchtune.utils.logging:Dataset and Sampler are initialized.
|
||||
INFO:torchtune.utils.logging:Learning rate scheduler is initialized.
|
||||
1|111|Loss: 1.5790324211120605: 7%|█ | 114/1618
|
||||
|
||||
Read more about inference frameworks in :doc:`LLM inference frameworks <llm-inference-frameworks>`.
|
||||
@@ -0,0 +1,383 @@
|
||||
.. meta::
|
||||
:description: How to fine-tune LLMs with ROCm
|
||||
:keywords: ROCm, LLM, fine-tuning, usage, tutorial, Triton, kernel, performance, optimization
|
||||
|
||||
*************************
|
||||
Optimizing Triton kernels
|
||||
*************************
|
||||
|
||||
This section introduces the general steps for `Triton <https://openai.com/index/triton/>`_ kernel optimization. Broadly,
|
||||
Triton kernel optimization is similar to HIP and CUDA kernel optimization.
|
||||
|
||||
.. _fine-tuning-llms-triton-memory-access-efficiency:
|
||||
|
||||
Memory access efficiency
|
||||
========================
|
||||
|
||||
The accelerator or GPU contains global memory, local data share (LDS), and registers. Global memory has high access
|
||||
latency, but is large. LDS access has much lower latency, but is smaller. Register access is the fastest yet smallest
|
||||
among the three.
|
||||
|
||||
So, the data in global memory should be loaded and stored as few times as possible. If different threads in a block
|
||||
need to access the same data, these data should be first transferred from global memory to LDS, then accessed by
|
||||
different threads in a workgroup.
|
||||
|
||||
.. _fine-tuning-llms-triton-hardware-resource-utilization:
|
||||
|
||||
Hardware resource utilization
|
||||
=============================
|
||||
|
||||
Each accelerator or GPU has multiple Compute Units (CUs), and the CUs do computation in parallel. So, how many CUs
can a compute kernel allocate its task to? For the :doc:`AMD MI300X accelerator <../../reference/gpu-arch-specs>`, the
grid should have at least 1024 thread blocks or workgroups.
|
||||
|
||||
.. figure:: ../../data/how-to/llm-fine-tuning-optimization/compute-unit.png
|
||||
|
||||
Schematic representation of a CU in the CDNA2 or CDNA3 architecture.
|
||||
|
||||
To increase hardware utilization and maximize parallelism, it is necessary to design algorithms that can exploit more
|
||||
parallelism. One approach to achieving this is by using larger split-K techniques for General Matrix Multiply (GEMM)
|
||||
operations, which can further distribute the computation across more CUs, thereby enhancing performance.
|
||||
|
||||
.. tip::
|
||||
|
||||
You can query hardware resources with the command ``rocminfo`` (in the ``/opt/rocm/bin`` directory). For instance,
|
||||
query the number of CUs, number of SIMD, and wavefront size using the following commands.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
rocminfo | grep "Compute Unit"
|
||||
|
||||
rocminfo | grep "SIMD"
|
||||
|
||||
rocminfo | grep "Wavefront Size"
|
||||
|
||||
On an MI300X device, there are 304 CUs, 4 SIMD per CU, and the wavefront size (warp size) is 64. See :doc:`Hardware
|
||||
specifications <../../reference/gpu-arch-specs>` for a full list of AMD accelerators and GPUs.
|
||||
|
||||
.. _fine-tuning-llms-triton-ir-analysis:
|
||||
|
||||
IR analysis
|
||||
===========
|
||||
|
||||
In Triton, there are several layouts including *blocked*, *shared*, *sliced*, and *MFMA*.
|
||||
|
||||
From the Triton GPU IR (intermediate representation), you can see in which memory each computation is
performed. The following is a snippet of IR from the Flash Attention decode ``int4`` key-value program. It
dequantizes the key-value pairs from the ``int4`` data type to ``fp16``.
|
||||
|
||||
.. code-block::
|
||||
|
||||
%190 = tt.load %189 {cache = 1 : i32, evict = 1 : i32, isVolatile =
|
||||
false} : tensor<1x64xi32, #blocked6> loc(#loc159)
|
||||
|
||||
%266 = arith.andi %190, %cst_28 : tensor<1x64xi32, #blocked6>
|
||||
loc(#loc250)
|
||||
|
||||
%267 = arith.trunci %266 : tensor<1x64xi32, #blocked6> to
|
||||
tensor<1x64xi16, #blocked6> loc(#loc251)
|
||||
|
||||
%268 = tt.bitcast %267 : tensor<1x64xi16, #blocked6> -> tensor<1x64xf16,
|
||||
#blocked6> loc(#loc252)
|
||||
|
||||
%269 = triton_gpu.convert_layout %268 : (tensor<1x64xf16, #blocked6>) ->
|
||||
tensor<1x64xf16, #shared1> loc(#loc252)
|
||||
|
||||
%270 = tt.trans %269 : (tensor<1x64xf16, #shared1>) -> tensor<64x1xf16,
|
||||
#shared2> loc(#loc194)
|
||||
|
||||
%276 = triton_gpu.convert_layout %270 : (tensor<64x1xf16, #shared2>) ->
|
||||
tensor<64x1xf16, #blocked5> loc(#loc254)
|
||||
|
||||
%293 = arith.mulf %276, %cst_30 : tensor<64x1xf16, #blocked5>
|
||||
loc(#loc254)
|
||||
|
||||
%295 = arith.mulf %292, %294 : tensor<64x32xf16, #blocked5> loc(#loc264)
|
||||
|
||||
%297 = arith.addf %295, %296 : tensor<64x32xf16, #blocked5> loc(#loc255)
|
||||
|
||||
%298 = triton_gpu.convert_layout %297 : (tensor<64x32xf16, #blocked5>)
|
||||
-> tensor<64x32xf16, #shared1> loc(#loc255)
|
||||
|
||||
%299 = tt.trans %298 : (tensor<64x32xf16, #shared1>) ->
|
||||
tensor<32x64xf16, #shared2> loc(#loc196)
|
||||
|
||||
%300 = triton_gpu.convert_layout %299 : (tensor<32x64xf16, #shared2>) ->
|
||||
tensor<32x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mfma, kWidth
|
||||
= 4}>> loc(#loc197)
|
||||
|
||||
From the IR, you can see that ``i32`` data is loaded from global memory to registers. After a few element-wise
operations in registers, it is stored in shared memory for the transpose operation, which requires data movement
across different threads. Once the transpose is done, the data is loaded from LDS to registers again, and after a few
more element-wise operations, it is stored in LDS again. The last step loads from LDS to registers and converts to
the dot-operand layout.
|
||||
|
||||
From the IR, you can see that it uses the LDS twice: once for the transpose, and once to convert the blocked layout
to a dot-operand layout.
|
||||
|
||||
Assembly analysis
|
||||
=================
|
||||
|
||||
In the ISA, ensure ``global_load_dwordx4`` is used, especially when the
|
||||
load happens in a loop.
|
||||
|
||||
In most cases, the LDS load and store should use ``_b128`` as well to
minimize the number of LDS access instructions. Note that the upstream backend might not have ``_b128`` LDS
read/write, in which case it uses ``_b64``. In most cases, whether you use the fork or upstream,
the LDS access should have ``_b64`` vector width.
|
||||
|
||||
The AMD ISA has the ``s_waitcnt`` instruction to synchronize the dependency
|
||||
of memory access and computations. The ``s_waitcnt`` instruction can
|
||||
have two signals, typically in the context of Triton:
|
||||
|
||||
* ``lgkmcnt(n):`` `lgkm` stands for LDS, GDS, Constant and Message.
|
||||
|
||||
In this context, it is often related to LDS access. The number ``n`` here is the number of such accesses that can
still be outstanding before execution continues. For example, 0 means all ``lgkm`` accesses must finish before
continuing, and 1 means only 1 ``lgkm`` access can still be running asynchronously before proceeding.
|
||||
|
||||
* ``vmcnt(n):`` `vm` means vector memory.
|
||||
|
||||
This happens when vector memory is accessed, for example, when global load moves from global memory to vector memory.
|
||||
Again, the number ``n`` here is the number of accesses that can still be outstanding before execution continues.
|
||||
|
||||
Generally recommended guidelines are as follows.
|
||||
|
||||
* Vectorize memory access as much as possible.
|
||||
|
||||
* Ensure synchronization is done efficiently.
|
||||
|
||||
* Overlap instructions to hide latency; this requires thoughtful analysis of the algorithms.
|
||||
|
||||
* If you find inefficiencies, you can trace them back to LLVM IR, TTGIR,
and even TTIR to see where the problem comes from. If the problem arises
during compiler optimization, activate the MLIR dump and check which
optimization pass caused it.
|
||||
|
||||
.. _fine-tuning-llms-triton-kernel-occupancy:
|
||||
|
||||
Kernel occupancy
|
||||
================
|
||||
|
||||
1. Get the VGPR count: search for ``.vgpr_count`` in the ISA (call this value ``N``).

2. Get the allocated LDS using the following steps (call this value ``L``).
|
||||
|
||||
a. ``export MLIR_ENABLE_DUMP=1``
|
||||
|
||||
b. ``rm -rf ~/.triton/cache``
|
||||
|
||||
c. ``python kernel.py 2>&1 | grep "triton_gpu.shared = " | tail -n 1``
|
||||
|
||||
d. You should see something like ``triton_gpu.shared = 65536``, indicating 65536 bytes of LDS are allocated for the
|
||||
kernel.
|
||||
|
||||
3. Get the number of waves per workgroup using the following steps (call this value ``nW``).
|
||||
|
||||
a. ``export MLIR_ENABLE_DUMP=1``
|
||||
|
||||
b. ``rm -rf ~/.triton/cache``
|
||||
|
||||
c. ``python kernel.py 2>&1 | grep "triton_gpu.num-warps " | tail -n 1``
|
||||
|
||||
d. You should see something like ``"triton_gpu.num-warps" = 8``, indicating 8 waves per workgroup.
|
||||
|
||||
4. Compute the occupancy limited by VGPR usage based on ``N``, according to the following table. Call the resulting
waves-per-EU value ``occ_vgpr``.
|
||||
|
||||
.. _fine-tuning-llms-occupancy-vgpr-table:
|
||||
|
||||
.. figure:: ../../data/how-to/llm-fine-tuning-optimization/occupancy-vgpr.png
|
||||
:alt: Occupancy related to VGPR usage in an Instinct MI300X accelerator.
|
||||
:align: center
|
||||
|
||||
5. Compute the occupancy limited by LDS based on ``L``: ``occ_lds = floor(65536 / L)``.
|
||||
|
||||
6. Then the occupancy is ``occ = min(floor(occ_vgpr * 4 / nW), occ_lds) * nW / 4``
|
||||
|
||||
a. ``occ_vgpr * 4`` gives the total number of waves on all 4 execution units (SIMDs)
per CU.

b. ``floor(occ_vgpr * 4 / nW)`` gives the occupancy of workgroups per CU
regarding VGPR usage.

c. The true ``occ`` is the minimum of the two (a sketch computing this follows the list).
|
||||
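The following sketch turns these steps into a small calculator. The inputs are the values collected above; the
example numbers are only illustrative.

.. code-block:: python

def kernel_occupancy(occ_vgpr: int, lds_bytes: int, n_waves: int) -> float:
    """occ_vgpr: waves per EU from the VGPR table; lds_bytes: L; n_waves: nW."""
    occ_lds = 65536 // lds_bytes  # step 5
    # Step 6: the occupancy is bounded by both VGPR and LDS usage.
    return min((occ_vgpr * 4) // n_waves, occ_lds) * n_waves / 4

# Example: 2 waves/EU from the VGPR table, 32 KiB of LDS, 8 waves per workgroup.
print(kernel_occupancy(occ_vgpr=2, lds_bytes=32768, n_waves=8))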
|
||||
.. _fine-tuning-llms-triton-kernel-configs-env-vars:
|
||||
|
||||
Auto-tunable kernel configurations and environment variables
|
||||
============================================================
|
||||
|
||||
This section relates to the amount of :ref:`memory access <fine-tuning-llms-triton-memory-access-efficiency>` and
|
||||
computation assigned to each CU. It is related to the usage of LDS, registers and the scheduling of different tasks on
|
||||
a CU.
|
||||
|
||||
The following is a list of kernel arguments used for tuning; a sketch showing how these are typically passed to a kernel follows the list.
|
||||
|
||||
``num_stages=n``
|
||||
Adjusts the number of pipeline stages for different types of kernels. On AMD accelerators, set ``num_stages``
|
||||
according to the following rules:
|
||||
|
||||
* For kernels with a single GEMM, set to ``0``.
|
||||
|
||||
* For kernels with two GEMMs fused (Flash Attention, or any other kernel
|
||||
that fuses 2 GEMMs), set to ``1``.
|
||||
|
||||
* For kernels that fuse a single GEMM with another non-GEMM operator
|
||||
(for example ReLU activation), set to ``0``.
|
||||
|
||||
* For kernels that have no GEMMs, set to ``1``.
|
||||
|
||||
``waves_per_eu=n``
|
||||
Helps to manage Vector General Purpose Registers (VGPR) usage to achieve desired occupancy levels. This argument
|
||||
hints to the compiler to reduce VGPR to achieve ``n`` occupancy. See
|
||||
:ref:`Kernel occupancy <fine-tuning-llms-triton-kernel-occupancy>` for more information about how to compute
|
||||
occupancy.
|
||||
|
||||
This argument is useful if:
|
||||
|
||||
* The occupancy of the kernel is limited by VGPR usage.
|
||||
|
||||
* The current VGPR usage is only a few above a boundary in
|
||||
:ref:`Occupancy related to VGPR usage in an Instinct MI300X accelerator <fine-tuning-llms-occupancy-vgpr-table>`.
|
||||
|
||||
For example, according to the table, the available VGPR is 512 per Execution Unit (EU), and VGPR is allocated in
units of 16. If the current VGPR usage is 170, the actual requested VGPR will be 176, so the
occupancy is only 2 waves per EU since :math:`176 \times 3 > 512`. So, if you set
|
||||
``waves_per_eu`` to 3, the LLVM backend tries to bring VGPR usage down so
|
||||
that it might fit 3 waves per EU.
|
||||
|
||||
``BLOCK_M``, ``BLOCK_N``, ``BLOCK_K``
|
||||
Tile sizes to be tuned to balance the memory-to-computation ratio. You want tile sizes large enough to
|
||||
maximize the efficiency of memory-to-computation ratio, but small enough to parallelize the greatest number of
|
||||
workgroups at the grid level.
|
||||
|
||||
``matrix_instr_nonkdim``
|
||||
Experimental feature for Flash Attention-like kernels that determines the size of the Matrix Fused Multiply-Add
|
||||
(MFMA) instruction used.
|
||||
|
||||
- ``matrix_instr_nonkdim = 16``: ``mfma_16x16`` is used.

- ``matrix_instr_nonkdim = 32``: ``mfma_32x32`` is used.
|
||||
|
||||
For GEMM kernels on an AMD MI300X accelerator, ``mfma_16x16`` typically outperforms ``mfma_32x32``, even for large
|
||||
tile/GEMM sizes.
|
||||
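As a sketch of how these knobs are passed in practice, the following minimal GEMM kernel wires ``num_stages``,
``waves_per_eu``, and ``matrix_instr_nonkdim`` into ``triton.Config`` entries for the autotuner. The tile sizes are
illustrative, and the kernel assumes ``M``, ``N``, and ``K`` are multiples of the block sizes (no masking).

.. code-block:: python

import torch
import triton
import triton.language as tl

@triton.autotune(
    configs=[
        # num_stages=0 for a single-GEMM kernel, per the rules above.
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 64,
                       "waves_per_eu": 2, "matrix_instr_nonkdim": 16},
                      num_warps=8, num_stages=0),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 64,
                       "waves_per_eu": 3, "matrix_instr_nonkdim": 16},
                      num_warps=4, num_stages=0),
    ],
    key=["M", "N", "K"],
)
@triton.jit
def gemm_kernel(a_ptr, b_ptr, c_ptr, M, N, K,
                stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn,
                BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr):
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    offs_k = tl.arange(0, BLOCK_K)
    a_ptrs = a_ptr + offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak
    b_ptrs = b_ptr + offs_k[:, None] * stride_bk + offs_n[None, :] * stride_bn
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
    for _ in range(0, tl.cdiv(K, BLOCK_K)):
        acc += tl.dot(tl.load(a_ptrs), tl.load(b_ptrs))
        a_ptrs += BLOCK_K * stride_ak
        b_ptrs += BLOCK_K * stride_bk
    c_ptrs = c_ptr + offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn
    tl.store(c_ptrs, acc.to(tl.float16))

a = torch.randn(1024, 1024, device="cuda", dtype=torch.float16)
b = torch.randn(1024, 1024, device="cuda", dtype=torch.float16)
c = torch.empty(1024, 1024, device="cuda", dtype=torch.float16)
grid = lambda meta: (triton.cdiv(1024, meta["BLOCK_M"]), triton.cdiv(1024, meta["BLOCK_N"]))
gemm_kernel[grid](a, b, c, 1024, 1024, 1024,
                  a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1))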
|
||||
The following is an environment variable used for tuning.
|
||||
|
||||
``OPTIMIZE_EPILOGUE``
|
||||
Setting this variable to ``1`` can improve performance by removing the ``convert_layout`` operation in the epilogue.
|
||||
It should be turned on (set to ``1``) in most cases. Setting ``OPTIMIZE_EPILOGUE=1`` stores the MFMA instruction
|
||||
results in the MFMA layout directly; this comes at the cost of reduced global store efficiency, but the impact on
|
||||
kernel execution time is usually minimal.
|
||||
|
||||
By default (``0``), the results of the MFMA instruction are converted to a blocked layout, which leads to ``global_store``
|
||||
with maximum vector length, that is ``global_store_dwordx4``.
|
||||
|
||||
This is done implicitly with LDS as the intermediate buffer to achieve
|
||||
data exchange between threads. Padding is used in LDS to avoid bank
|
||||
conflicts. This usually leads to extra LDS usage, which might reduce
|
||||
occupancy.
|
||||
|
||||
.. note::
|
||||
|
||||
This variable is not turned on by default because it only
|
||||
works with ``tt.store`` but not ``tt.atomic_add``, which is used in split-k and
|
||||
stream-k GEMM kernels. In the future, it might be enabled with
|
||||
``tt.atomic_add`` and turned on by default.
|
||||
|
||||
See :ref:`IR analysis <fine-tuning-llms-triton-ir-analysis>`.
|
||||
|
||||
TorchInductor with Triton tuning knobs
|
||||
===========================================
|
||||
|
||||
The following are suggestions for optimizing matrix multiplication (GEMM) and convolution (``conv``) operations in PyTorch
using ``inductor``, a part of the PyTorch compilation framework. The goal is to leverage Triton to achieve better
performance; a sketch that applies the main settings in code follows the list.
|
||||
|
||||
Learn more about TorchInductor environment variables and usage in
|
||||
`PyTorch documentation <https://pytorch.org/docs/2.3/torch.compiler_inductor_profiling.html>`_.
|
||||
|
||||
To enable a ``gemm``/``conv`` lowering to Triton, it requires use of ``inductor``’s ``max_autotune`` mode. This benchmarks a
|
||||
static list of Triton configurations (``conv`` configurations for max auto-tune + ``matmul`` configurations for max
|
||||
auto-tune) and uses the fastest for each shape. Note that Triton is not used if regular :doc:`MIOpen <miopen:index>`
|
||||
or :doc:`rocBLAS <rocblas:index>` is faster for a specific operation.
|
||||
|
||||
* Set ``torch._inductor.config.max_autotune = True`` or ``TORCHINDUCTOR_MAX_AUTOTUNE=1``.
|
||||
|
||||
* Or, for more fine-grained control:
|
||||
|
||||
``torch._inductor.config.max_autotune_pointwise = True``
|
||||
To enable tuning for ``pointwise``/``reduction`` ops.
|
||||
|
||||
``torch._inductor.config.max_autotune_gemm = True``
|
||||
To enable tuning or lowering of ``mm``/``conv``\s.
|
||||
|
||||
``torch._inductor.max_autotune_gemm_backends/TORCHINDUCTOR_MAX_AUTOTUNE_GEMM_BACKENDS``
|
||||
To select the candidate backends for ``mm`` auto-tuning. Defaults to
|
||||
``TRITON,ATEN,NV``. This also includes the ``CUTLASS`` tuning option. Limiting this to
|
||||
``TRITON`` might improve performance by enabling more fused ``mm`` kernels
|
||||
instead of going to rocBLAS.
|
||||
|
||||
* For ``mm`` tuning, tuning ``coordinate_descent`` might improve performance.
|
||||
|
||||
``torch._inductor.config.coordinate_descent_tuning = True`` or ``TORCHINDUCTOR_COORDINATE_DESCENT_TUNING=1``
|
||||
|
||||
* Inference can see large improvements on AMD GPUs by utilizing
|
||||
``torch._inductor.config.freezing=True`` or the ``TORCHINDUCTOR_FREEZING=1`` variable, which
|
||||
in-lines weights as constants and enables constant folding optimizations.
|
||||
|
||||
* Enabling ``inductor``’s cpp_wrapper might reduce overhead. This generates
|
||||
C++ code which launches Triton binaries directly with
|
||||
``hipModuleLaunchKernel`` and relies on `hipification`.
|
||||
|
||||
* For NHWC convolution workloads, setting
``torch._inductor.config.layout_optimization=True`` or ``TORCHINDUCTOR_LAYOUT_OPTIMIZATION=1``
can help by enforcing the channels_last format throughout the graph, avoiding
any additional transposes added by ``inductor``. Note that
``PYTORCH_MIOPEN_SUGGEST_NHWC=1`` is recommended if using this.
|
||||
|
||||
* Extracting the Triton kernel ``TORCH_COMPILE_DEBUG`` creates a
|
||||
``torch_compile_debug/`` directory at current path, in the ``output_code.py``
|
||||
the code-strings for the Triton kernels that are defined. Manual work is
|
||||
then required to strip out the kernel and create kernel
|
||||
compilation and launch via Triton.
|
||||
|
||||
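The following is a minimal sketch of combining these knobs programmatically, assuming a PyTorch 2.x build where ``torch._inductor.config`` exposes the flags listed above; the model and input are placeholders, not taken from any benchmark in this guide.

.. code-block:: python

   import torch
   import torch._inductor.config as inductor_config

   # Benchmark Triton gemm/conv configurations and pick the fastest
   # (equivalent to TORCHINDUCTOR_MAX_AUTOTUNE=1).
   inductor_config.max_autotune = True

   # Optional: coordinate-descent tuning for mm kernels
   # (TORCHINDUCTOR_COORDINATE_DESCENT_TUNING=1).
   inductor_config.coordinate_descent_tuning = True

   # Optional for inference: in-line weights as constants
   # (TORCHINDUCTOR_FREEZING=1).
   inductor_config.freezing = True

   model = torch.nn.Linear(4096, 4096).cuda().half()  # placeholder model
   x = torch.randn(8, 4096, device="cuda", dtype=torch.half)

   compiled = torch.compile(model)  # torch.compile uses inductor by default
   with torch.no_grad():
       y = compiled(x)
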
Other guidelines
================

* HIP provides an environment variable, ``export HIP_FORCE_DEV_KERNARG=1``, that can place HIP kernel arguments directly in device memory to reduce the latency of accessing kernel arguments. It can save 2 to 3 μs for some kernels. Setting this variable for FA decode, which contains ``splitK`` and reduce kernels, can reduce the total time by around 6 μs in the benchmark test.

* Set the clock to deterministic. Use the command ``rocm-smi --setperfdeterminism 1900`` to set the max clock speed to 1900 MHz instead of the default 2100 MHz. This can reduce the chance of a clock speed decrease due to high chip temperature by setting a lower cap. You can restore this setting to its default value with ``rocm-smi -r``.

* Set Non-Uniform Memory Access (NUMA) auto-balance. Run the command ``cat /proc/sys/kernel/numa_balancing`` to check the current setting. An output of ``0`` indicates NUMA auto-balancing is disabled, which is the desired state. If the output is ``1``, run the command ``sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'`` to disable it.

For these settings, the ``env_check.sh`` script automates the setting, resetting, and checking of such environment settings. Find the script at `<https://github.com/ROCm/triton/blob/rocm_env/scripts/amd/env_check.sh>`__.

.. _fine-tuning-llms-triton-tunableop:

TunableOp
---------

`TunableOp <https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/cuda/tunable/README.md>`_ is a feature used to define and optimize kernels that can have tunable parameters. This is useful in optimizing the performance of custom kernels by exploring different parameter configurations to find the most efficient setup. See more about PyTorch TunableOp in :ref:`Model acceleration libraries <fine-tuning-llms-pytorch-tunableop>`.

You can easily manipulate the behavior of TunableOp through environment variables, though you could also use the C++ interface ``at::cuda::tunable::getTuningContext()``. A Python interface to the ``TuningContext`` does not yet exist.

One such tuning parameter is the maximum tuning duration, in milliseconds. Its default value is ``0``, which means only 1 iteration is attempted. Remember: there's an overhead to tuning. To try and minimize the overhead, only a limited number of iterations of a given operation are attempted. If you set this to ``10``, each solution for a given operation can run as many iterations as possible within 10 ms. There is a hard-coded upper limit of 100 iterations attempted per solution. If you want the tunings to be chosen based on an average over multiple iterations, increase the allowed tuning duration.

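As a sketch of the environment-variable workflow, assuming the variable names documented in the TunableOp README (``PYTORCH_TUNABLEOP_ENABLED``, ``PYTORCH_TUNABLEOP_TUNING``, and ``PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS``):

.. code-block:: python

   import os

   # These must be set before the first tuned GEMM runs.
   os.environ["PYTORCH_TUNABLEOP_ENABLED"] = "1"
   os.environ["PYTORCH_TUNABLEOP_TUNING"] = "1"
   # Allow up to 10 ms of tuning per candidate solution (see above).
   os.environ["PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS"] = "10"

   import torch

   a = torch.randn(1024, 1024, device="cuda", dtype=torch.half)
   b = torch.randn(1024, 1024, device="cuda", dtype=torch.half)
   c = a @ b  # this GEMM is tuned; results are written to the tuning CSV on exit
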
@@ -0,0 +1,484 @@
<head>
  <meta charset="UTF-8">
  <meta name="description" content="SmoothQuant model inference on AMD Instinct MI300X using Composable Kernel">
  <meta name="keywords" content="Mixed Precision, Kernel, Inference, Linear Algebra">
</head>

# Optimizing with Composable Kernel

The AMD ROCm™ Composable Kernel (CK) library provides a programming model for writing performance-critical kernels for machine learning workloads. It generates a general-purpose kernel during the compilation phase through a C++ template, enabling developers to achieve operation fusions on different data precisions.

This article gives a high-level overview of the CK General Matrix Multiplication (GEMM) kernel based on the design example of `03_gemm_bias_relu`. It also outlines the steps to construct the kernel and run it. Moreover, the article provides a detailed implementation of running SmoothQuant quantized INT8 models on AMD Instinct MI300X accelerators using CK.

## High-level overview: a CK GEMM instance

GEMM is a fundamental block in linear algebra, machine learning, and deep neural networks. It is defined as the operation {math}`E = α \times (A \times B) + β \times (D)`, with A and B as matrix inputs, α and β as scalar inputs, and D as a pre-existing matrix. Take the commonly used linear transformation in a fully connected layer as an example. These terms correspond to input activation (A), weight (B), bias (D), and output (E), respectively. The example employs the `DeviceGemmMultipleD_Xdl_CShuffle` struct from the CK library as the fundamental instance to explore the compute capability of AMD Instinct accelerators for the computation of GEMM. The implementation of the instance contains two phases:

- [Template parameter definition](#template-parameter-definition)
- [Instantiating and running the templated kernel](#instantiating-and-running-the-templated-kernel)

### Template parameter definition

The template parameters of the instance are grouped into four parameter types:

- [Parameters for determining matrix data precision](matrix-data-precision)
- [Parameters for determining matrix data layout](matrix-data-layout)
- [Parameters for determining extra operations on matrix elements](matrix-element-operation)
- [Performance-oriented tunable parameters](tunable-parameters)

<!--
================
### Figure 2
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-template_parameters.jpg
The template parameters of the selected GEMM kernel are classified into four groups. These template parameter groups should be defined properly before running the instance.
```

(matrix-data-precision)=

#### Matrix data precision

A, B, D, and E are defined as half-precision floating-point datatypes. The multiply-add results of matrices A and B are added with the pre-existing matrix D (half-precision), and the final GEMM results are also half-precision floating-point values.

```c++
using ADataType = F16;
using BDataType = F16;
using AccDataType = F32;
using CShuffleDataType = F16;
using DDataType = F16;
using EDataType = F16;
```

`ADataType` and `BDataType` denote the data precision of the A and B input matrices. `AccDataType` determines the data precision used for representing the multiply-add results of A and B elements. These results are stored in a `CShuffle` module in the local data share (LDS), a low-latency and high-bandwidth explicitly addressed memory used for synchronization within a workgroup, for later use.

`CShuffleDataType` denotes the data precision of `CShuffle` in LDS.

`DDataType` denotes the data precision of the pre-existing D matrix stored in GPU global memory, while `EDataType` denotes the data precision of the final output. The CK kernel supports a fusion strategy so that `CShuffle` can be added with a single pre-existing matrix in the same GPU kernel for better performance.

(matrix-data-layout)=

#### Matrix data layout

```c++
using ALayout = Row;
using BLayout = Col;
using DLayout = Row;
using ELayout = Row;
```

Following the convention of various linear algebra libraries, CK assumes that the input matrix A is an M x K matrix, meaning the matrix has M rows and K columns. Similarly, matrix B is assumed to be K x N, meaning it has K rows and N columns. In computing, row-major order and column-major order are commonly used ways to store matrices in linear storage. After understanding the matrix storage pattern, the underlying optimized memory access manner can be applied to achieve better performance depending on the storage ordering of these matrices.

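As a quick illustration of the two storage orders using PyTorch strides (illustrative only; CK's `Row`/`Col` tags correspond to these stride patterns):

```python
import torch

K, N = 4, 3
B_row = torch.arange(K * N).reshape(K, N)  # row-major: stride (N, 1)
B_col = B_row.t().contiguous().t()         # column-major storage: stride (1, K)

print(B_row.stride())  # (3, 1)
print(B_col.stride())  # (1, 4)
```
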
(matrix-element-operation)=

#### Matrix element operation

```c++
using AElementOp = PassThrough;
using BElementOp = PassThrough;
using CDEElementOp = AddRelu;
```

CK supports the pre-processing of the matrices before calculating GEMM, that is, `C = AElementOp(A) * BElementOp(B)`. It also supports the post-processing of GEMM results, that is, `E = CDEElementOp(C, D)`.

`AElementOp` and `BElementOp` determine the operation applied to matrix A and B separately before GEMM, which is achieved by binding the operation with a C++ struct function.

The above `PassThrough` denotes that no operations are performed on the target matrix. `CDEElementOp` determines the operations applied to the `CShuffle` output and matrix D. The following binding struct `AddRelu` shows an example of adding the `CShuffle` output and matrix D, applying a ReLU (Rectified Linear Unit) operation to the addition result, and passing the result to matrix E.

```c++
struct AddRelu
{
    __host__ __device__ void operator()(ck::half_t& e, const ck::half_t& c, const ck::half_t& d) const
    {
        // Add the CShuffle output (c) and the pre-existing matrix element (d),
        // then apply ReLU before writing the result to e.
        const ck::half_t x = c + d;
        e = x > 0 ? x : 0;
    }
};
```

(tunable-parameters)=

#### Tunable parameters

The CK instance includes a series of tunable template parameters to control the parallel granularity of the workload to achieve load balancing on different hardware platforms.

These parameters include Block Size, M/N/K Per Block, M/N Per XDL, AK1, BK1, and so on.

- Block Size determines the number of threads in the thread block.
- M/N/K Per Block determines the size of the tile that each thread block is responsible for calculating.
- M/N Per XDL refers to the M/N size for Instinct accelerator Matrix Fused Multiply Add (MFMA) instructions operating on a per-wavefront basis.
- A/B K1 is related to the data type. It can be any value ranging from 1 to K Per Block. To achieve optimal load/store performance, 128 bits per load is suggested. In addition, the A/B loading parameters must be changed accordingly to match the A/B K1 value; otherwise, compilation errors will result.

Conditions for achieving computational load balancing on different hardware platforms can vary.

### Instantiating and running the templated kernel

After determining the template parameters, we instantiate the kernel with actual arguments. Do one of the following:

- Use `GetDeviceBuffer` from CK's custom struct `DeviceMem` to pass the element values of the matrices that need to be calculated.
- Allocate a device buffer via `hipMalloc`. Ensure the device buffer size can fit the matrix size.
- Pass matrix elements through the `data_ptr` method in the `Tensor` object if the matrix to be calculated is of `Tensor` type.

The row, column, and stride information of the input matrices is also passed to the instance. For batched GEMM, you must pass in additional batch count and batch stride values. The extra operations for pre- and post-processing are also passed with an actual argument; for example, α and β for GEMM scaling operations. Afterward, the instantiated kernel is launched by the invoker, as illustrated in Figure 3.

<!--
================
### Figure 3
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-kernel_launch.jpg
Templated kernel launching consists of kernel instantiation, making arguments by passing in actual application parameters, creating an invoker, and running the instance through the invoker.
```

## Developing fused INT8 kernels for SmoothQuant models

[SmoothQuant](https://github.com/mit-han-lab/smoothquant) (SQ) is a quantization algorithm that enables an INT8 quantization of both weights and activations for all the matrix multiplications in LLMs. The required GPU kernel functionalities used to accelerate the inference of SQ models on Instinct accelerators are shown in the following table.

:::{table} Functionalities used to implement SmoothQuant model inference.

| Functionality descriptions | Corresponding wrappers |
|:-------------------------------------|-----------------------------------------|
| {math}`E = α \times (A \times B) + β \times (D)`, where A, B, D, E are INT8 2-D tensors | E = Linear_ABDE_I8(A, B, D, {math}`\alpha`, {math}`\beta`) |
| {math}`E = RELU (α \times (A \times B) + β \times (D))`, where A, B, D, E are INT8 2-D tensors | E = Linear_ReLU_ABDE_I8(A, B, D, {math}`\alpha`, {math}`\beta`) |
| {math}`E = α \times (A \times B) + β \times (D)`, where A, B are INT8 2-D tensors, and D and E are FP32 2-D tensors | E = Linear_AB_I8_DE_F32(A, B, D, {math}`\alpha`, {math}`\beta`) |
| {math}`E = α \times (A \times B)`, where A, B, E are INT8 3-D tensors | E = BMM_ABE_I8(A, B, {math}`\alpha`) |
| {math}`E = α \times (A \times B)`, where A, B are INT8 3-D tensors, and E is an FP32 3-D tensor | E = BMM_AB_I8_E_F32(A, B, {math}`\alpha`) |
:::

### Operation flow analysis

The following analyzes the operation flow of `Linear_ReLU_ABDE_I8`. The rest of the wrappers in Table 1 can be analyzed similarly.

The first operation in the process is to perform the multiplication of input matrices A and B. The resulting matrix C is then scaled with α to obtain T1. At the same time, the process performs a scaling operation on D elements to obtain T2. Afterward, the process performs matrix addition between T1 and T2, element activation calculation using ReLU, and element rounding sequentially. The operations to generate E1, E2, and E are encapsulated and completed by a user-defined template function in CK (given in the next sub-section). This template function is integrated into the fundamental instance directly during the compilation phase so that all these steps can be fused in a single GPU kernel. A PyTorch reference sketch of this flow follows Figure 4.

<!--
================
### Figure 4
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-operation_flow.jpg
Operation flow.
```

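The following is a minimal PyTorch sketch of the semantics this fused kernel implements. It is an unfused reference for clarity, not the CK implementation itself, and is best run on the CPU since integer matmul support on GPUs varies:

```python
import torch

def linear_relu_abde_i8_reference(A, B, D, alpha, beta):
    # T1 = alpha * (A x B), accumulated in a wider type to avoid overflow
    T1 = alpha * (A.to(torch.int32) @ B.to(torch.int32)).to(torch.float32)
    # T2 = beta * D
    T2 = beta * D.to(torch.float32)
    # Add, apply ReLU, then saturate to the INT8 upper bound and round
    E = torch.relu(T1 + T2).clamp(max=127).round()
    return E.to(torch.int8)
```
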
The CK library contains many fundamental instances that implement different functions. First, familiarize yourself with the names of the various CK instances and determine whether they meet the target functional requirements.

Second, consider whether the format of the input data meets your actual calculation needs. For SQ models, the 8-bit integer data format (INT8) is applied for matrix calculations.

Third, consider the platform for implementing CK instances. The instances suffixed with `xdl` only run on AMD Instinct accelerators after being compiled and cannot run on Radeon-series GPUs. This is due to the underlying device-specific instruction sets used to implement these basic instances.

Here, we use [DeviceBatchedGemmMultiD_Xdl](https://github.com/ROCm/composable_kernel/tree/develop/example/24_batched_gemm) as the fundamental instance to implement the functionalities in the previous table.

<!--
================
### Figure 5
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-root_instance.jpg
Use the 'DeviceBatchedGemmMultiD_Xdl' instance as a root.
```

The `DeviceBatchedGemmMultiD_Xdl` instance realizes the batched GEMM `BMM_ABE_I8` and `BMM_AB_I8_E_F32` kernels directly by using the proper input and output data precision types.

Based on the two batched GEMM kernels, the GEMM kernels `Linear_ABDE_I8` and `Linear_AB_I8_DE_F32` can be implemented by expanding their input 2-D tensors to 3-D tensors. The 3-D output tensors produced by the root instance are then squeezed back to 2-D output tensors before returning.

For example, unsqueeze A (M, K) to A (1, M, K) before assigning it to the root instance, and squeeze E (1, M, N) to (M, N) after the calculation of the root instance returns. `Linear_ReLU_ABDE_I8` is implemented by adding a ReLU operation on the output of `Linear_ABDE_I8`.

### Developing the complete function

Since the inference of SQ quantized models relies on the PyTorch and Transformers libraries, and a tensor type is used to represent matrices and vectors in `torch`, the C++ data types in CK need to be replaced with the `torch::Tensor` type. The data types of the input and output matrices should be a `tensor` type.

In GEMM, the A and B inputs are two-dimensional matrices, while the required input matrices of the selected fundamental CK instance are three-dimensional. Therefore, we must convert the input 2-D tensors to 3-D tensors by using `tensor`'s `unsqueeze()` method before passing these matrices to the instance. For the batched GEMM kernels in the preceding table, skip this step.

```c++
// Function input and output
torch::Tensor linear_relu_abde_i8(
    torch::Tensor A_,
    torch::Tensor B_,
    torch::Tensor D_,
    float alpha,
    float beta)
{
    // Convert torch::Tensor A_ (M, K) to torch::Tensor A (1, M, K)
    auto A = A_.unsqueeze(0);

    // Convert torch::Tensor B_ (K, N) to torch::Tensor B (1, K, N)
    auto B = B_.unsqueeze(0);
    ...
```

As shown in the following code block, we obtain the M, N, and K values from the input tensor sizes. The size and stride information is used to reshape the input vector D and allocate the storage space of tensor E. Strides reflect the exact size of continuous elements in memory, and they are passed as important parameters to the fundamental instance for GPU kernel use.

```c++
// Return the batch count from the size of dimension 0
int batch_count = A.size(0);

// Return M, N and K from the sizes of dimensions 1 & 2
int M = A.size(1);
int N = B.size(1);
int K = A.size(2);

// Initialize the stride size for A, B, D and E
int stride_A = K;
int stride_B = K;
int stride_D0 = N;
int stride_E = N;

// Initialize the stride size for batched A, B, D and E
long long int batch_stride_A = M * K;
long long int batch_stride_B = K * N;
long long int batch_stride_D0 = M * N;
long long int batch_stride_E = M * N;

// Convert the 2-D tensor to a 3-D tensor
auto D = D_.view({1,-1}).repeat({M, 1});

// Allocate memory for E
auto E = torch::empty({batch_count, M, N},
                      torch::dtype(torch::kInt8).device(A.device()));
```

In the following code block, `ADataType`, `BDataType`, and `D0DataType` denote the data precision of the input tensors A, B, and D, respectively. `EDataType` denotes the data precision of the output tensor E. These parameters are specified as the `I8` data format (8-bit integer) to meet the kernel's design requirements.

`AccDataType` determines the data precision used to represent the multiply-add results of A and B elements. Generally, a larger-range data type is applied to store the multiply-add results of A and B to avoid result overflow; `I32` is applied in this case. The `CShuffleDataType I32` data type indicates that the multiply-add results continue to be stored in LDS as the `I32` data format. All of this is implemented through the following code block.

```c++
// Data precision
using ADataType = I8;
using BDataType = I8;
using AccDataType = I32;
using CShuffleDataType = I32;
using D0DataType = I8;
using DsDataType = ck::Tuple<D0DataType>;
using EDataType = I8;
```

Following the convention of various linear algebra libraries, row-major and column-major orders are used to denote the ways of storing matrices in linear storage. The advantage of specifying matrix B as column-major is that all the relevant matrix elements are stored contiguously in GPU global memory when a row in A is multiplied by a column in B, which helps the GPU achieve contiguous data access and improves access performance.

```c++
// Specify tensor order
using ALayout = RowMajor;
using BLayout = ColumnMajor;
using D0Layout = RowMajor;
using DsLayout = ck::Tuple<D0Layout>;
using ELayout = RowMajor;
```

In CK, `PassThrough` is a struct denoting that no operation is applied to the tensor it binds to. To fuse the operations between E1, E2, and E introduced in the [Operation flow analysis](#operation-flow-analysis) section, we define a custom C++ struct, `ScaleScaleAddRelu`, and bind it to `CDEElementOp`. It determines the operations applied to `CShuffle` (the A×B results), tensor D, α, and β.

```c++
// No operations bound to the elements of A and B
using AElementOp = PassThrough;
using BElementOp = PassThrough;

// Operations bound to the elements of C, D and E
using CDEElementOp = ScaleScaleAddRelu;
```

In the binding struct, `operator()` performs an addition operation between `CShuffle` and matrix D, a ReLU operation on the addition result, and a saturating rounding operation on the output elements. It then returns the results to E.

```c++
struct ScaleScaleAddRelu {

    template <>
    __host__ __device__ constexpr void
    operator()<I8, I32, I8>(I8& e, const I32& c, const I8& d) const
    {
        // Scale the AxB result with alpha
        const F32 c_scale = ck::type_convert<F32>(c) * alpha;

        // Scale D with beta
        const F32 d_scale = ck::type_convert<F32>(d) * beta;

        // Perform the addition operation
        F32 temp = c_scale + d_scale;

        // Perform the ReLU operation
        temp = temp > 0 ? temp : 0;

        // Saturate to the INT8 upper bound
        temp = temp > 127 ? 127 : temp;

        // Round and return to E
        e = ck::type_convert<I8>(temp);
    }

    F32 alpha;
    F32 beta;
};
```

The original input tensors need to be padded to meet the requirements of GPU tile-based parallelism.

```c++
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
```

The template parameters of the target fundamental instance are initialized with the above parameters and include default tunable parameters. For specific tuning methods, see [Tunable parameters](#tunable-parameters).

```c++
using DeviceOpInstance = ck::tensor_operation::device::DeviceBatchedGemmMultiD_Xdl<
    // Tensor layout
    ALayout, BLayout, DsLayout, ELayout,
    // Tensor data type
    ADataType, BDataType, AccDataType, CShuffleDataType, DsDataType, EDataType,
    // Tensor operation
    AElementOp, BElementOp, CDEElementOp,
    // Padding strategy
    GemmDefault,
    // Tunable parameters
    tunable parameters>;
```

Return the address of the first element of each tensor:

```c++
auto A_ref = A.data_ptr<ADataType>();
auto B_ref = B.data_ptr<BDataType>();
auto D0_ref = D.data_ptr<D0DataType>();
auto E_ref = E.data_ptr<EDataType>();
```

The fundamental instance is then initialized and run with the actual arguments:

```c++
auto device_op = DeviceOpInstance{};
auto invoker = device_op.MakeInvoker();
auto argument = device_op.MakeArgument(
    A_ref, B_ref, {D0_ref}, E_ref,
    M, N, K,
    batch_count,
    stride_A, stride_B, {stride_D0}, stride_E,
    batch_stride_A, batch_stride_B, {batch_stride_D0}, batch_stride_E,
    AElementOp{}, BElementOp{}, CDEElementOp{alpha, beta});

invoker.Run(argument, StreamConfig{nullptr, 0});
```

The output of the fundamental instance is a calculated batched matrix E (batch, M, N). Before returning, it needs to be converted to a 2-D matrix if a normal GEMM result is required.

```c++
// Convert (1, M, N) to (M, N)
return E.squeeze(0);
```

### Binding to Python

Since these functions are written in C++ against `torch::Tensor`, you can use `pybind11` to bind them and import them as Python modules. For this example, the necessary binding code for exposing the functions in the table spans only a few lines.

```c++
#include <torch/extension.h>

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m){
  m.def("linear_ab_i8_de_f32", &linear_ab_i8_de_f32);
  m.def("linear_relu_abde_i8", &linear_relu_abde_i8);
  m.def("linear_abde_i8", &linear_abde_i8);
  m.def("bmm_abe_i8", &bmm_abe_i8);
  m.def("bmm_ab_i8_e_f32", &bmm_ab_i8_e_f32);
}
```

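Once built and installed (see the `setup.py` script below), the bound functions can be called from Python on device tensors. A minimal usage sketch, assuming the `torch_int.rocm` extension name defined in the build script that follows; the shapes and scale values are placeholders:

```python
import torch
from torch_int import rocm as ck_ops  # extension name defined in setup.py

# INT8 inputs on the accelerator (placeholder shapes)
A = torch.randint(-128, 128, (128, 512), dtype=torch.int8, device="cuda")
B = torch.randint(-128, 128, (512, 256), dtype=torch.int8, device="cuda")
D = torch.randint(-128, 128, (256,), dtype=torch.int8, device="cuda")

# E = ReLU(alpha * (A x B) + beta * D), computed in one fused CK kernel
E = ck_ops.linear_relu_abde_i8(A, B, D, 0.05, 1.0)
```
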
Build the C++ extension by writing a `setup.py` script that uses `setuptools` to compile the C++ code. A reference implementation of the `setup.py` script is as follows.

```python
import os
from setuptools import setup, find_packages
from torch.utils import cpp_extension
from torch.utils.cpp_extension import BuildExtension

# Use the HIP compiler for both C and C++ sources.
os.environ["CC"] = "hipcc"
os.environ["CXX"] = "hipcc"

sources = [
    'torch_int/kernels/linear.cpp',
    'torch_int/kernels/bmm.cpp',
    'torch_int/kernels/pybind.cpp',
]

include_dirs = ['torch_int/kernels/include']
extra_link_args = ['libutility.a']
extra_compile_args = ['-O3', '-DNDEBUG', '-std=c++17', '--offload-arch=gfx942', '-DCK_ENABLE_INT8', '-D__HIP_PLATFORM_AMD__=1']

setup(
    name='torch_int',
    ext_modules=[
        cpp_extension.CUDAExtension(
            name='torch_int.rocm',
            sources=sources,
            include_dirs=include_dirs,
            extra_link_args=extra_link_args,
            extra_compile_args=extra_compile_args
        ),
    ],
    cmdclass={
        'build_ext': BuildExtension.with_options(use_ninja=False)
    },
    packages=find_packages(
        exclude=['notebook', 'scripts', 'tests']),
)
```

Run `python setup.py install` to build and install the extension. It should look something like Figure 6:

<!--
================
### Figure 6
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-compilation.jpg
Compilation and installation of the INT8 kernels.
```

### INT8 model inference and performance

The implementation architecture of running SmoothQuant models on MI300X accelerators is illustrated in Figure 7, where (a) shows the decoder layer composition of the target model, (b) shows the major implementation classes for the decoder layer components, and (c) denotes the underlying GPU kernels implemented by the CK instance.

<!--
================
### Figure 7
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-inference_flow.jpg
The implementation architecture of running SmoothQuant models on AMD MI300X accelerators.
```

For the target [SQ quantized model](https://huggingface.co/mit-han-lab/opt-13b-smoothquant), each decoder layer contains three major components: attention calculation, layer normalization, and linear transformation in fully connected layers. The corresponding implementation classes for these components are:

- `Int8OPTAttention`
- `W8A8B8O8LinearReLU`
- `W8A8BF32OF32Linear`

These classes' underlying implementation logic harnesses the functions in the previous table. Note that for this example, the `LayerNormQ` module is implemented by the native torch module.

Testing environment: the hardware platform used for testing features 256 AMD EPYC 9534 cores, 8 AMD Instinct MI300X accelerators, and 1.5 TB of memory. The testing was done in a publicly available Docker image from Docker Hub:
[`rocm/pytorch:rocm6.1_ubuntu22.04_py3.10_pytorch_2.1.2`](https://hub.docker.com/layers/rocm/pytorch/rocm6.1_ubuntu22.04_py3.10_pytorch_2.1.2/images/sha256-f6ea7cee8aae299c7f6368187df7beed29928850c3929c81e6f24b34271d652b)

The tested models are the OPT-1.3B, 2.7B, 6.7B, and 13B FP16 models; the corresponding SmoothQuant INT8 OPT models were obtained from Hugging Face.

Note that since default values were used for the tunable parameters of the fundamental instance, the performance of the INT8 kernel is suboptimal.

Figure 8 shows the performance comparisons between the original FP16 and the SmoothQuant-quantized INT8 models on a single MI300X accelerator. The GPU memory footprints of the SmoothQuant-quantized models are significantly reduced. The per-sample inference latency is also significantly reduced for all SmoothQuant-quantized OPT models (illustrated in (b)). Notably, the performance of the CK instance-based INT8 kernel steadily improves with increasing model size.

<!--
================
### Figure 8
================ -->
```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-comparisons.jpg
Performance comparisons between the original FP16 and the SmoothQuant-quantized INT8 models on a single MI300X accelerator.
```

For accuracy comparisons between the original FP16 and INT8 models, the evaluation uses the first 1,000 samples from the LAMBADA dataset's validation set. We employ the same Last Token Prediction Accuracy method introduced in [SmoothQuant Real-INT8 Inference for PyTorch](https://github.com/mit-han-lab/smoothquant/blob/main/examples/smoothquant_opt_real_int8_demo.ipynb) as our evaluation metric. The comparison results are shown in Table 2.

:::{table} The inference accuracy comparisons of SmoothQuant quantized models on Instinct MI300X.

| Models | Hugging Face FP16 model accuracy | SmoothQuant quantized INT8 model accuracy |
|:-----------------|----------------------------------------|---------------------------------------------|
| opt-1.3B | 0.72 | 0.70 |
| opt-2.7B | 0.76 | 0.75 |
| opt-6.7B | 0.80 | 0.79 |
| opt-13B | 0.79 | 0.77 |
:::

## Conclusion

CK provides a rich set of template parameters for generating flexible accelerated computing kernels for different application scenarios.

CK supports multiple instruction sets of AMD Instinct accelerators, operator fusion, and different data precisions. Its composability helps users quickly construct operator performance verification.

With CK, you can build more effective AI applications with higher flexibility and better performance on different AMD accelerator platforms.

docs/how-to/llm-fine-tuning-optimization/overview.rst
@@ -0,0 +1,104 @@
.. meta::
   :description: How to fine-tune LLMs with ROCm
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, optimization, LoRA, walkthrough

***************************************
Conceptual overview of fine-tuning LLMs
***************************************

Large language models (LLMs) are trained on massive amounts of text data to generate coherent and fluent text. The underlying *transformer* architecture is the fundamental building block of all LLMs. Transformers enable LLMs to understand and generate text by capturing contextual relationships and long-range dependencies. To better understand the philosophy of the transformer architecture, review the foundational `Attention is all you need <https://arxiv.org/pdf/1706.03762.pdf>`_ paper.

By further training pre-trained LLMs, the fine-tuned model can gain knowledge related to specific fields or tasks, thereby significantly improving its performance in that field or task. The core idea of fine-tuning is to use the parameters of the pre-trained model as the starting point for new tasks and shape it through a small amount of specific domain or task data, expanding the original model's capability to new tasks or datasets.

Fine-tuning can effectively improve the performance of existing pre-trained models in specific application scenarios. Continuous training and adjustment of the parameters of the base model in the target domain or task can better capture the semantic characteristics and patterns in specific scenarios, thereby significantly improving the key indicators of the model in that domain or task. For example, by fine-tuning the Llama 2 model, its performance in certain applications can be improved over the base model.

.. _fine-tuning-llms-concept-challenge:

The challenge of fine-tuning models
===================================

However, the computational cost of fine-tuning is still high, especially for complex models and large datasets, which poses distinct challenges related to substantial computational and memory requirements. This might be a barrier for accelerators or GPUs with low computing power or limited device memory resources.

For example, suppose we have a language model with 7 billion (7B) parameters, represented by a weight matrix :math:`W`. During backpropagation, the model needs to learn a :math:`ΔW` matrix, which updates the original weights to minimize the value of the loss function.

The weight update is as follows: :math:`W_{updated} = W + ΔW`.

If the weight matrix :math:`W` contains 7B parameters, then the weight update matrix :math:`ΔW` should also contain 7B parameters. Therefore, the :math:`ΔW` calculation is computationally and memory intensive.

.. figure:: ../../data/how-to/llm-fine-tuning-optimization/weight-update.png
   :alt: Weight update diagram

   (a) Weight update in regular fine-tuning. (b) Weight update in LoRA, where the product of matrix A (:math:`M\times K`) and matrix B (:math:`K\times N`) is :math:`ΔW(M\times N)`; dimension K is a hyperparameter. By representing :math:`ΔW` as the product of two smaller matrices (A and B) with a lower rank K, the number of trainable parameters is significantly reduced.

.. _fine-tuning-llms-concept-optimizations:

Optimizations for model fine-tuning
===================================

Low-Rank Adaptation (LoRA) is a technique allowing fast and cost-effective fine-tuning of state-of-the-art LLMs that can overcome this issue of high memory consumption.

LoRA accelerates the adjustment process and reduces related memory costs. To be precise, LoRA decomposes the portion of weight changes :math:`ΔW` into high-precision low-rank representations, which do not require the calculation of all :math:`ΔW`. It learns the decomposition representation of :math:`ΔW` during training, as shown in the :ref:`weight update diagram <fine-tuning-llms-concept-challenge>`. This is how LoRA saves on computing resources.

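To make the savings concrete, the following sketch computes trainable-parameter counts for a single weight matrix; the layer dimensions and rank are illustrative values only, not taken from the 7B example above.

.. code-block:: python

   # Trainable parameters for one M x N weight matrix.
   M, N = 4096, 4096   # illustrative layer dimensions
   K = 8               # LoRA rank (hyperparameter)

   full_delta_w = M * N           # regular fine-tuning learns all of ΔW
   lora_params = M * K + K * N    # LoRA learns A (M x K) and B (K x N)

   print(full_delta_w)                # 16777216
   print(lora_params)                 # 65536
   print(lora_params / full_delta_w)  # 0.00390625, i.e. ~0.4%
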
LoRA is integrated into the `Hugging Face Parameter-Efficient Fine-Tuning (PEFT) <https://huggingface.co/docs/peft/en/index>`_ library, as well as other computation and memory efficiency optimization variants for model fine-tuning such as `AdaLoRA <https://huggingface.co/docs/peft/en/package_reference/adalora>`_. This library efficiently adapts large pre-trained models to various downstream applications without fine-tuning all model parameters. PEFT methods only fine-tune a few model parameters, significantly decreasing computational and storage costs while yielding performance comparable to a fully fine-tuned model. PEFT is integrated with the `Hugging Face Transformers <https://huggingface.co/docs/transformers/en/index>`_ library, providing a faster and easier way to load, train, and use large models for inference.

To simplify running a fine-tuning implementation, the `Transformer Reinforcement Learning (TRL) <https://huggingface.co/docs/trl/en/index>`_ library provides a set of tools to train transformer language models with reinforcement learning, from the Supervised Fine-Tuning (SFT) step and Reward Modeling (RM) step to the Proximal Policy Optimization (PPO) step. The ``SFTTrainer`` API in TRL encapsulates these PEFT optimizations so you can easily import a custom training configuration and run the training process.

.. _fine-tuning-llms-walkthrough-desc:

Walkthrough
===========

To demonstrate the benefits of LoRA and the ideal compute compatibility of using the PEFT and TRL libraries on AMD ROCm-compatible accelerators and GPUs, let's step through a comprehensive implementation of the fine-tuning process using the Llama 2 7B model with LoRA, tailored specifically for question-and-answer tasks on AMD MI300X accelerators.

Before starting, review and understand the key components of this walkthrough:

- `Llama 2 <https://huggingface.co/meta-llama>`_: a family of large language models developed and publicly released by Meta. Its variants range in scale from 7 billion to 70 billion parameters.

- Fine-tuning: a critical process that refines LLMs for specialized tasks and optimizes performance.

- LoRA: a memory-efficient implementation of LLM fine-tuning that significantly reduces the number of trainable parameters.

- `SFTTrainer <https://huggingface.co/docs/trl/v0.8.6/en/sft_trainer#supervised-fine-tuning-trainer>`_: an optimized trainer with a simple interface to easily fine-tune pre-trained models with PEFT adapters, for example, LoRA, for memory efficiency purposes on a custom dataset.

Continue the walkthrough in :doc:`Fine-tuning and inference <fine-tuning-and-inference>`.

@@ -0,0 +1,217 @@
.. meta::
   :description: How to fine-tune LLMs with ROCm
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, profiling, debugging, performance, Triton

***********************
Profiling and debugging
***********************

This section discusses profiling and debugging tools and some of their common usage patterns with ROCm applications.

PyTorch Profiler
================

`PyTorch Profiler <https://pytorch.org/docs/stable/profiler.html>`_ can be invoked inside Python scripts, letting you collect CPU and GPU performance metrics while the script is running. See the `PyTorch Profiler tutorial <https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html>`_ for more information.

You can then visualize and view these metrics using an open-source profile visualization tool like `Perfetto UI <https://ui.perfetto.dev>`_.

#. Use the following snippet to invoke PyTorch Profiler in your code.

   .. code-block:: python

      import torch
      import torchvision.models as models
      from torch.profiler import profile, record_function, ProfilerActivity

      model = models.resnet18().cuda()
      inputs = torch.randn(2000, 3, 224, 224).cuda()

      with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
          with record_function("model_inference"):
              model(inputs)
      prof.export_chrome_trace("resnet18_profile.json")

#. Profile results in ``resnet18_profile.json`` can be viewed by the Perfetto visualization tool. Go to `<https://ui.perfetto.dev>`__ and import the file. In your Perfetto visualization, you'll see that the upper section shows transactions denoting the CPU activities that launch GPU kernels, while the lower section shows the actual GPU activities where it processes the ``resnet18`` inference layer by layer.

   .. figure:: ../../data/how-to/llm-fine-tuning-optimization/perfetto-trace.svg

      Perfetto trace visualization example.

ROCm profiling tools
====================

Heterogeneous systems, where programs run on both CPUs and GPUs, introduce additional complexities. Understanding the critical path and kernel execution is all the more important, so performance tuning is a necessary component of the benchmarking process.

With AMD's profiling tools, developers are able to gain important insight into how efficiently their application is using hardware resources and effectively diagnose potential bottlenecks contributing to poor performance. Developers working with AMD Instinct accelerators have multiple tools depending on their specific profiling needs; these are:

* :ref:`ROCProfiler <fine-tuning-llms-profiling-rocprof>`
* :ref:`Omniperf <fine-tuning-llms-profiling-omniperf>`
* :ref:`Omnitrace <fine-tuning-llms-profiling-omnitrace>`

.. _fine-tuning-llms-profiling-rocprof:

ROCProfiler
-----------

:doc:`ROCProfiler <rocprofiler:index>` is primarily a low-level API for accessing and extracting GPU hardware performance metrics, commonly called *performance counters*. These counters quantify the performance of the underlying architecture, showcasing which pieces of the computational pipeline and memory hierarchy are being utilized.

Your ROCm installation contains a script or executable command called ``rocprof`` which provides the ability to list all available hardware counters for your specific accelerator or GPU, and run applications while collecting counters during their execution.

This ``rocprof`` utility also depends on the :doc:`ROCTracer and ROC-TX libraries <roctracer:index>`, giving it the ability to collect timeline traces of the accelerator software stack as well as user-annotated code regions.

.. note::

   ``rocprof`` is a CLI-only utility, so input and output take the format of ``.txt`` and CSV files. These formats provide a raw view of the data and put the onus on the user to parse and analyze it. Therefore, ``rocprof`` gives the user full access and control of raw performance profiling data but requires extra effort to analyze the collected data.

.. _fine-tuning-llms-profiling-omniperf:

Omniperf
--------

`Omniperf <https://rocm.github.io/omniperf>`_ is a system performance profiler for high-performance computing (HPC) and machine learning (ML) workloads using Instinct accelerators. Under the hood, Omniperf uses :ref:`ROCProfiler <fine-tuning-llms-profiling-rocprof>` to collect hardware performance counters. The Omniperf tool performs system profiling based on all approved hardware counters for Instinct accelerator architectures. It provides high-level performance analysis features including System Speed-of-Light, IP block Speed-of-Light, Memory Chart Analysis, Roofline Analysis, Baseline Comparisons, and more.

Omniperf takes the guesswork out of profiling by removing the need to provide text input files with lists of counters to collect and analyze raw CSV output files, as is the case with ROCProfiler. Instead, Omniperf automates the collection of all available hardware counters in one command and provides a graphical interface to help users understand and analyze bottlenecks and stressors for their computational workloads on AMD Instinct accelerators.

.. note::

   Omniperf collects hardware counters in multiple passes and will therefore re-run the application during each pass to collect different sets of metrics.

.. figure:: ../../data/how-to/llm-fine-tuning-optimization/omniperf-analysis.png

   Omniperf memory chart analysis panel.

In brief, Omniperf provides details about hardware activity for a particular GPU kernel. It also supports both a web-based GUI and a command-line analyzer, depending on your preference.

.. _fine-tuning-llms-profiling-omnitrace:

Omnitrace
---------

`Omnitrace <https://rocm.github.io/omnitrace>`_ is a comprehensive profiling and tracing tool for parallel applications, including HPC and ML packages, written in C, C++, Fortran, HIP, OpenCL, and Python, which execute on the CPU or on the CPU and GPU. It is capable of gathering the performance information of functions through any combination of binary instrumentation, call-stack sampling, user-defined regions, and Python interpreter hooks.

Omnitrace supports interactive visualization of comprehensive traces in the web browser, in addition to high-level summary profiles with ``mean/min/max/stddev`` statistics. Beyond runtime information, Omnitrace supports the collection of system-level metrics such as CPU frequency, GPU temperature, and GPU utilization. Process- and thread-level metrics such as memory usage, page faults, context switches, and numerous other hardware counters are also included.

.. tip::

   When analyzing the performance of an application, it is best not to assume you know where the performance bottlenecks are and why they are happening. Omnitrace is the ideal tool for characterizing where optimization would have the greatest impact on the end-to-end execution of the application and for discovering what else is happening on the system during a performance bottleneck.

.. figure:: ../../data/how-to/llm-fine-tuning-optimization/omnitrace-timeline.png

   Omnitrace timeline trace example.

For detailed usage and examples of these tools, refer to the `Introduction to profiling tools for AMD hardware <https://rocm.blogs.amd.com/software-tools-optimization/profilers/README.html>`_ developer blog.

Debugging with ROCr Debug Agent
===============================

:doc:`ROCr Debug Agent <rocr_debug_agent:index>` is a library that can be loaded by the ROCm platform runtime (:doc:`ROCr <rocr-runtime:index>`) to provide the following functionalities for all AMD accelerators and GPUs supported by the ROCm Debugger API (:doc:`ROCdbgapi <rocdbgapi:index>`).

* Print the state of all AMD accelerator or GPU wavefronts that caused a queue error; for example, causing a memory violation, executing an ``s_trap2``, or executing an illegal instruction.

* Print the state of all AMD accelerator or GPU wavefronts by sending a ``SIGQUIT`` signal to the process in question; for example, by pressing ``Ctrl + \`` while the process is executing.

Debugging memory access faults
------------------------------

Identifying a faulting kernel is often enough to triage a memory access fault. To that end, the `ROCr Debug Agent <https://github.com/ROCm/rocr_debug_agent/>`_ can trap a memory access fault and provide a dump of all active wavefronts that caused the error, as well as the name of the kernel. The `ROCr Debug Agent Library README <https://github.com/ROCm/rocr_debug_agent/blob/master/README.md>`_ provides full instructions, but in brief:

* Compiling with ``-ggdb -O0`` is recommended but not required.

* ``HSA_TOOLS_LIB=/opt/rocm/lib/librocm-debug-agent.so.2 HSA_ENABLE_DEBUG=1 ./my_program``

When the debug agent traps the fault, it produces an extremely verbose output of all wavefront registers and memory content. Importantly, it also prints something like:

.. code-block:: shell

   Disassembly for function vector_add_assert_trap(int*, int*, int*):

   code object: file:////rocm-debug-agent/build/test/rocm-debug-agent-test#offset=14309&size=31336

   loaded at: [0x7fd4f100c000-0x7fd4f100e070]

The kernel name and the code object file should be listed. In the example above, the kernel name is ``vector_add_assert_trap``, but this might also look like:

.. code-block:: shell

   Disassembly for function memory:///path/to/codeobject#offset=1234&size=567:

In this case, it is an in-memory kernel that was generated at runtime.

Using the following environment variable, the debug agent saves all code objects to the current directory (use ``--save-code-objects=[DIR]`` to place them in another location). The code objects will be renamed from the URI format, with special characters replaced by ``_``.

.. code-block:: shell

   ROCM_DEBUG_AGENT_OPTIONS="--all --save-code-objects"

Use the ``llvm-objdump`` command to disassemble the indicated in-memory code object that has now been saved to disk. The name of the kernel is often found inside the disassembled code object.

.. code-block:: shell

   llvm-objdump --disassemble-all path/to/code-object.co

Consider turning off memory caching strategies both within the ROCm stack and PyTorch, where possible. This gives the debug agent the best chance of finding the memory fault where it originates. Otherwise, it could be masked by writing past the end of a cached block within a larger allocation.

.. code-block:: shell

   PYTORCH_NO_HIP_MEMORY_CACHING=1
   HSA_DISABLE_FRAGMENT_ALLOCATOR=1

@@ -0,0 +1,510 @@
.. meta::
   :description: Model fine-tuning and inference on a single-GPU system
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, single-GPU, LoRA, PEFT, inference

****************************************************
Fine-tuning and inference using a single accelerator
****************************************************

This section explains model fine-tuning and inference techniques on a single-accelerator system. See :doc:`Multi-accelerator fine-tuning <multi-gpu-fine-tuning-and-inference>` for a setup with multiple accelerators or GPUs.

.. _fine-tuning-llms-single-gpu-env:

Environment setup
=================

This section was tested using the following hardware and software environment.

.. list-table::
   :stub-columns: 1

   * - Hardware
     - AMD Instinct MI300X accelerator

   * - Software
     - ROCm 6.1, Ubuntu 22.04, PyTorch 2.1.2, Python 3.10

   * - Libraries
     - ``transformers`` ``datasets`` ``huggingface-hub`` ``peft`` ``trl`` ``scipy``

   * - Base model
     - ``meta-llama/Llama-2-7b-chat-hf``

.. _fine-tuning-llms-single-gpu-env-setup:

Setting up the base implementation environment
----------------------------------------------

#. Install PyTorch for ROCm. Refer to the :doc:`PyTorch installation guide <rocm-install-on-linux:how-to/3rd-party/pytorch-install>`. For a consistent installation, it's recommended to use official ROCm prebuilt Docker images with the framework pre-installed.

#. In the Docker container, check the availability of ROCm-capable accelerators using the following command.

   .. code-block:: shell

      rocm-smi --showproductname

   Your output should look like this:

   .. code-block:: shell

      ============================ ROCm System Management Interface ============================
      ====================================== Product Info ======================================
      GPU[0] : Card series: AMD Instinct MI300X OAM
      GPU[0] : Card model: 0x74a1
      GPU[0] : Card vendor: Advanced Micro Devices, Inc. [AMD/ATI]
      GPU[0] : Card SKU: MI3SRIOV
      ==========================================================================================
      ================================== End of ROCm SMI Log ===================================

#. Check that your accelerators are available to PyTorch.

   .. code-block:: python

      import torch
      print("Is a ROCm-GPU detected? ", torch.cuda.is_available())
      print("How many ROCm-GPUs are detected? ", torch.cuda.device_count())

   If successful, your output should look like this:

   .. code-block:: shell

      >>> print("Is a ROCm-GPU detected? ", torch.cuda.is_available())
      Is a ROCm-GPU detected?  True
      >>> print("How many ROCm-GPUs are detected? ", torch.cuda.device_count())
      How many ROCm-GPUs are detected?  4

#. Install the required dependencies.

   bitsandbytes is a library that facilitates quantization to improve the efficiency of deep learning models. Learn more about its use in :doc:`model-quantization`.

   See :ref:`Optimizations for model fine-tuning <fine-tuning-llms-concept-optimizations>` for a brief discussion of PEFT and TRL.

   .. code-block:: shell

      # Install `bitsandbytes` for ROCm 6.0+.
      # Use -DBNB_ROCM_ARCH to target a specific GPU architecture.
      git clone --recurse https://github.com/ROCm/bitsandbytes.git
      cd bitsandbytes
      git checkout rocm_enabled
      pip install -r requirements-dev.txt
      cmake -DBNB_ROCM_ARCH="gfx942" -DCOMPUTE_BACKEND=hip -S .
      python setup.py install

      # To leverage the SFTTrainer in TRL for model fine-tuning.
      pip install trl

      # To leverage PEFT for efficiently adapting pre-trained language models.
      pip install peft

      # Install the other dependencies.
      pip install transformers datasets huggingface-hub scipy

#. Check that the required packages can be imported.

   .. code-block:: python

      import torch
      from datasets import load_dataset
      from transformers import (
          AutoModelForCausalLM,
          AutoTokenizer,
          TrainingArguments
      )
      from peft import LoraConfig
      from trl import SFTTrainer

.. _fine-tuning-llms-single-gpu-download-model-dataset:

Download the base model and fine-tuning dataset
-----------------------------------------------

#. Request access to download `Meta's official Llama model <https://huggingface.co/meta-llama>`_ from Hugging Face. After permission is granted, log in with the following command using your personal access token:

   .. code-block:: shell

      huggingface-cli login

   .. note::

      You can also use the `NousResearch Llama-2-7b-chat-hf <https://huggingface.co/NousResearch/Llama-2-7b-chat-hf>`_ model as a substitute. It has the same model weights as the original.

#. Run the following code to load the base model and tokenizer.

   .. code-block:: python

      # Base model and tokenizer names.
      base_model_name = "meta-llama/Llama-2-7b-chat-hf"

      # Load base model to GPU memory.
      device = "cuda:0"
      base_model = AutoModelForCausalLM.from_pretrained(base_model_name, trust_remote_code = True).to(device)

      # Load tokenizer.
      tokenizer = AutoTokenizer.from_pretrained(
              base_model_name,
              trust_remote_code = True)
      tokenizer.pad_token = tokenizer.eos_token
      tokenizer.padding_side = "right"

#. Now, let's fine-tune the base model for a question-and-answer task using a small dataset called `mlabonne/guanaco-llama2-1k <https://huggingface.co/datasets/mlabonne/guanaco-llama2-1k>`_, which is a 1,000-sample subset of the `timdettmers/openassistant-guanaco <https://huggingface.co/datasets/OpenAssistant/oasst1>`_ dataset.

   .. code-block:: python

      # Dataset for fine-tuning.
      training_dataset_name = "mlabonne/guanaco-llama2-1k"
      training_dataset = load_dataset(training_dataset_name, split = "train")

      # Check the data.
      print(training_dataset)

      # Sample 11 is a QA sample in English.
      print(training_dataset[11])

#. With the base model and the dataset, let's start fine-tuning!

.. _fine-tuning-llms-single-gpu-configure-params:

Configure fine-tuning parameters
--------------------------------

To set up the ``SFTTrainer`` parameters, you can use the following code as a reference.

.. code-block:: python

   # Training parameters for SFTTrainer.
   training_arguments = TrainingArguments(
       output_dir = "./results",
       num_train_epochs = 1,
       per_device_train_batch_size = 4,
       gradient_accumulation_steps = 1,
       optim = "paged_adamw_32bit",
       save_steps = 50,
       logging_steps = 50,
       learning_rate = 4e-5,
       weight_decay = 0.001,
       fp16 = False,
       bf16 = False,
       max_grad_norm = 0.3,
       max_steps = -1,
       warmup_ratio = 0.03,
       group_by_length = True,
       lr_scheduler_type = "constant",
       report_to = "tensorboard"
   )
.. _fine-tuning-llms-single-gpu-start:

Fine-tuning
===========

In this section, you'll see two ways of training: with the LoRA technique and without it. See :ref:`Optimizations for model
fine-tuning <fine-tuning-llms-concept-optimizations>` for an introduction to LoRA. Training with LoRA uses the
``SFTTrainer`` API with its PEFT integration. Training without LoRA forgoes these benefits.

Compare the number of trainable parameters and the training time under the two methodologies.

.. tab-set::

   .. tab-item:: Fine-tuning with LoRA and PEFT
      :sync: with

      1. Configure LoRA using the following code snippet.

         .. code-block:: python

            peft_config = LoraConfig(
                lora_alpha = 16,
                lora_dropout = 0.1,
                r = 64,
                bias = "none",
                task_type = "CAUSAL_LM"
            )

            # View the number of trainable parameters.
            from peft import get_peft_model
            peft_model = get_peft_model(base_model, peft_config)
            peft_model.print_trainable_parameters()

         The output should look like this. Compare the number of trainable parameters to that when fine-tuning without
         LoRA and PEFT.

         .. code-block:: shell

            trainable params: 33,554,432 || all params: 6,771,970,048 || trainable%: 0.49548996469513035

      2. Initialize ``SFTTrainer`` with a PEFT LoRA configuration and run the trainer.

         .. code-block:: python

            # Initialize an SFT trainer.
            sft_trainer = SFTTrainer(
                model = base_model,
                train_dataset = training_dataset,
                peft_config = peft_config,
                dataset_text_field = "text",
                tokenizer = tokenizer,
                args = training_arguments
            )

            # Run the trainer.
            sft_trainer.train()

         The output should look like this:

         .. code-block:: shell

            {'loss': 1.5973, 'grad_norm': 0.25271978974342346, 'learning_rate': 4e-05, 'epoch': 0.16}
            {'loss': 2.0519, 'grad_norm': 0.21817368268966675, 'learning_rate': 4e-05, 'epoch': 0.32}
            {'loss': 1.6147, 'grad_norm': 0.3046981394290924, 'learning_rate': 4e-05, 'epoch': 0.48}
            {'loss': 1.4124, 'grad_norm': 0.11534837633371353, 'learning_rate': 4e-05, 'epoch': 0.64}
            {'loss': 1.5627, 'grad_norm': 0.09108350425958633, 'learning_rate': 4e-05, 'epoch': 0.8}
            {'loss': 1.417, 'grad_norm': 0.2536439299583435, 'learning_rate': 4e-05, 'epoch': 0.96}
            {'train_runtime': 197.4947, 'train_samples_per_second': 5.063, 'train_steps_per_second': 0.633, 'train_loss': 1.6194254455566406, 'epoch': 1.0}
            100%|██████████████████████████████████████████████████████████████████████████████████████████████████████| 125/125 [03:17<00:00,  1.58s/it]

   .. tab-item:: Fine-tuning without LoRA and PEFT
      :sync: without

      1. Use the following code to get started.

         .. code-block:: python

            def print_trainable_parameters(model):
                # Prints the number of trainable parameters in the model.
                trainable_params = 0
                all_param = 0
                for _, param in model.named_parameters():
                    all_param += param.numel()
                    if param.requires_grad:
                        trainable_params += param.numel()
                print(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}")

            # Inspect the plain base model. No PEFT configuration is used in this path.
            print_trainable_parameters(base_model)

         The output should look like this. Compare the number of trainable parameters to that when fine-tuning with LoRA
         and PEFT.

         .. code-block:: shell

            trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.00

      2. Run the trainer.

         .. code-block:: python

            # Trainer without a LoRA config.
            trainer_full = SFTTrainer(
                model = base_model,
                train_dataset = training_dataset,
                dataset_text_field = "text",
                tokenizer = tokenizer,
                args = training_arguments
            )

            # Training.
            trainer_full.train()

         The output should look like this:

         .. code-block:: shell

            {'loss': 1.5975, 'grad_norm': 0.25113457441329956, 'learning_rate': 4e-05, 'epoch': 0.16}
            {'loss': 2.0524, 'grad_norm': 0.2180655151605606, 'learning_rate': 4e-05, 'epoch': 0.32}
            {'loss': 1.6145, 'grad_norm': 0.2949850261211395, 'learning_rate': 4e-05, 'epoch': 0.48}
            {'loss': 1.4118, 'grad_norm': 0.11036080121994019, 'learning_rate': 4e-05, 'epoch': 0.64}
            {'loss': 1.5595, 'grad_norm': 0.08962831646203995, 'learning_rate': 4e-05, 'epoch': 0.8}
            {'loss': 1.4119, 'grad_norm': 0.25422757863998413, 'learning_rate': 4e-05, 'epoch': 0.96}
            {'train_runtime': 419.5154, 'train_samples_per_second': 2.384, 'train_steps_per_second': 0.298, 'train_loss': 1.6171623611450194, 'epoch': 1.0}
            100%|██████████████████████████████████████████████████████████████████████████████████████████████████████| 125/125 [06:59<00:00,  3.36s/it]

.. _fine-tuning-llms-single-gpu-saving:

Saving adapters or fully fine-tuned models
------------------------------------------

PEFT methods freeze the pre-trained model parameters during fine-tuning and add a smaller number of trainable
parameters, namely the adapters, on top of them. The adapters are trained to learn task-specific information. Adapters
trained with PEFT are usually an order of magnitude smaller than the full base model, making them convenient to share,
store, and load.
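Once you've completed the saving steps below, you can verify this size difference with a short script. This is a
minimal sketch; the directory names assume the examples below were run as written.

.. code-block:: python

   import os

   def dir_size_gb(path):
       # Sum the sizes of all files under `path`, in GiB.
       total = 0
       for root, _, files in os.walk(path):
           for f in files:
               total += os.path.getsize(os.path.join(root, f))
       return total / 1024**3

   # Directories created by the saving examples below.
   print(f"Adapter size:    {dir_size_gb('llama-2-7b-enhanced-adapter'):.2f} GiB")
   print(f"Full model size: {dir_size_gb('llama-2-7b-enhanced'):.2f} GiB")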
.. tab-set::

   .. tab-item:: Saving a PEFT adapter
      :sync: with

      If you're using LoRA and PEFT, use the following code to save a PEFT adapter to your system once the fine-tuning
      is completed.

      .. code-block:: python

         # PEFT adapter name.
         adapter_name = "llama-2-7b-enhanced-adapter"

         # Save the PEFT adapter.
         sft_trainer.model.save_pretrained(adapter_name)

      The saved PEFT adapter should look like this on your system:

      .. code-block:: shell

         # Access the adapter directory.
         cd llama-2-7b-enhanced-adapter

         # List all adapter files.
         README.md  adapter_config.json  adapter_model.safetensors

   .. tab-item:: Saving a fully fine-tuned model
      :sync: without

      If you're not using LoRA and PEFT, meaning no PEFT LoRA configuration was used for training, use the following code
      to save your fine-tuned model to your system.

      .. code-block:: python

         # Fully fine-tuned model name.
         new_model_name = "llama-2-7b-enhanced"

         # Save the fully fine-tuned model.
         trainer_full.model.save_pretrained(new_model_name)

      The saved full model should look like this on your system:

      .. code-block:: shell

         # Access the new model directory.
         cd llama-2-7b-enhanced

         # List all model files.
         config.json                       model-00002-of-00006.safetensors  model-00005-of-00006.safetensors
         generation_config.json            model-00003-of-00006.safetensors  model-00006-of-00006.safetensors
         model-00001-of-00006.safetensors  model-00004-of-00006.safetensors  model.safetensors.index.json

.. note::

   PEFT adapters can't be loaded by ``AutoModelForCausalLM`` from the Transformers library because they don't contain
   the full model parameters and model configuration files, for example, ``config.json``. To use an adapter as a normal
   Transformers model, you need to merge it into the base model.

Basic model inference
=====================

A trained model can be classified into one of three types:

* A PEFT adapter

* A pre-trained language model from Hugging Face

* A fully fine-tuned model not using PEFT

Let's look at achieving model inference using each of these model types.

.. tab-set::

   .. tab-item:: Inference using PEFT adapters

      To use a PEFT adapter like a normal transformer model, run generation by loading the base model along with the
      PEFT adapter as follows.

      .. code-block:: python

         from peft import PeftModel
         from transformers import AutoModelForCausalLM

         # Set the path of the model or its name on the Hugging Face Hub.
         base_model_name = "meta-llama/Llama-2-7b-chat-hf"

         # Set the path of the adapter.
         adapter_name = "llama-2-7b-enhanced-adapter"

         # Load the base model.
         base_model = AutoModelForCausalLM.from_pretrained(base_model_name)

         # Adapt the base model with the adapter.
         new_model = PeftModel.from_pretrained(base_model, adapter_name)

         # Then, run generation the same way as with a normal model, as shown in the next tab.

      The PEFT library provides a ``merge_and_unload`` method, which merges the adapter layers into the base model.
      This is needed if you want to save the adapted model to local storage and use it as a normal standalone model.

      .. code-block:: python

         # Load the base model.
         base_model = AutoModelForCausalLM.from_pretrained(base_model_name)

         # Adapt the base model with the adapter.
         new_model = PeftModel.from_pretrained(base_model, adapter_name)

         # Merge the adapter layers into the base model.
         model = new_model.merge_and_unload()

         # Save the merged model locally.
         model.save_pretrained("merged_adapters")
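      After merging, the result behaves like any other Transformers checkpoint. As a quick sketch, assuming the
      ``merged_adapters`` directory saved above, you can reload it with ``AutoModelForCausalLM``:

      .. code-block:: python

         from transformers import AutoModelForCausalLM

         # The merged model now loads like a normal standalone checkpoint.
         merged_model = AutoModelForCausalLM.from_pretrained("merged_adapters")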
   .. tab-item:: Inference using pre-trained or fully fine-tuned models

      If you have a fully fine-tuned model not using PEFT, you can load it like any other pre-trained language model on the
      `Hugging Face Hub <https://huggingface.co/docs/hub/en/index>`_ using the `Transformers
      <https://huggingface.co/docs/transformers/en/index>`_ library.

      .. code-block:: python

         # Import the relevant classes for loading the model and tokenizer.
         from transformers import AutoTokenizer, AutoModelForCausalLM

         # Set the pre-trained model name on the Hugging Face Hub.
         model_name = "meta-llama/Llama-2-7b-chat-hf"

         # Set the device type.
         device = "cuda:0"

         # Load the model and tokenizer.
         model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
         tokenizer = AutoTokenizer.from_pretrained(model_name)

         # Encode the input prompt.
         query = "What is a large language model?"
         inputs = tokenizer.encode(query, return_tensors="pt").to(device)

         # Generate tokens.
         outputs = model.generate(inputs)

         # Decode the outputs.
         print(tokenizer.decode(outputs[0]))

      In addition, pipelines from Transformers offer simple APIs for using pre-trained models on different tasks, including
      sentiment analysis, feature extraction, question answering, and so on. You can use the pipeline abstraction to achieve
      model inference easily.

      .. code-block:: python

         # Import the pipeline class.
         from transformers import pipeline

         # Set the path of your model or its name on the Hugging Face Hub.
         model_name_or_path = "meta-llama/Llama-2-7b-chat-hf"

         # Set up the pipeline.
         # A non-negative device value runs the model on the associated CUDA device ID.
         pipe = pipeline("text-generation", model=model_name_or_path, device=0)

         # Generate tokens.
         print(pipe("What is a large language model?")[0]["generated_text"])

If using multiple accelerators, see
:ref:`Multi-accelerator fine-tuning and inference <fine-tuning-llms-multi-gpu-hugging-face-accelerate>` to explore
popular libraries that simplify fine-tuning and inference in a multi-accelerator system.

Read more about inference frameworks like vLLM and Hugging Face TGI in
:doc:`LLM inference frameworks <llm-inference-frameworks>`.
@@ -21,3 +21,6 @@ In this guide, you'll learn about:

- :doc:`Running models from Hugging Face <hugging-face-models>`

- :doc:`Deploying your model <deploy-your-model>`

To learn about ROCm for HPC applications and scientific computing, see
:doc:`../rocm-for-hpc/index`.

@@ -107,7 +107,10 @@ for more information about running AMP on an AMD accelerator.
Fine-tuning your model
======================

ROCm supports multiple fine-tuning techniques, for example, LoRA, QLoRA, PEFT, and FSDP.
ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
example, LoRA, QLoRA, PEFT, and FSDP.

Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.

The following developer blogs showcase examples of how to fine-tune a model on an AMD accelerator or GPU.
231  docs/how-to/rocm-for-hpc/index.rst  Normal file
@@ -0,0 +1,231 @@

.. meta::
   :description: How to use ROCm for HPC
   :keywords: ROCm, AI, high performance computing, HPC

******************
Using ROCm for HPC
******************

The ROCm open-source software stack is optimized to extract high-performance
computing (HPC) workload performance from AMD Instinct™ accelerators
while maintaining compatibility with industry software frameworks.

ROCm enhances support and access for developers by providing streamlined and
improved tools that significantly increase productivity. Being open source, ROCm
fosters innovation, differentiation, and collaboration within the developer
community, making it a powerful and accessible solution for leveraging the full
potential of AMD accelerators in diverse computational applications.

* For more information, see :doc:`What is ROCm? <../../what-is-rocm>`.

* For guidance on installing ROCm, see :doc:`rocm-install-on-linux:index`. See
  the :doc:`../../compatibility/compatibility-matrix` for details on hardware
  and operating system support.

Some of the most popular HPC frameworks are part of the ROCm platform, including
those that help parallelize operations across multiple accelerators and servers,
handle memory hierarchies, and solve linear systems.

.. image:: ../../data/how-to/rocm-for-hpc/hpc-stack-2024_6_20.png
   :align: center
   :alt: Software and hardware ecosystem surrounding ROCm and AMD Instinct for HPC

The following catalog of GPU-accelerated solutions includes a vast set of
platform-compatible HPC applications, including those for astrophysics, climate
and weather, computational chemistry, computational fluid dynamics, earth
science, genomics, geophysics, molecular dynamics, and physics computing.

Refer to the resources in the following table for instructions on building,
running, and deploying these applications on ROCm-capable systems with AMD
Instinct accelerators. Each build container provides parameters to specify
different source code branches, ROCm releases, and OpenMPI, UCX, and
Ubuntu versions.

.. _hpc-apps:

..
   Reduce font size of HPC app descriptions slightly.

.. raw:: html

   <style>
     #hpc-apps-table tr td:last-child {
       font-size: 0.9rem;
     }
   </style>

.. container::
   :name: hpc-apps-table

   .. list-table::
      :header-rows: 1
      :stub-columns: 1
      :widths: 2 2 5

      * - Application domain
        - HPC application
        - Description

      * - Physics
        - `Chroma <https://github.com/amd/InfinityHub-CI/tree/main/chroma/>`_
        - The Chroma package supports data-parallel programming constructs for lattice
          field theory, and in particular lattice QCD. It uses the SciDAC QDP++ data-parallel
          programming layer (in C++) that presents a single high-level code image to the user,
          but can generate highly optimized code for many architectures, including
          single-node workstations, multi-core and many-core nodes, clusters of nodes via
          QMP, and classic vector computers.

      * -
        - `Grid <https://github.com/amd/InfinityHub-CI/tree/main/grid/>`_
        - Grid is a library for lattice QCD calculations that employs a high-level data-parallel
          approach while using a number of techniques to target multiple types of parallelism.
          The library currently supports MPI, OpenMP, and short vector parallelism. The SIMD
          instruction sets covered include SSE, AVX, AVX2, FMA4, IMCI, and AVX512. Recent
          releases expanded this support to include GPU offloading.

      * -
        - `MILC <https://github.com/amd/InfinityHub-CI/tree/main/milc/>`_
        - The MILC code is a set of research codes developed by the MIMD Lattice Computation
          (MILC) collaboration for doing simulations of four-dimensional SU(3) lattice gauge
          theory on MIMD parallel machines, scaling from single-processor workstations
          to HPC systems. The MILC code is publicly available for research purposes.
          Publications of work done using this code or derivatives of this code should
          acknowledge this use.

      * -
        - `PIConGPU <https://github.com/amd/InfinityHub-CI/tree/main/picongpu>`_
        - PIConGPU (Particle-in-Cell on Graphics Processing Units) is an open-source
          simulation framework for plasma and laser-plasma physics, used to develop
          advanced particle accelerators for radiation therapy of cancer, high-energy
          physics, and photon science.

      * - Astrophysics
        - `Cholla <https://github.com/amd/InfinityHub-CI/tree/main/cholla/>`_
        - An astrophysical simulation code developed for the extreme environments
          encountered in astrophysical systems.

      * - Geophysics
        - `SPECFEM3D Cartesian <https://github.com/amd/InfinityHub-CI/tree/main/specfem3d>`_
        - SPECFEM3D Cartesian simulates acoustic (fluid), elastic (solid), coupled
          acoustic/elastic, poroelastic, or seismic wave propagation in any type of
          conforming mesh of hexahedra (structured or not). It can, for instance,
          model seismic waves propagating in sedimentary basins or any other
          regional geological model following earthquakes. It can also be used
          for non-destructive testing or for ocean acoustics.

      * - Molecular dynamics
        - `GROMACS with HIP (AMD implementation) <https://github.com/amd/InfinityHub-CI/tree/main/gromacs>`_
        - GROMACS is a versatile package for performing molecular dynamics, that is,
          simulating the Newtonian equations of motion for systems with hundreds
          to millions of particles. This AMD container is based on a released
          version of GROMACS modified by AMD. This container only supports up
          to an 8-GPU configuration.

      * -
        - `LAMMPS <https://github.com/amd/InfinityHub-CI/tree/main/lammps>`_
        - LAMMPS is a classical molecular dynamics code with a focus on materials
          modeling. It's an acronym for Large-scale Atomic/Molecular Massively
          Parallel Simulator.

      * - Computational fluid dynamics
        - `NEKO <https://github.com/amd/InfinityHub-CI/tree/main/neko>`_
        - Neko is a portable framework for high-order spectral element flow simulations.
          Written in modern Fortran, Neko adopts an object-oriented approach, allowing
          multi-tier abstractions of the solver stack and facilitating various hardware
          backends, ranging from general-purpose processors and CUDA- and HIP-enabled
          accelerators to SX-Aurora vector processors.

      * -
        - `nekRS <https://github.com/amd/InfinityHub-CI/tree/main/nekrs>`_
        - nekRS is an open-source Navier-Stokes solver based on the spectral element
          method, targeting classical processors and accelerators like GPUs.

      * - Computational chemistry
        - `QUDA <https://github.com/amd/InfinityHub-CI/tree/main/quda>`_
        - A library designed for efficient lattice QCD computations on
          accelerators. It includes optimized Dirac operators and a variety of
          fermion solvers and conjugate gradient (CG) implementations, enhancing
          performance and accuracy in lattice QCD simulations.

      * - Electronic structure
        - `CP2K <https://github.com/amd/InfinityHub-CI/tree/main/cp2k>`_
        - CP2K is a quantum chemistry and solid state physics software package that can
          perform atomistic simulations of solid state, liquid, molecular, periodic, material,
          crystal, and biological systems. This AMD container, based on a released version
          of CP2K, is an AMD beta version with ongoing optimizations.

      * - Quantum Monte Carlo simulation
        - `QMCPACK <https://github.com/amd/InfinityHub-CI/tree/main/qmcpack>`_
        - QMCPACK is an open-source, production-level, many-body ab initio Quantum
          Monte Carlo code for computing the electronic structure of atoms, molecules, 2D
          nanomaterials, and solids. The solid-state capabilities include metallic systems
          as well as insulators. QMCPACK is expected to run well on workstations through
          to the latest generation of supercomputers. Besides high performance, particular
          emphasis is placed on code quality and reproducibility.

      * - Climate and weather
        - `MPAS <https://github.com/amd/InfinityHub-CI/tree/main/mpas>`_
        - The Model for Prediction Across Scales (MPAS) is a collaborative project for
          developing atmosphere, ocean, and other earth-system simulation components
          for use in climate, regional climate, and weather studies.

      * - Benchmark
        - `rocHPL <https://github.com/amd/InfinityHub-CI/tree/main/rochpl>`_
        - HPL, or High-Performance Linpack, is a benchmark that solves a uniformly
          random system of linear equations and reports the floating-point execution rate.
          This documentation supports the implementation of the HPL benchmark on
          top of AMD's ROCm platform.

      * -
        - `rocHPL-MxP <https://github.com/amd/InfinityHub-CI/tree/main/hpl-mxp>`_
        - A benchmark that highlights the convergence of HPC and AI workloads by
          solving a system of linear equations using novel, mixed-precision
          algorithms.

      * -
        - `HPCG <https://github.com/amd/InfinityHub-CI/tree/main/hpcg>`_
        - HPCG, or the High Performance Conjugate Gradient benchmark, complements
          the High Performance LINPACK (HPL) benchmark. The computational and data
          access patterns of HPCG are designed to closely match a broad set of important
          applications not represented by HPL, and to incentivize computer system
          designers to invest in capabilities that will benefit the collective performance
          of these applications.

      * - Tools and libraries
        - `ROCm with GPU-aware MPI container <https://github.com/amd/InfinityHub-CI/tree/main/base-gpu-mpi-rocm-docker>`_
        - Base container for GPU-aware MPI with ROCm for HPC applications. This
          project provides a boilerplate for building and running a Docker
          container with ROCm supporting GPU-aware MPI implementations using
          OpenMPI or UCX.

      * -
        - `Kokkos <https://github.com/amd/InfinityHub-CI/tree/main/kokkos>`_
        - Kokkos is a programming model in C++ for writing performance-portable
          applications for use across HPC platforms. It provides abstractions for both
          parallel execution of code and data management. Kokkos is designed to target
          complex node architectures with N-level memory hierarchies and multiple types
          of execution resources.

      * -
        - `PyFR <https://github.com/amd/InfinityHub-CI/tree/main/pyfr>`_
        - PyFR is an open-source Python-based framework for solving advection-diffusion
          type problems on streaming architectures using the Flux Reconstruction approach of
          Huynh. The framework is designed to solve a range of governing systems on mixed
          unstructured grids containing various element types. It is also designed to target a
          range of hardware platforms via an in-built domain-specific language derived
          from the Mako templating engine.

      * -
        - `RAJA <https://github.com/amd/InfinityHub-CI/tree/main/raja>`_
        - RAJA is a library of C++ software abstractions, primarily developed at Lawrence
          Livermore National Laboratory (LLNL), that enables architecture and programming
          model portability for HPC applications.

      * -
        - `Trilinos <https://github.com/amd/InfinityHub-CI/tree/main/trilinos>`_
        - The Trilinos Project is an effort to develop algorithms and enabling technologies
          within an object-oriented software framework for the solution of large-scale,
          complex multi-physics engineering and scientific problems.

To learn about ROCm for AI applications, see :doc:`../rocm-for-ai/index`.
42  docs/how-to/setting-cus.rst  Normal file
@@ -0,0 +1,42 @@

.. meta::
   :description: Setting the number of CUs
   :keywords: CU, CUs, number of CUs, compute units

.. _settings-cus-reference:

*************************************************************
Setting the number of compute units
*************************************************************

The GPU driver provides two environment variables to set the number of CUs used:

- ``HSA_CU_MASK``
- ``ROC_GLOBAL_CU_MASK``

The ``ROC_GLOBAL_CU_MASK`` variable sets the CU mask on queues created by the HIP or OpenCL runtimes. The ``HSA_CU_MASK`` variable sets the mask at a lower level of queue creation in the driver. It also sets the mask on the queues being profiled.

.. tip::

   When using GPUs to accelerate compute workloads, it sometimes becomes necessary to configure the hardware's usage of compute units (CUs). This is a more advanced option, so read this page fully before experimenting.

The environment variables have the following syntax:

::

   ID          = [0-9][0-9]*                    ex. base 10 numbers
   ID_list     = (ID | ID-ID)[, (ID | ID-ID)]*  ex. 0,2-4,7
   GPU_list    = ID_list                        ex. 0,2-4,7
   CU_list     = 0x[0-F]* | ID_list             ex. 0x337F OR 0,2-4,7
   CU_Set      = GPU_list : CU_list             ex. 0,2-4,7:0-15,32-47 OR 0,2-4,7:0x337F
   HSA_CU_MASK = CU_Set [; CU_Set]*             ex. 0,2-4,7:0-15,32-47; 3-9:0x337F

The GPU indices are taken after ``ROCR_VISIBLE_DEVICES`` reordering. The listed or masked CUs are enabled for the listed GPUs, and the others are disabled. Unlisted GPUs are not affected, and all of their CUs remain enabled.

Variable parsing stops when a syntax error occurs; the erroneous set and any following sets are ignored. Repeating GPU or CU IDs results in a syntax error. Specifying a mask with no usable CUs (a CU_list of 0x0) also results in a syntax error. To exclude GPU devices, use ``ROCR_VISIBLE_DEVICES`` instead.

.. note::

   These environment variables only affect ROCm software, not graphics applications.

   Not all CU configurations are valid on all devices. For example, on devices where two CUs can be combined into a WGP (for kernels running in WGP mode), it's not valid to disable only a single CU in a WGP. For more information about what to expect when disabling CUs, see the `Exploring AMD GPU Scheduling Details by Experimenting With "Worst Practices" <https://www.cs.unc.edu/~otternes/papers/rtsj2022.pdf>`_ paper.
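For example, the following sketch restricts GPU 0 to its first 16 CUs. This is a minimal illustration, not a
recommended setting; the mask value is an example only, and valid CU counts depend on your device. The variable must
be set before the ROCm runtime initializes, which is why it's exported before ``torch`` is imported.

.. code-block:: python

   import os

   # Enable only CUs 0-15 on GPU 0; other GPUs keep all CUs enabled.
   os.environ["HSA_CU_MASK"] = "0:0-15"

   import torch  # The runtime reads the mask when it initializes.
   x = torch.randn(1024, 1024, device="cuda")
   print((x @ x).sum())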
@@ -25,7 +25,6 @@ Our documentation is organized into the following categories:
:class-container: rocm-doc-grid

:::{grid-item-card}
:class-card: sd-text-black
:img-top: ./data/banner-installation.jpg
:img-alt: Install documentation
:padding: 2
@@ -34,20 +33,18 @@ Our documentation is organized into the following categories:
* {doc}`Quick start guide<rocm-install-on-linux:tutorial/quick-start>`
* {doc}`Linux install guide<rocm-install-on-linux:how-to/native-install/index>`
* {doc}`Package manager integration<rocm-install-on-linux:how-to/native-install/package-manager-integration>`
* {doc}`Install Docker containers<rocm-install-on-linux:how-to/docker>`
* {doc}`ROCm & Spack<rocm-install-on-linux:how-to/spack>`
* Windows
  * {doc}`Windows install guide<rocm-install-on-windows:how-to/install>`
  * {doc}`Application deployment guidelines<rocm-install-on-windows:conceptual/deployment-guidelines>`
* [Deep learning frameworks](./how-to/deep-learning-rocm.rst)
* {doc}`Install Docker containers<rocm-install-on-linux:how-to/docker>`
* {doc}`PyTorch for ROCm<rocm-install-on-linux:how-to/3rd-party/pytorch-install>`
* {doc}`TensorFlow for ROCm<rocm-install-on-linux:how-to/3rd-party/tensorflow-install>`
* {doc}`JAX for ROCm<rocm-install-on-linux:how-to/3rd-party/jax-install>`
* {doc}`MAGMA for ROCm<rocm-install-on-linux:how-to/3rd-party/magma-install>`
* {doc}`ROCm & Spack<rocm-install-on-linux:how-to/spack>`
:::

:::{grid-item-card}
:class-card: sd-text-black
:img-top: ./data/banner-compatibility.jpg
:img-alt: Compatibility information
:padding: 2
@@ -65,7 +62,6 @@ Our documentation is organized into the following categories:

<!-- markdownlint-disable MD051 -->
:::{grid-item-card}
:class-card: sd-text-black
:img-top: ./data/banner-reference.jpg
:img-alt: Reference documentation
:padding: 2
@@ -81,17 +77,19 @@ Our documentation is organized into the following categories:
* [Development](#development-tools)
* [Performance analysis](#performance-analysis)
* [System](#system-tools)
* [Environment Variables](./reference/env-variables.rst)
* [Hardware specifications](./reference/gpu-arch-specs.rst)
:::
<!-- markdownlint-enable MD051 -->

:::{grid-item-card}
:class-card: sd-text-black
:img-top: ./data/banner-howto.jpg
:img-alt: How-to documentation
:padding: 2

* [Using ROCm for AI](./how-to/rocm-for-ai/index.rst)
* [Using ROCm for HPC](./how-to/rocm-for-hpc/index.rst)
* [Fine-tuning LLMs and inference optimization](./how-to/llm-fine-tuning-optimization/index.rst)
* [System tuning for various architectures](./how-to/tuning-guides.md)
  * [MI100](./how-to/tuning-guides/mi100.md)
  * [MI200](./how-to/tuning-guides/mi200.md)
@@ -101,12 +99,12 @@ Our documentation is organized into the following categories:
* [Using AddressSanitizer](./conceptual/using-gpu-sanitizer.md)
* [Compiler disambiguation](./conceptual/compiler-disambiguation.md)
* [OpenMP support in ROCm](./about/compatibility/openmp.md)
* [Setting the number of CUs](./how-to/setting-cus)
* [System level debugging](./how-to/system-debugging.md)
* [GitHub examples](https://github.com/amd/rocm-examples)
:::

:::{grid-item-card}
:class-card: sd-text-black
:img-top: ./data/banner-conceptual.jpg
:img-alt: Conceptual documentation
:padding: 2

@@ -22,7 +22,7 @@
* {doc}`Composable Kernel <composable_kernel:index>`
* {doc}`MIGraphX <amdmigraphx:index>`
* {doc}`MIOpen <miopen:index>`
* {doc}`MIVisionX <mivisionx:doxygen/html/index>`
* {doc}`MIVisionX <mivisionx:index>`
* {doc}`rocAL <rocal:index>`
* {doc}`rocDecode <rocdecode:index>`
* {doc}`ROCm Performance Primitives (RPP) <rpp:index>`
920
docs/reference/env-variables.rst
Normal file
@@ -0,0 +1,920 @@
|
||||
.. meta::
|
||||
:description: Environment variables reference
|
||||
:keywords: AMD, ROCm, environment variables, environment, reference
|
||||
|
||||
.. role:: cpp(code)
|
||||
:language: cpp
|
||||
|
||||
.. _env-variables-reference:
|
||||
|
||||
*************************************************************
|
||||
ROCm environment variables
|
||||
*************************************************************
|
||||
|
||||
The following table lists the most commonly used environment variables in the ROCm software stack. These variables help to perform simple tasks such as building a ROCm library or running applications on AMDGPUs.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
:widths: 70,30
|
||||
|
||||
* - **Environment variable**
|
||||
- **Value**
|
||||
|
||||
* - | ``HIP_PATH``
|
||||
| The path of the HIP SDK on Microsoft Windows.
|
||||
- Default: ``C:/hip``
|
||||
|
||||
* - | ``HIP_DIR``
|
||||
| The path of the HIP SDK on Microsoft Windows. This variable is ignored, if ``HIP_PATH`` is set.
|
||||
- Default: ``C:/hip``
|
||||
|
||||
* - | ``ROCM_PATH``
|
||||
| The path of the installed ROCm software stack on Linux.
|
||||
- Default: ``/opt/rocm``
|
||||
|
||||
* - | ``HIP_PLATFORM``
|
||||
| The platform targeted by HIP. If ``HIP_PLATFORM`` is not set, then HIPCC attempts to auto-detect the platform, if it can find NVCC.
|
||||
- ``amd``, ``nvidia``
|
||||
|
||||
CLR environment variables
|
||||
=========================
|
||||
|
||||
AMD Common Language Runtime (:doc:`CLR <hip:understand/amd_clr>`) library contains source codes for AMD's compute languages runtimes:
|
||||
|
||||
* ``hipamd``: Contains implementation of HIP language on the AMD platform.
|
||||
* ``opencl``: Contains implementation of `OpenCL™ <https://www.khronos.org/opencl/>`_ on AMD platform. It is hosted at `clr/opencl <https://github.com/ROCm/clr/tree/develop/opencl>`_.
|
||||
* ``rocclr``: Contains common runtime used in HIP and OpenCL. This is hosted at `clr/rocclr <https://github.com/ROCm/clr/tree/develop/rocclr>`_.
|
||||
|
||||
The environment variables affecting the CLR library might affect HIP and OpenCL libraries or applications.
|
||||
|
||||
The following table lists the environment variables that affect ``opencl`` and ``hipamd``:
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
:widths: 70,30
|
||||
|
||||
* - **Environment variable**
|
||||
- **Value**
|
||||
|
||||
* - | ``ROCM_LIBPATCH_VERSION``
|
||||
| The ROCm version in the integer format. The format is
|
||||
| :cpp:`MAJOR * 10000 + MINOR * 100 + PATCH`
|
||||
- 50000, 60020...
|
||||
|
||||
* - | ``CPACK_DEBIAN_PACKAGE_RELEASE``
|
||||
| This is the numbering of the Debian package itself, i.e., the version of the packaging and not the version of the content.
|
||||
- 1, 2, 3...
|
||||
|
||||
* - | ``CPACK_RPM_PACKAGE_RELEASE``
|
||||
| This is the numbering of the RPM package itself, i.e., the version of the packaging and not the version of the content.
|
||||
- 1, 2, 3...
|
||||
|
||||
The following table lists the environment variables that affect ``hipamd``:
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
:widths: 70,30
|
||||
|
||||
* - **Environment variable**
|
||||
- **Value**
|
||||
|
||||
* - | ``HIP_FORCE_QUEUE_PROFILING``
|
||||
| Simulates the application to run in rocprof by forcing command queue profiling to ``on`` by default.
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HSA_OVERRIDE_GFX_VERSION``
|
||||
| Overrides the target version; used to enable HIP usage on unsupported hardware.
|
||||
- 11.0.0, 10.3.0
|
||||
|
||||
* - | ``HSA_DISABLE_CACHE``
|
||||
| Disables the L2 cache.
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HSAKMT_DEBUG_LEVEL``
|
||||
| When set to the highest level, the system prints memory allocation information.
|
||||
- 1, 2, ... 7
|
||||
|
||||
The following table lists the environment variables that affect ``rocclr``:
|
||||
|
||||
.. https://github.com/ROCm/clr/blob/develop/rocclr/utils/flags.hpp
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
:widths: 35,14,51
|
||||
|
||||
* - **Environment variable**
|
||||
- **Default value**
|
||||
- **Value**
|
||||
|
||||
* - | ``AMD_CPU_AFFINITY``
|
||||
| Resets CPU affinity of any runtime threads
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``AMD_DIRECT_DISPATCH``
|
||||
| Enables direct kernel dispatch. Currently available on Linux; under development for Windows.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``AMD_GPU_FORCE_SINGLE_FP_DENORM``
|
||||
| Forces denormalization for single precision.
|
||||
- ``-1``
|
||||
- | -1: Don't force
|
||||
| 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``AMD_LOG_LEVEL``
|
||||
| Enables HIP log on various level.
|
||||
- ``0``
|
||||
- | 0: Disable log.
|
||||
| 1: Enables log on error level.
|
||||
| 2: Enables log on warning and lower levels.
|
||||
| 3: Enables log on information and lower levels.
|
||||
| 4: Enables log on debug and lower levels.
|
||||
|
||||
* - | ``AMD_LOG_LEVEL_FILE``
|
||||
| Sets output file for ``AMD_LOG_LEVEL``.
|
||||
- stderr output
|
||||
-
|
||||
|
||||
* - | ``AMD_LOG_MASK``
|
||||
| Specifies HIP log filters. Here is the ` complete list of log masks <https://github.com/ROCm/clr/blob/develop/rocclr/utils/debug.hpp#L40>`_.
|
||||
- ``0x7FFFFFFF``
|
||||
- | 0x1: Log API calls.
|
||||
| 0x2: Kernel and copy commands and barriers.
|
||||
| 0x4: Synchronization and waiting for commands to finish.
|
||||
| 0x8: Decode and display AQL packets.
|
||||
| 0x10: Queue commands and queue contents.
|
||||
| 0x20: Signal creation, allocation, pool.
|
||||
| 0x40: Locks and thread-safety code.
|
||||
| 0x80: Kernel creations and arguments, etc.
|
||||
| 0x100: Copy debug.
|
||||
| 0x200: Detailed copy debug.
|
||||
| 0x400: Resource allocation, performance-impacting events.
|
||||
| 0x800: Initialization and shutdown.
|
||||
| 0x1000: Misc debug, not yet classified.
|
||||
| 0x2000: Show raw bytes of AQL packet.
|
||||
| 0x4000: Show code creation debug.
|
||||
| 0x8000: More detailed command info, including barrier commands.
|
||||
| 0x10000: Log message location.
|
||||
| 0x20000: Memory allocation.
|
||||
| 0x40000: Memory pool allocation, including memory in graphs.
|
||||
| 0x80000: Timestamp details.
|
||||
| 0xFFFFFFFF: Log always even mask flag is zero.
|
||||
|
||||
* - | ``AMD_OCL_BUILD_OPTIONS``
|
||||
| Sets the options for ``clBuildProgram`` and ``clCompileProgram``. This variable overrides the previously set options.
|
||||
- None
|
||||
-
|
||||
|
||||
* - | ``AMD_OCL_BUILD_OPTIONS_APPEND``
|
||||
| Appends the options for ``clBuildProgram`` and ``clCompileProgram``.
|
||||
- None
|
||||
-
|
||||
|
||||
* - | ``AMD_OCL_LINK_OPTIONS``
|
||||
| Sets the options for ``clLinkProgram``.
|
||||
- None
|
||||
-
|
||||
|
||||
* - | ``AMD_OCL_LINK_OPTIONS_APPEND``
|
||||
| Appends the options for ``clLinkProgram``.
|
||||
- None
|
||||
-
|
||||
|
||||
* - | ``AMD_OCL_WAIT_COMMAND``
|
||||
| Enforces a wait for every submitted command.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``OCL_SET_SVM_SIZE``
|
||||
| Sets shared virtual memory (SVM) space size in bytes for discrete GPUs.
|
||||
- ``65536``
|
||||
-
|
||||
|
||||
* - | ``OCL_STUB_PROGRAMS``
|
||||
| Enables OCL programs stubing.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``OPENCL_VERSION``
|
||||
| Force GPU OpenCL version.
|
||||
- ``200``
|
||||
-
|
||||
|
||||
* - | ``AMD_OPT_FLUSH``
|
||||
| Sets kernel flush option.
|
||||
- ``0x1``
|
||||
- | ``0x0`` = Uses system-scope fence operations.
|
||||
| ``0x1`` = Uses device-scope fence operations when possible.
|
||||
|
||||
* - | ``AMD_SERIALIZE_COPY``
|
||||
| Controls serialization of copies
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Waits for completion before enqueue.
|
||||
| 2: Waits for completion after enqueue.
|
||||
| 3: Both
|
||||
|
||||
* - | ``AMD_SERIALIZE_KERNEL``
|
||||
| Serializes kernel enqueue.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Waits for completion before enqueue.
|
||||
| 2: Waits for completion after enqueue.
|
||||
| 3: Both
|
||||
|
||||
* - | ``AMD_THREAD_TRACE_ENABLE``
|
||||
| Enables thread trace extension.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``CL_KHR_FP64``
|
||||
| Controls support for double precision.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``CQ_THREAD_STACK_SIZE``
|
||||
| The default command queue thread stack size in Bytes.
|
||||
- ``262144``: 256 KB
|
||||
-
|
||||
|
||||
* - | ``CUDA_VISIBLE_DEVICES``
|
||||
| The visible devices to HIP (whose indices are present in the sequence)
|
||||
- None
|
||||
- ``0,1,2``: List of the device indices. Depending on the number of devices in the system.
|
||||
|
||||
* - | ``DEBUG_CLR_GRAPH_PACKET_CAPTURE``
|
||||
| Controls capturing of graph packets.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``DEBUG_CLR_LIMIT_BLIT_WG``
|
||||
| Sets the limit for the number of workgroups in blit operations.
|
||||
- ``16``
|
||||
-
|
||||
|
||||
* - | ``DISABLE_DEFERRED_ALLOC``
|
||||
| Controls deferred memory allocation on device.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ADD_HBCC_SIZE``
|
||||
| Adds HBCC size to the reported device memory.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ANALYZE_HANG``
|
||||
| Allows you to analyze GPU hang issue.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_BLIT_ENGINE_TYPE``
|
||||
| Specifies blit engine type.
|
||||
- ``0``
|
||||
- | 0: Default
|
||||
| 1: Host
|
||||
| 2: CAL
|
||||
| 3: Kernel
|
||||
|
||||
* - | ``GPU_CP_DMA_COPY_SIZE``
|
||||
| Set maximum size of CP DMA copy in KB.
|
||||
- ``1``
|
||||
-
|
||||
|
||||
* - | ``GPU_DEBUG_ENABLE``
|
||||
| Enables collection of extra information for debugger at the cost of performance.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_DEVICE_ORDINAL``
|
||||
| Selects the device ordinal, which is a comma separated list of available devices.
|
||||
- None
|
||||
- A value of ``0,2`` exposes devices 1 and 3 in the system.
|
||||
|
||||
* - | ``GPU_DUMP_BLIT_KERNELS``
|
||||
| Controls dumping of the kernels for blit manager.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_DUMP_CODE_OBJECT``
|
||||
| Controls dumping of code object.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ENABLE_COOP_GROUPS``
|
||||
| Enables cooperative group launch.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ENABLE_HW_P2P``
|
||||
| Enables hardware peer to peer (P2P) path.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ENABLE_LC``
|
||||
| Enables LC path.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ENABLE_PAL``
|
||||
| Specifies platform abstraction library (PAL) backend.
|
||||
- ``2``
|
||||
- | 0: ROC
|
||||
| 1: PAL
|
||||
| 2: ROC or PAL
|
||||
|
||||
* - | ``GPU_ENABLE_WAVE32_MODE``
|
||||
| Enables Wave32 compilation in hardware, if available.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_ENABLE_WGP_MODE``
|
||||
| Enables WGP Mode in hardware, if available. Workgroups of waves are
|
||||
| dispatched in one of the two modes: CU or WGP.
|
||||
- ``1``
|
||||
- | 0: CU mode. The waves of a workgroup are distributed across just two SIMD32’s.
|
||||
| 1: WGP mode. The waves of a workgroup are distributed across all 4 SIMD32’s within a workgroup.
|
||||
|
||||
* - | ``GPU_FORCE_BLIT_COPY_SIZE``
|
||||
| Specifies the threshold size in KB, under which blit is forced instead of system direct memory access (SDMA).
|
||||
- ``0``
|
||||
-
|
||||
|
||||
* - | ``GPU_FORCE_QUEUE_PROFILING``
|
||||
| Forces command queue profiling.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_FLUSH_ON_EXECUTION``
|
||||
| Submits commands to hardware on every operation.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_IMAGE_BUFFER_WAR``
|
||||
| Enables image buffer workaround.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_IMAGE_DMA``
|
||||
| Enables DRM DMA for image transfers.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_MAX_COMMAND_BUFFERS``
|
||||
| Sets the maximum number of command buffers allocated per queue.
|
||||
- ``8``
|
||||
-
|
||||
|
||||
* - | ``GPU_MAX_HEAP_SIZE``
|
||||
| Sets the maximum size of the GPU heap (in percentage) on the board memory.
|
||||
- ``100``
|
||||
-
|
||||
|
||||
* - | ``GPU_MAX_HW_QUEUES``
|
||||
| Sets the maximum number of hardware queues to be allocated per device.
|
||||
- ``4``
|
||||
- This variable controls how many independent hardware queues HIP runtime can create per process, per device. If an application allocates more HIP streams than the specified value, then HIP runtime reuses the same hardware queues for the new streams in a round-robin manner. Note that this value doesn't apply to hardware queues that are created for CU-masked HIP streams or cooperative queues for HIP cooperative groups (single queue per device).
|
||||
|
||||
* - | ``GPU_MAX_REMOTE_MEM_SIZE``
|
||||
| Sets the maximum size in KB for device memory substitution with the system.
|
||||
- ``2``
|
||||
-
|
||||
|
||||
* - | ``GPU_MAX_SUBALLOC_SIZE``
|
||||
| Sets the maximum size for sub-allocations in KB.
|
||||
- ``4096``
|
||||
-
|
||||
|
||||
* - | ``GPU_MAX_USWC_ALLOC_SIZE``
|
||||
| Sets the maximum uncacheable speculative write combining (USWC) allocation size in MB.
|
||||
- ``2048``
|
||||
- -1: No limit
|
||||
|
||||
* - | ``GPU_MAX_WORKGROUP_SIZE``
|
||||
| Sets the maximum number of workitems in a workgroup for GPU.
|
||||
- ``0``: Sets no limit on workitems.
|
||||
-
|
||||
|
||||
* - | ``GPU_MIPMAP``
|
||||
| Enables GPU mipmap extension.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_NUM_COMPUTE_RINGS``
|
||||
| Sets the number of GPU compute rings.
|
||||
- ``2``
|
||||
- | 0: Disable
|
||||
| Any other number corresponds to the number of compute rings.
|
||||
|
||||
* - | ``GPU_NUM_MEM_DEPENDENCY``
|
||||
| Sets the number of memory objects for dependency tracking.
|
||||
- ``256``
|
||||
-
|
||||
|
||||
* - | ``GPU_PINNED_MIN_XFER_SIZE``
|
||||
| Sets the minimum buffer size (in MB) for pinned read and write transfers.
|
||||
- ``128``
|
||||
-
|
||||
|
||||
* - | ``GPU_PINNED_XFER_SIZE``
|
||||
| Sets the buffer size (in MB) for pinned read and write transfers.
|
||||
- ``32``
|
||||
-
|
||||
|
||||
* - | ``GPU_PRINT_CHILD_KERNEL``
|
||||
| Specifies the number of child kernels to be printed.
|
||||
- ``0``
|
||||
-
|
||||
|
||||
* - | ``GPU_RESOURCE_CACHE_SIZE``
|
||||
| Sets the resource cache size in MB.
|
||||
- ``64``
|
||||
-
|
||||
|
||||
* - | ``GPU_SINGLE_ALLOC_PERCENT``
|
||||
| Sets the maximum size of a single allocation as a percentage of the total.
|
||||
- ``85``
|
||||
-
|
||||
|
||||
* - | ``GPU_STAGING_BUFFER_SIZE``
|
||||
| Sets the GPU staging buffer size in MB.
|
||||
- ``4``
|
||||
-
|
||||
|
||||
* - | ``GPU_STREAMOPS_CP_WAIT``
|
||||
| Forces the stream memory operation to wait on command processor (CP).
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_USE_DEVICE_QUEUE``
|
||||
| Controls use of dedicated device queue for the actual submissions.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``GPU_WAVES_PER_SIMD``
|
||||
| Forces the number of waves per SIMD.
|
||||
- ``0``
|
||||
- 1-10
|
||||
|
||||
* - | ``GPU_XFER_BUFFER_SIZE``
|
||||
| Sets the transfer buffer size for image copy optimization in KB.
|
||||
- ``0``
|
||||
-
|
||||
|
||||
* - | ``HIP_FORCE_DEV_KERNARG``
|
||||
| Forces device memory for kernel arguments.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HIP_HIDDEN_FREE_MEM``
|
||||
| Specifies the amount of memory to hide from the free memory reported by ``hipMemGetInfo``.
|
||||
- ``0``: Disable
|
||||
-
|
||||
|
||||
* - | ``HIP_HOST_COHERENT``
|
||||
| Specifies if the memory is coherent between the host and GPU in ``hipHostMalloc``.
|
||||
- ``0``
|
||||
- | 0: Memory is not coherent.
|
||||
| 1: Memory is coherent.
|
||||
| Environment variable has effect, if the following conditions are statisfied:
|
||||
| - One of the ``hipHostMallocDefault``, ``hipHostMallocPortable``, ``hipHostMallocWriteCombined`` or ``hipHostMallocNumaUser`` flag set to 1.
|
||||
| - ``hipHostMallocCoherent``, ``hipHostMallocNonCoherent`` and ``hipHostMallocMapped`` flags set to 0.
|
||||
|
||||
* - | ``HIP_INITIAL_DM_SIZE``
|
||||
| Sets the initial heap size for device malloc.
|
||||
- ``8388608``: 8 MB
|
||||
-
|
||||
|
||||
* - | ``HIP_LAUNCH_BLOCKING``
|
||||
| Controls serialization of kernel execution.
|
||||
- ``0``
|
||||
- | 0: Disable. Kernel executes normally.
|
||||
| 1: Enable. Serializes kernel execution; behaves similar to ``AMD_SERIALIZE_KERNEL``.
|
||||
|
||||
* - | ``HIP_MEM_POOL_SUPPORT``
|
||||
| Enables memory pool support in HIP.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HIP_MEM_POOL_USE_VM``
|
||||
| Enables memory pool support in HIP.
|
||||
- | ``0``: Default value on other OS.
|
||||
| ``1``: Default value on Microsoft Windows.
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HIP_USE_RUNTIME_UNBUNDLER``
|
||||
| Controls use of runtime code object unbundler.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HIP_VISIBLE_DEVICES``
|
||||
| Specifies the indices of the devices allowed to be visible to HIP.
|
||||
- None
|
||||
- 0,1,2: Depending on the number of devices on the system.
|
||||
|
||||
* - | ``HIP_VMEM_MANAGE_SUPPORT``
|
||||
| Enables virtual memory management support.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HIPCC_VERBOSE``
|
||||
| Controls the extra information to be displayed during the build such as compiler commands with flags, paths and arguments.
|
||||
- ``0``
|
||||
- | 0x1: Print detailed compiler commands.
|
||||
| 0x2: Print HIP, ROCm and CUDA paths (``HIP_PATH``, ``ROCM_PATH``, ``HIP_CLANG_PATH``, ...).
|
||||
| 0x4: Print HIPCC arguments.
|
||||
|
||||
* - | ``HIPRTC_COMPILE_OPTIONS_APPEND``
|
||||
| Sets compile options needed for ``hiprtc`` compilation.
|
||||
- None
|
||||
- ``--gpu-architecture=gfx906:sramecc+:xnack``, ``-fgpu-rdc``
|
||||
|
||||
* - | ``HIPRTC_LINK_OPTIONS_APPEND``
|
||||
| Sets link options needed for ``hiprtc`` compilation.
|
||||
- None
|
||||
-
|
||||
|
||||
* - | ``HIPRTC_USE_RUNTIME_UNBUNDLER``
|
||||
| Forces runtime unbundler in hiprtc.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``HSA_KERNARG_POOL_SIZE``
|
||||
| Sets the pool size for kernel arguments.
|
||||
- ``1048576``: 1 MB
|
||||
-
|
||||
|
||||
* - | ``HSA_LOCAL_MEMORY_ENABLE``
|
||||
| Enables use of local memory on HSA device.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``PAL_DISABLE_SDMA``
|
||||
| Disables SDMA for PAL.
|
||||
- ``0``
|
||||
- | 0: Enable SDMA for PAL.
|
||||
| 1: Disable SDMA for PAL.
|
||||
|
||||
* - | ``PAL_MALL_POLICY``
|
||||
| Controls the behaviour of allocations with respect to the MALL.
|
||||
- ``0``
|
||||
- | 0: MALL policy is decided by KMD.
|
||||
| 1: Allocations are never put through the MALL.
|
||||
| 2: Allocations will always be put through the MALL.
|
||||
|
||||
* - | ``PAL_ALWAYS_RESIDENT``
|
||||
| Forces memory resources to become resident during allocation.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``PAL_EMBED_KERNEL_MD``
|
||||
| Enables writing kernel metadata into command buffers.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``PAL_FORCE_ASIC_REVISION``
|
||||
| Forces a specific ASIC revision on all devices.
|
||||
- ``0``
|
||||
-
|
||||
|
||||
* - | ``PAL_HIP_IPC_FLAG``
|
||||
| Enables inter-process flag for device allocation in PAL HIP.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``PAL_PREPINNED_MEMORY_SIZE``
|
||||
| Sets the size in KB of pre-pinned memory.
|
||||
- ``64``
|
||||
-
|
||||
|
||||
* - | ``PAL_RGP_DISP_COUNT``
|
||||
| Sets the number of dispatches for RGP capture with SQTT.
|
||||
- ``10000``
|
||||
-
|
||||
|
||||
* - | ``REMOTE_ALLOC``
|
||||
| Enables use of remote memory for the global heap allocation.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``ROC_ACTIVE_WAIT_TIMEOUT``
|
||||
| Forces active wait of GPU interrupt for the timeout in us.
|
||||
- ``0``
|
||||
-
|
||||
|
||||
* - | ``ROC_AQL_QUEUE_SIZE``
|
||||
| Sets the AQL queue size in bytes in the AQL packets.
|
||||
- ``16384``: 16 KB
|
||||
-
|
||||
|
||||
* - | ``ROC_CPU_WAIT_FOR_SIGNAL``
|
||||
| Enable CPU wait for dependent HSA signals.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``ROC_ENABLE_LARGE_BAR``
|
||||
| Enable large bar if supported by the device.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``ROC_GLOBAL_CU_MASK``
|
||||
| Sets a global CU mask, entered as hex value for all queues. Each active bit represents one CU, e.g., ``0xf`` enables 4 CUs.
|
||||
- None
|
||||
-
|
||||
|
||||
* - | ``ROC_HMM_FLAGS``
|
||||
| Sets ROCm HMM configuration flags.
|
||||
- ``0``: Disabled
|
||||
-
|
||||
|
||||
* - | ``ROC_P2P_SDMA_SIZE``
|
||||
| Sets the minimum size in KB for peer to peer (P2P) transfer with SDMA.
|
||||
- ``1024``: 1 MB
|
||||
-
|
||||
|
||||
* - | ``ROC_SIGNAL_POOL_SIZE``
|
||||
| Sets the initial size for HSA signal pool.
|
||||
- ``32``
|
||||
-
|
||||
|
||||
* - | ``ROC_SKIP_KERNEL_ARG_COPY``
|
||||
| Allows the runtime to skip kernel argument copy.
|
||||
- ``0``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``ROC_SYSTEM_SCOPE_SIGNAL``
|
||||
| Enable system scope for signals, uses interrupts.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``ROC_USE_FGS_KERNARG``
|
||||
| Enables use of fine grain kernel arguments segment for supported ASICs.
|
||||
- ``1``
|
||||
- | 0: Disable
|
||||
| 1: Enable
|
||||
|
||||
* - | ``ROCPROFILER_REGISTER_ROOT``
|
||||
| Sets the path to ``rocProfiler``.
|
||||
- None
|
||||
-
|
||||
|
||||
The following table lists the debug environment variables that affect ``rocclr`` of the CLR project. These environment variables take effect only in DEBUG builds.

.. list-table::
    :header-rows: 1
    :widths: 35,14,51

    * - **Environment variable**
      - **Default value**
      - **Value**

    * - | ``AMD_OCL_SUBST_OBJFILE``
        | Specifies the binary substitution config file for OpenCL.
      - None
      -

    * - | ``CPU_MEMORY_ALIGNMENT_SIZE``
        | Sets the size in bytes for the default alignment of guarded memory on the CPU.
      - ``256``
      -

    * - | ``CPU_MEMORY_GUARD_PAGE_SIZE``
        | Sets the size of the CPU memory guard page in KB.
      - ``64``: 64 KB
      -

    * - | ``CPU_MEMORY_GUARD_PAGES``
        | Enables the use of guard pages for CPU memory.
      - ``0``
      - | 0: Disable
        | 1: Enable

    * - | ``MEMOBJ_BASE_ADDR_ALIGN``
        | Sets the alignment of the base address of any allocated memory object.
      - ``4096``: 4 KB
      -

    * - | ``PARAMETERS_MIN_ALIGNMENT``
        | Specifies the minimum alignment required for the abstract parameters stack.
      - ``64`` with ``__AVX512F__``, ``32`` with ``__AVX__``, and ``16`` otherwise
      -
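
These debug variables are typically toggled per run. A minimal sketch, assuming a POSIX shell and a hypothetical DEBUG build of a HIP application named ``./my_debug_app``:

.. code-block:: shell

    # Guard CPU allocations with 128 KB guard pages; these variables
    # are only read by DEBUG builds of rocclr.
    export CPU_MEMORY_GUARD_PAGES=1
    export CPU_MEMORY_GUARD_PAGE_SIZE=128
    ./my_debug_app   # hypothetical DEBUG-build application binary
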
ROCR-Runtime environment variables
==================================

.. https://github.com/ROCm/ROCR-Runtime/blob/master/src/core/util/flag.h
.. We need to extend the following list.

The following table lists the ROCR-Runtime environment variables:

.. list-table::
    :header-rows: 1
    :widths: 35,14,51

    * - **Environment variable**
      - **Default value**
      - **Value**

    * - | ``ROCR_VISIBLE_DEVICES``
        | Specifies a list of device indices or UUIDs to be exposed to applications.
      - None
      - ``0,GPU-DEADBEEFDEADBEEF``

    * - | ``HSA_SCRATCH_MEM``
        | Specifies the maximum amount of scratch memory that can be used per process per GPU.
      -
      -

    * - | ``HSA_XNACK``
        | Enables XNACK.
      - None
      - 1: Enable

    * - | ``HSA_CU_MASK``
        | Sets the mask at a lower level, at queue creation in the driver.
        | This mask is also applied to the queues being profiled. See the example following this table.
      - None
      - ``1:0-8``

    * - | ``HSA_ENABLE_SDMA``
        | Enables the use of direct memory access (DMA) engines in all copy directions (Host-to-Device, Device-to-Host, Device-to-Device) when using any of the following APIs:
        | ``hsa_memory_copy``,
        | ``hsa_amd_memory_fill``,
        | ``hsa_amd_memory_async_copy``,
        | ``hsa_amd_memory_async_copy_on_engine``.
      - ``1``
      - | 0: Disable
        | 1: Enable

    * - | ``HSA_ENABLE_PEER_SDMA``
        | Enables the use of DMA engines for Device-to-Device copies when using any of the following APIs:
        | ``hsa_memory_copy``,
        | ``hsa_amd_memory_async_copy``,
        | ``hsa_amd_memory_async_copy_on_engine``.
        | Note that this environment variable is ignored if ``HSA_ENABLE_SDMA`` is set to 0.
      - ``1``
      - | 0: Disable
        | 1: Enable
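
Device visibility and queue CU masking can be combined at launch time. A minimal sketch, assuming a POSIX shell and a hypothetical application binary ``./my_hip_app``:

.. code-block:: shell

    # Expose device 0 plus one device selected by UUID, enable XNACK,
    # and apply the CU mask format shown in the table above (queue 1,
    # CUs 0 through 8).
    export ROCR_VISIBLE_DEVICES=0,GPU-DEADBEEFDEADBEEF
    export HSA_XNACK=1
    export HSA_CU_MASK=1:0-8
    ./my_hip_app   # hypothetical application binary
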
rocPRIM environment variables
=============================

The following table lists the environment variables used in the rocPRIM library.

.. list-table::
    :header-rows: 1
    :widths: 70,30

    * - **Environment variable**
      - **Default value**

    * - | ``HIP_PATH``
        | Specifies the path of the HIP SDK on Microsoft Windows.
      - ``C:/hip``

    * - | ``HIP_DIR``
        | Specifies the path of the HIP SDK on Microsoft Windows. This variable is ignored if ``HIP_PATH`` is set.
      - ``C:/hip``

    * - | ``VCPKG_PATH``
        | Specifies the path of the ``vcpkg`` package manager on Microsoft Windows. This environment variable has no effect on Linux.
      - ``C:/github/vcpkg``

    * - | ``ROCM_PATH``
        | Specifies the path of the installed ROCm software stack on Linux.
      - ``/opt/rocm``

    * - | ``ROCM_CMAKE_PATH``
        | Specifies the path of the installed ROCm ``cmake`` file on Microsoft Windows.
      - ``C:/hipSDK``

    * - | ``HIPCC_COMPILE_FLAGS_APPEND``
        | Enables extra ``amdclang++`` compiler flags on Linux. This environment variable is ignored if the ``CXX`` environment variable is set.
      - None

    * - | ``ROCPRIM_USE_HMM``
        | Makes the test suite use unified memory when set to ``1`` during the tests.
      - None

    * - | ``CTEST_RESOURCE_GROUP_0``
        | Enables grouping of the tests for different CI steps. This environment variable is used by CI and is of little use to most users.
      - None
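
On Linux, these variables can steer a rocPRIM build and test run. A minimal sketch, assuming a POSIX shell and a hypothetical build tree in ``./build``:

.. code-block:: shell

    # Append an example compiler flag (only honored while CXX is unset)
    # and run the test suite with unified memory enabled.
    export ROCM_PATH=/opt/rocm
    export HIPCC_COMPILE_FLAGS_APPEND="-Wall"
    cmake -S . -B build -DBUILD_TEST=ON   # hypothetical configure step
    cmake --build build
    cd build && ROCPRIM_USE_HMM=1 ctest --output-on-failure
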
hipCUB environment variables
============================

The following table lists the environment variables used in the hipCUB library.

.. list-table::
    :header-rows: 1
    :widths: 70,30

    * - **Environment variable**
      - **Default value**

    * - | ``HIP_PATH``
        | Specifies the path of the HIP SDK on Microsoft Windows.
      - ``C:/hip``

    * - | ``HIP_DIR``
        | Specifies the path of the HIP SDK on Microsoft Windows. This variable is ignored if ``HIP_PATH`` is set.
      - ``C:/hip``

    * - | ``VCPKG_PATH``
        | Specifies the path of the ``vcpkg`` package manager on Microsoft Windows. This environment variable has no effect on Linux.
      - ``C:/github/vcpkg``

    * - | ``ROCM_PATH``
        | Specifies the path of the installed ROCm software stack on Linux.
      - ``/opt/rocm``

    * - | ``HIPCC_COMPILE_FLAGS_APPEND``
        | Enables extra ``amdclang`` or ``amdclang++`` compiler flags on Linux. This environment variable is ignored if the ``CXX`` or ``CC`` environment variable is set.
      - None

    * - | ``HIPCUB_USE_HMM``
        | Makes the test suite use unified memory when set to ``1`` during the tests.
      - None

    * - | ``CTEST_RESOURCE_GROUP_0``
        | Enables grouping of the tests for different CI steps. This environment variable is used by CI and is of little use to most users.
      - None
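
On Microsoft Windows, the SDK and package-manager locations can be supplied the same way. A minimal sketch, assuming a POSIX-style shell such as Git Bash and hypothetical install paths:

.. code-block:: shell

    # Point the hipCUB build at the HIP SDK and vcpkg; HIP_DIR would be
    # ignored here because HIP_PATH is set.
    export HIP_PATH="C:/hip"
    export VCPKG_PATH="C:/github/vcpkg"
    cmake -S . -B build   # hypothetical configure step
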
rocThrust environment variables
===============================

The following table lists the environment variables used in the rocThrust library.

.. list-table::
    :header-rows: 1
    :widths: 70,30

    * - **Environment variable**
      - **Default value**

    * - | ``HIP_PATH``
        | Specifies the path of the HIP SDK on Microsoft Windows.
      - ``C:/hip``

    * - | ``HIP_DIR``
        | Specifies the path of the HIP SDK on Microsoft Windows. This variable is ignored if ``HIP_PATH`` is set.
      - ``C:/hip``

    * - | ``VCPKG_PATH``
        | Specifies the path of the ``vcpkg`` package manager on Microsoft Windows. This environment variable has no effect on Linux.
      - ``C:/github/vcpkg``

    * - | ``ROCM_PATH``
        | Specifies the path of the installed ROCm software stack on Linux.
      - ``/opt/rocm``

    * - | ``ROCTHRUST_USE_HMM``
        | Makes the test suite use unified memory when set to ``1`` during the tests.
      - None

    * - | ``CTEST_RESOURCE_GROUP_0``
        | Enables grouping of the tests for different CI steps. This environment variable is used by CI and is of little use to most users.
      - None
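
As with rocPRIM and hipCUB, the rocThrust test suite can be pointed at unified memory for a single run. A minimal sketch, assuming an already built test tree in ``./build`` (a hypothetical path):

.. code-block:: shell

    # Run the rocThrust tests once with unified memory (HMM) enabled;
    # the variable is only read while the tests execute.
    cd build && ROCTHRUST_USE_HMM=1 ctest --output-on-failure
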