Compare commits

96 Commits: docs/6.3.0...james/buil

Commit SHA1s (author and date columns were empty in the compare table):

23f1118209, 9696d8efb2, 9dbffddfe7, 3166703028, 85bd6e98f5, f76145c2ad, 027b2ea376, 76d6e892bb,
fe69fc1bb4, 4d31d717a6, 1f74defe18, 3cf3266c49, 47502421e1, bf9727db74, 49d253dd13, 94f97d4428,
33891d6fd0, 859923e595, 74f3e26b62, 09a5a2f23a, 59f217be22, 2fd18ab1b6, d275733631, 16e18bf5e6,
c9dd86f2d2, 95845105a5, 14a6fd5837, eb06305a88, 771518a2aa, 3e714f683c, 1998affd4f, 8d3de707e2,
5f86dba37c, 9a602592a0, 6d524affd2, 988da8a0d1, 758e8a33db, 21d26e52d0, 134ee09631, 4f10f22920,
935a55f703, 45d59ffdba, c2be7ee900, 279a241c11, 0356ffd148, cf3c4d1e67, cbe105ae8f, 5c5b5cce73,
72dce7937b, 08d9286cd5, 6a7d8654ad, c5ee1196c4, dc648ad764, f9dbc1f21f, a857597340, e839c63a01,
6d997135a5, 2d6a601253, a2d128749c, bacb49681e, 04fdc08328, 1b33f1d7da, fd067f7b3b, 2a7520f08a,
a591218531, 5271c2c82d, 59a928f3a7, 22572a9857, 49e50b93c6, 3354099b9c, 794b34f40e, 25ef417b31,
78f9adc6ec, 4abcae54a8, 2690506e64, 3dffe1998a, 059c2cd9a4, f53faa19ea, c5bf6c39ca, e677ebcb47,
a52c98cb2b, d4ad0a838d, e9e9fa4ba5, c18694b0fe, d83ed9d58a, 0a237dfd42, c1f0a8d9d9, c9754cb9d8,
b7ecf6d552, 48d2d16563, 47cda1bc70, 34c0266496, 8e3d51c31d, b9ec9507db, be19ac1071, 916b5513cf
@@ -197,3 +197,4 @@ jobs:
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
    parameters:
      componentName: MIOpen
      testParameters: '-VV --output-on-failure --force-new-ctest-process --output-junit test_output.xml --exclude-regex test_rnn_seq_api'
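For reference, the test step above maps to a plain CTest invocation. A minimal local sketch, assuming MIOpen has already been configured and built in `./build`:

```bash
# Hypothetical local equivalent of the MIOpen test step above.
cd build
ctest -VV --output-on-failure --force-new-ctest-process \
  --output-junit test_output.xml \
  --exclude-regex test_rnn_seq_api
```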
@@ -126,6 +126,7 @@ jobs:
      componentName: comgr
      extraBuildFlags: >-
        -DCMAKE_PREFIX_PATH="$(Build.SourcesDirectory)/llvm/build;$(Build.SourcesDirectory)/amd/device-libs/build"
        -DCOMGR_DISABLE_SPIRV=1
        -DCMAKE_BUILD_TYPE=Release
      cmakeBuildDir: 'amd/comgr/build'
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml
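Outside the pipeline, the same comgr configuration can be reproduced with a direct CMake call. A sketch, with `SRC` standing in for `$(Build.SourcesDirectory)` (an assumption, not part of the diff):

```bash
# Hypothetical local equivalent of the comgr extraBuildFlags above.
SRC=$HOME/rocm-src
cmake -S "$SRC/amd/comgr" -B "$SRC/amd/comgr/build" \
  -DCMAKE_PREFIX_PATH="$SRC/llvm/build;$SRC/amd/device-libs/build" \
  -DCOMGR_DISABLE_SPIRV=1 \
  -DCMAKE_BUILD_TYPE=Release
cmake --build "$SRC/amd/comgr/build" --parallel
```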
@@ -181,6 +181,7 @@ jobs:
    parameters:
      dependencyList: ${{ parameters.rocmDependencies }}
      gpuTarget: $(JOB_GPU_TARGET)
      setupHIPLibrarySymlinks: true
      ${{ if eq(parameters.checkoutRef, '') }}:
        dependencySource: staging
      ${{ elseif ne(parameters.checkoutRef, '') }}:
@@ -41,6 +41,7 @@ parameters:
    - ROCR-Runtime
    - rocprofiler-register
    - roctracer
    - aomp

jobs:
- job: rocprofilersdk
@@ -51,6 +51,7 @@ parameters:
    - rocprofiler
    - rocprofiler-register
    - roctracer
    - rocprofiler-sdk

jobs:
- job: rocprofiler_systems
@@ -73,6 +74,12 @@ jobs:
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
    parameters:
      checkoutRepo: ${{ parameters.checkoutRepo }}
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml
    parameters:
      ${{ if eq(parameters.checkoutRef, '') }}:
        dependencySource: staging
      ${{ elseif ne(parameters.checkoutRef, '') }}:
        dependencySource: tag-builds
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml
    parameters:
      dependencyList: ${{ parameters.rocmDependencies }}
@@ -109,6 +116,7 @@ jobs:
        -DROCPROFSYS_BUILD_TESTING=ON
        -DROCPROFSYS_BUILD_DYNINST=ON
        -DROCPROFSYS_BUILD_LIBUNWIND=ON
        -DROCPROFSYS_DISABLE_EXAMPLES="openmp-target"
        -DDYNINST_BUILD_TBB=ON
        -DDYNINST_BUILD_ELFUTILS=ON
        -DDYNINST_BUILD_LIBIBERTY=ON
@@ -142,6 +142,10 @@ parameters:
    - binary_ufuncs
    - autograd
    # - inductor/torchinductor takes too long
# set to false to disable torchvision build and test
- name: includeVision
  type: boolean
  default: false

trigger: none
pr: none
@@ -237,6 +241,12 @@ jobs:
        git clone https://github.com/pytorch/builder.git --depth=1 --recurse-submodules
        sudo ln -s $(Build.SourcesDirectory)/builder /builder
      workingDirectory: $(Build.SourcesDirectory)
  - task: Bash@3
    displayName: Temporarily Patch CK Submodule
    inputs:
      targetType: inline
      script: git pull origin develop
      workingDirectory: $(Build.SourcesDirectory)/pytorch/third_party/composable_kernel
  - task: Bash@3
    displayName: Install patchelf
    inputs:
@@ -296,59 +306,60 @@ jobs:
      sourceDir: /remote/wheelhouserocm$(ROCM_VERSION)
      contentsString: '*.whl'
  # common helper source for pytorch vision and audio
  - task: Bash@3
    displayName: git clone pytorch test-infra
    inputs:
      targetType: inline
      script: git clone https://github.com/pytorch/test-infra.git --depth=1 --recurse-submodules
      workingDirectory: $(Build.SourcesDirectory)
  - task: Bash@3
    displayName: install package helper
    inputs:
      targetType: inline
      script: python3 -m pip install test-infra/tools/pkg-helpers
      workingDirectory: $(Build.SourcesDirectory)
  - task: Bash@3
    displayName: pytorch pkg helpers
    inputs:
      targetType: inline
      script: CU_VERSION=${CU_VERSION} CHANNEL=${CHANNEL} python -m pytorch_pkg_helpers
  # get torch vision source and build
  - task: Bash@3
    displayName: git clone pytorch vision
    inputs:
      targetType: inline
      script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules
      workingDirectory: $(Build.SourcesDirectory)
  - task: Bash@3
    displayName: Build vision
    inputs:
      targetType: inline
      script: >-
        TORCH_PACKAGE_NAME=torch.$(ROCM_BRANCH).$(JOB_GPU_TARGET)
        TORCHVISION_PACKAGE_NAME=torchvision.$(ROCM_BRANCH).$(JOB_GPU_TARGET)
        PYTORCH_VERSION=$(cat $(Build.SourcesDirectory)/pytorch/version.txt | cut -da -f1)post$(date -u +%Y%m%d)
        BUILD_VERSION=$(cat $(Build.SourcesDirectory)/vision/version.txt | cut -da -f1)post$(date -u +%Y%m%d)
        python3 setup.py bdist_wheel
      workingDirectory: $(Build.SourcesDirectory)/vision
  - task: Bash@3
    displayName: Relocate vision
    inputs:
      targetType: inline
      script: python3 packaging/wheel/relocate.py
      workingDirectory: $(Build.SourcesDirectory)/vision
  - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml
    parameters:
      sourceDir: $(Build.SourcesDirectory)/vision/dist
      contentsString: '*.whl'
      clean: false
  - ${{ if eq(parameters.includeVision, true) }}:
    - task: Bash@3
      displayName: git clone pytorch test-infra
      inputs:
        targetType: inline
        script: git clone https://github.com/pytorch/test-infra.git --depth=1 --recurse-submodules
        workingDirectory: $(Build.SourcesDirectory)
    - task: Bash@3
      displayName: install package helper
      inputs:
        targetType: inline
        script: python3 -m pip install test-infra/tools/pkg-helpers
        workingDirectory: $(Build.SourcesDirectory)
    - task: Bash@3
      displayName: pytorch pkg helpers
      inputs:
        targetType: inline
        script: CU_VERSION=${CU_VERSION} CHANNEL=${CHANNEL} python -m pytorch_pkg_helpers
    # get torch vision source and build
    - task: Bash@3
      displayName: git clone pytorch vision
      inputs:
        targetType: inline
        script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules
        workingDirectory: $(Build.SourcesDirectory)
    - task: Bash@3
      displayName: Build vision
      inputs:
        targetType: inline
        script: >-
          TORCH_PACKAGE_NAME=torch.$(ROCM_BRANCH).$(JOB_GPU_TARGET)
          TORCHVISION_PACKAGE_NAME=torchvision.$(ROCM_BRANCH).$(JOB_GPU_TARGET)
          PYTORCH_VERSION=$(cat $(Build.SourcesDirectory)/pytorch/version.txt | cut -da -f1)post$(date -u +%Y%m%d)
          BUILD_VERSION=$(cat $(Build.SourcesDirectory)/vision/version.txt | cut -da -f1)post$(date -u +%Y%m%d)
          python3 setup.py bdist_wheel
        workingDirectory: $(Build.SourcesDirectory)/vision
    - task: Bash@3
      displayName: Relocate vision
      inputs:
        targetType: inline
        script: python3 packaging/wheel/relocate.py
        workingDirectory: $(Build.SourcesDirectory)/vision
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml
      parameters:
        sourceDir: $(Build.SourcesDirectory)/vision/dist
        contentsString: '*.whl'
        clean: false
  - task: PublishPipelineArtifact@1
    displayName: 'wheel file Publish'
    retryCountOnTaskFailure: 3
    inputs:
      targetPath: $(Build.BinariesDirectory)

- job: torchvision_testing
- job: pytorch_testing
  dependsOn: pytorch
  condition: and(succeeded(), eq(variables.ENABLE_GFX942_TESTS, 'true'), not(containsValue(split(variables.DISABLED_GFX942_TESTS, ','), variables['Build.DefinitionName'])))
  variables:
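The `PYTORCH_VERSION` and `BUILD_VERSION` expressions in the build steps above split `version.txt` on the letter `a` (dropping the `aN` pre-release suffix) and append a UTC date stamp. A small illustration with a made-up `version.txt` value:

```bash
# Illustration only: the version string below is an assumption, not taken from the repo.
echo "2.4.0a0" > version.txt
# cut -da -f1 keeps everything before the first 'a', i.e. "2.4.0".
BUILD_VERSION=$(cat version.txt | cut -da -f1)post$(date -u +%Y%m%d)
echo "$BUILD_VERSION"   # e.g. 2.4.0post20250101
```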
@@ -401,12 +412,13 @@ jobs:
      targetType: inline
      script: git clone https://github.com/pytorch/pytorch.git --depth=1 --recurse-submodules
      workingDirectory: $(Build.SourcesDirectory)
  - task: Bash@3
    displayName: git clone pytorch vision
    inputs:
      targetType: inline
      script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules
      workingDirectory: $(Build.SourcesDirectory)
  - ${{ if eq(parameters.includeVision, true) }}:
    - task: Bash@3
      displayName: git clone pytorch vision
      inputs:
        targetType: inline
        script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules
        workingDirectory: $(Build.SourcesDirectory)
  - task: Bash@3
    displayName: Install Wheel Files
    inputs:
@@ -510,13 +522,14 @@ jobs:
      script: pytest test/test_${{ torchTest }}.py
  # Reference on what tests to run for torchvision found in private repo:
  # https://github.com/ROCm/rocAutomation/blob/jenkins-pipelines/pytorch/pytorch_ci/test_torchvision.sh#L51
  - task: Bash@3
    displayName: Test vision/transforms
    continueOnError: true
    inputs:
      targetType: inline
      script: pytest test/test_transforms.py
      workingDirectory: $(Build.SourcesDirectory)/vision
  - ${{ if eq(parameters.includeVision, true) }}:
    - task: Bash@3
      displayName: Test vision/transforms
      continueOnError: true
      inputs:
        targetType: inline
        script: pytest test/test_transforms.py
        workingDirectory: $(Build.SourcesDirectory)/vision
  - task: Bash@3
    displayName: Uninstall Wheel Files
    inputs:
@@ -165,6 +165,11 @@ parameters:
- name: skipLlvmSymlink
  type: boolean
  default: false
# set to true if dlopen calls for HIP libraries are causing failures
# because they do not follow the shared library symlink convention
- name: setupHIPLibrarySymlinks
  type: boolean
  default: false
# some ROCm components can specify a GPU target, and this affects downloads
- name: gpuTarget
  type: string

@@ -280,6 +285,37 @@ steps:
      for file in amdclang amdclang++ amdclang-cl amdclang-cpp amdflang amdlld aompcc mygpu mycpu offload-arch; do
        sudo ln -s $(Agent.BuildDirectory)/rocm/llvm/bin/$file $(Agent.BuildDirectory)/rocm/bin/$file
      done
  # dlopen calls within a ctest or pytest sequence run into issues when the shared library symlink convention is not followed
  # the convention is as follows:
  #   the unversioned .so is a symlink to the major-version .so
  #   the major-version .so is a symlink to the fully versioned .so
  # HIP libraries do not follow this convention; each .so is a copy of the others
  # changing the library structure to follow the symlink convention resolves some test failures
  - ${{ if eq(parameters.setupHIPLibrarySymlinks, true) }}:
    - task: Bash@3
      displayName: Setup symlinks for hip libraries
      inputs:
        targetType: inline
        workingDirectory: $(Agent.BuildDirectory)/rocm/lib
        script: |
          LIBRARIES=("libamdhip64" "libhiprtc-builtins" "libhiprtc")
          for LIB_NAME in "${LIBRARIES[@]}"; do
            VERSIONED_SO=$(ls ${LIB_NAME}.so.* 2>/dev/null | grep -E "${LIB_NAME}\.so\.[0-9]+\.[0-9]+\.[0-9]+(-.*)?" | sort -V | tail -n 1)
            if [[ -z "$VERSIONED_SO" ]]; then
              continue
            fi
            MAJOR_VERSION=$(echo "$VERSIONED_SO" | grep -oP "${LIB_NAME}\.so\.\K[0-9]+")
            if [[ -e "${LIB_NAME}.so.${MAJOR_VERSION}" && ! -L "${LIB_NAME}.so.${MAJOR_VERSION}" ]]; then
              rm -f "${LIB_NAME}.so.${MAJOR_VERSION}"
            fi
            if [[ -e "${LIB_NAME}.so" && ! -L "${LIB_NAME}.so" ]]; then
              rm -f "${LIB_NAME}.so"
            fi
            ln -sf "$VERSIONED_SO" "${LIB_NAME}.so.${MAJOR_VERSION}"
            ln -sf "${LIB_NAME}.so.${MAJOR_VERSION}" "${LIB_NAME}.so"
            echo "Symlinks created for $LIB_NAME:"
            ls -l ${LIB_NAME}.so*
          done
  - task: Bash@3
    displayName: 'List downloaded ROCm files'
    inputs:
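One way to confirm that the resulting layout follows the convention described in the comments above is to walk the symlink chain and compare it with the SONAME recorded in the library. A sketch, assuming an installed tree under `/opt/rocm/lib`:

```bash
# Hypothetical verification of the symlink chain created by the step above.
cd /opt/rocm/lib
for lib in libamdhip64 libhiprtc-builtins libhiprtc; do
  echo "== $lib =="
  readlink -f "$lib.so"                                # unversioned -> major -> fully versioned
  readelf -d "$(readlink -f "$lib.so")" | grep SONAME  # the name dlopen ultimately binds to
done
```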
@@ -90,6 +90,7 @@ Dask
DataFrame
DataLoader
DataParallel
Debian
DeepSpeed
Dependabot
Deprecations

@@ -106,6 +107,7 @@ FFT
FFTs
FFmpeg
FHS
FIXME
FMA
FP
FX

@@ -130,6 +132,7 @@ GiB
GIM
GL
GLXT
Gloo
GMI
GPG
GPR

@@ -147,6 +150,7 @@ HCA
HGX
HIPCC
HIPExtension
HIPification
HIPIFY
HPC
HPCG

@@ -189,6 +193,7 @@ Jupyter
KFD
KFDTest
KiB
KMD
KV
KVM
Keras

@@ -211,6 +216,7 @@ MiB
MIGraphX
MIOpen
MIOpenGEMM
MIOpen's
MIVisionX
MLM
MMA

@@ -240,6 +246,7 @@ MyEnvironment
MyST
NBIO
NBIOs
NCCL
NIC
NICs
NLI

@@ -285,6 +292,7 @@ PCC
PCI
PCIe
PEFT
PEQT
PIL
PILImage
POR

@@ -314,6 +322,7 @@ RDMA
RDNA
README
RHEL
RMW
RNN
RNNs
ROC

@@ -330,6 +339,7 @@ ROCmSoftwarePlatform
ROCmValidationSuite
ROCprofiler
ROCr
RPP
RST
RW
Radeon

@@ -337,6 +347,7 @@ RelWithDebInfo
Req
Rickle
RoCE
Runfile
Ryzen
SALU
SBIOS

@@ -349,6 +360,7 @@ SENDMSG
SGPR
SGPRs
SHA
SHARK's
SIGQUIT
SIMD
SIMDs

@@ -393,9 +405,14 @@ TensorFlow
TensorParallel
ToC
TorchAudio
torchaudio
TorchElastic
TorchMIGraphX
torchrec
TorchScript
TorchServe
torchserve
torchtext
TorchVision
TransferBench
TrapStatus

@@ -502,6 +519,9 @@ copyable
cpp
csn
cuBLAS
cuda
cuDNN
cudnn
cuFFT
cuLIB
cuRAND

@@ -518,6 +538,7 @@ dbgapi
de
deallocation
debuggability
debian
denoise
denoised
denoises

@@ -570,6 +591,7 @@ hipBLASLt's
hipblaslt
hipCUB
hipFFT
hipFORT
hipLIB
hipRAND
hipSOLVER

@@ -591,6 +613,7 @@ hpp
hsa
hsakmt
hyperparameter
hyperparameters
iDRAC
ib_core
inband

@@ -663,6 +686,7 @@ prebuilt
precompiled
preconditioner
preconfigured
preemptible
prefetch
prefetchable
prefill

@@ -679,6 +703,7 @@ profilers
protobuf
pseudorandom
py
recommender
quantile
quantizer
quasirandom

@@ -704,6 +729,7 @@ rocALUTION
rocBLAS
rocDecode
rocFFT
rocHPCG
rocJPEG
rocLIB
rocMLIR
README.md (11 changed lines)

@@ -50,7 +50,8 @@ The following example shows how to use the repo tool to download the ROCm source
```bash
mkdir -p ~/ROCm/
cd ~/ROCm/
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x
export ROCM_VERSION=6.3.1
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync
```

@@ -76,7 +77,7 @@ The Build time will reduce significantly if we limit the GPU Architecture/s agai

mkdir -p ~/WORKSPACE/ # Or any folder name other than WORKSPACE
cd ~/WORKSPACE/
export ROCM_VERSION=6.3.0
export ROCM_VERSION=6.3.1
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync

@@ -87,11 +88,11 @@ export ROCM_VERSION=6.3.0
# Option 1: Start a docker container
# Pulling required base docker images:
# Ubuntu20.04 built from ROCm/tools/rocm-build/docker/ubuntu20/Dockerfile
docker pull rocm/rocm-build-ubuntu-20.04:6.2
docker pull rocm/rocm-build-ubuntu-20.04:6.3
# Ubuntu22.04 built from ROCm/tools/rocm-build/docker/ubuntu22/Dockerfile
docker pull rocm/rocm-build-ubuntu-22.04:6.2
docker pull rocm/rocm-build-ubuntu-22.04:6.3
# Ubuntu24.04 built from ROCm/tools/rocm-build/docker/ubuntu24/Dockerfile
docker pull rocm/rocm-build-ubuntu-24.04:6.2
docker pull rocm/rocm-build-ubuntu-24.04:6.3

# Start docker container and mount the source code folder:
docker run -ti \
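The `docker run -ti \` command is cut off in this view. A typical invocation for this workflow mounts the repo workspace into the container; the sketch below is an assumption and is not part of the README diff:

```bash
# Hypothetical completion of the truncated docker run command above.
docker run -ti \
  -e ROCM_VERSION="${ROCM_VERSION}" \
  -v ~/WORKSPACE:/src -w /src \
  rocm/rocm-build-ubuntu-22.04:6.3 \
  bash
```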
RELEASE.md (1530 changed lines)

default.xml (10 changed lines)

@@ -1,17 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest>
  <remote name="rocm-org" fetch="https://github.com/ROCm/" />
  <default revision="refs/tags/rocm-6.3.0"
  <default revision="refs/tags/rocm-6.3.1"
           remote="rocm-org"
           sync-c="true"
           sync-j="4" />
  <!--list of projects for ROCm-->
  <project name="ROCK-Kernel-Driver" />
  <project name="ROCR-Runtime" />
  <project name="ROCT-Thunk-Interface" />
  <project name="amdsmi" />
  <project name="omniperf" />
  <project name="omnitrace" />
  <project name="rdc" />
  <project name="rocm_bandwidth_test" />
  <project name="rocm_smi_lib" />

@@ -21,6 +18,8 @@
  <project name="rocprofiler" />
  <project name="rocprofiler-register" />
  <project name="rocprofiler-sdk" />
  <project name="rocprofiler-compute" />
  <project name="rocprofiler-systems" />
  <project name="roctracer" />
  <!--HIP Projects-->
  <project name="HIP" />

@@ -42,6 +41,7 @@
  <project groups="mathlibs" name="ROCmValidationSuite" />
  <project groups="mathlibs" name="Tensile" />
  <project groups="mathlibs" name="composable_kernel" />
  <project groups="mathlibs" name="hipBLAS-common" />
  <project groups="mathlibs" name="hipBLAS" />
  <project groups="mathlibs" name="hipBLASLt" />
  <project groups="mathlibs" name="hipCUB" />

@@ -57,6 +57,7 @@
  <project groups="mathlibs" name="rocALUTION" />
  <project groups="mathlibs" name="rocBLAS" />
  <project groups="mathlibs" name="rocDecode" />
  <project groups="mathlibs" name="rocJPEG" />
  <project groups="mathlibs" name="rocPyDecode" />
  <project groups="mathlibs" name="rocFFT" />
  <project groups="mathlibs" name="rocPRIM" />

@@ -67,6 +68,7 @@
  <project groups="mathlibs" name="rocWMMA" />
  <project groups="mathlibs" name="rocm-cmake" />
  <project groups="mathlibs" name="rpp" />
  <project groups="mathlibs" name="TransferBench" />
  <!-- Projects for OpenMP-Extras -->
  <project name="aomp" path="openmp-extras/aomp" />
  <project name="aomp-extras" path="openmp-extras/aomp-extras" />
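Because the manifest tags the math libraries with `groups="mathlibs"`, a checkout can be limited to that group when the full tree is not needed. A minimal sketch, reusing the branch and manifest from the README example:

```bash
# Hypothetical partial checkout restricted to the "mathlibs" manifest group.
export ROCM_VERSION=6.3.1
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x \
  -m tools/rocm-build/rocm-${ROCM_VERSION}.xml -g mathlibs
~/bin/repo sync
```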
@@ -1,118 +1,128 @@
ROCm Version,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,
,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
,"RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4 [#red-hat94-past-60]_, 9.3, 9.2","RHEL 9.4 [#red-hat94-past-60]_, 9.3, 9.2","RHEL 9.4 [#red-hat94-past-60]_, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
,"RHEL 8.10","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
,Oracle Linux 8.10 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,,,
,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_
,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
,,,,,,,,,,
FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`PyTorch <rocm-install-on-linux:install/3rd-party/pytorch-install>`,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`TensorFlow <rocm-install-on-linux:install/3rd-party/tensorflow-install>`,"2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
:doc:`JAX <rocm-install-on-linux:install/3rd-party/jax-install>`,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
,,,,,,,,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
,,,,,,,,,,
THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,
Thrust,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
CUB,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
,,,,,,,,,,
KFD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
,,,,,,,,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
:doc:`MIOpen <miopen:index>`,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
:doc:`rocAL <rocal:index>`,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
:doc:`rocJPEG <rocjpeg:index>`,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A
:doc:`RPP <rpp:index>`,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
,,,,,,,,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`RCCL <rccl:index>`,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
,,,,,,,,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
:doc:`hipfort <hipfort:index>`,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
:doc:`rocBLAS <rocblas:index>`,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
:doc:`rocRAND <rocrand:index>`,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
:doc:`Tensile <tensile:index>`,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
,,,,,,,,,,
PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`hipCUB <hipcub:index>`,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`hipTensor <hiptensor:index>`,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
:doc:`rocPRIM <rocprim:index>`,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
,,,,,,,,,,
SUPPORT LIBS,,,,,,,,,,
`hipother <https://github.com/ROCm/hipother>`_,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
,,,,,,,,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
,,,,,,,,,,
PERFORMANCE TOOLS,,,,,,,,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A
:doc:`ROCTracer <roctracer:index>`,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
,,,,,,,,,,
DEVELOPMENT TOOLS,,,,,,,,,,
:doc:`HIPIFY <hipify:index>`,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
,,,,,,,,,,
COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
`Flang <https://github.com/ROCm/flang>`_,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`llvm-project <llvm-project:index>`,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
,,,,,,,,,,
RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
:doc:`HIP <hip:index>`,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0
ROCm Version,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,
,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,,,
,Debian 12 [#mic300x-past-60]_,,,,,,,,,,
,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
,gfx942,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_
,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
,,,,,,,,,,,
FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`PyTorch <../compatibility/pytorch-compatibility>`,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`TensorFlow <rocm-install-on-linux:install/3rd-party/tensorflow-install>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
:doc:`JAX <rocm-install-on-linux:install/3rd-party/jax-install>`,0.4.35,0.4.35,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
,,,,,,,,,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
,,,,,,,,,,,
THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,,
Thrust,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
CUB,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
,,,,,,,,,,,
,,,,,,,,,,,
KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
,,,,,,,,,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
:doc:`MIOpen <miopen:index>`,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
:doc:`rocAL <rocal:index>`,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
:doc:`rocJPEG <rocjpeg:index>`,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A
:doc:`RPP <rpp:index>`,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
,,,,,,,,,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`RCCL <rccl:index>`,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
,,,,,,,,,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
:doc:`hipfort <hipfort:index>`,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
,,,,,,,,,,,
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
:doc:`rocBLAS <rocblas:index>`,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
:doc:`rocRAND <rocrand:index>`,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
:doc:`Tensile <tensile:index>`,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
,,,,,,,,,,,
PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`hipTensor <hiptensor:index>`,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
:doc:`rocPRIM <rocprim:index>`,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
,,,,,,,,,,,
SUPPORT LIBS,,,,,,,,,,,
`hipother <https://github.com/ROCm/hipother>`_,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
,,,,,,,,,,,
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
,,,,,,,,,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
,,,,,,,,,,,
PERFORMANCE TOOLS,,,,,,,,,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
,,,,,,,,,,,
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A
:doc:`ROCTracer <roctracer:index>`,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
,,,,,,,,,,,
,,,,,,,,,,,
DEVELOPMENT TOOLS,,,,,,,,,,,
:doc:`HIPIFY <hipify:index>`,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
,,,,,,,,,,,
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
,,,,,,,,,,,
COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,,
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
`Flang <https://github.com/ROCm/flang>`_,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`llvm-project <llvm-project:index>`,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
,,,,,,,,,,,
,,,,,,,,,,,
RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,,
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
,,,,,,,,,,,
:doc:`HIP <hip:index>`,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
,,,,,,,,,,,
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0
@@ -23,17 +23,16 @@ compatibility and system requirements.
.. container:: format-big-table

.. csv-table::
:header: "ROCm Version", "6.3.0", "6.2.4", "6.1.0"
:header: "ROCm Version", "6.3.1", "6.3.0", "6.2.0"
:stub-columns: 1

:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04",
,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3"
,,,"Ubuntu 20.04.6, 20.04.5"
,"RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4 [#red-hat94]_, 9.3, 9.2"
,"RHEL 8.10","RHEL 8.10, 8.9","RHEL 8.9, 8.8"
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4"
,,,CentOS 7.9
,Oracle Linux 8.10 [#oracle89]_,Oracle Linux 8.9 [#oracle89]_,
:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04
,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4"
,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3"
,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9"
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5"
,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.9 [#mi300x]_
,Debian 12 [#mi300x]_,,
,.. _architecture-support-compatibility-matrix:,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2

@@ -43,115 +42,114 @@ compatibility and system requirements.
,.. _gpu-support-compatibility-matrix:,,
:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100
,gfx1030,gfx1030,gfx1030
,gfx942,gfx942 [#mi300_624]_, gfx942 [#mi300_610]_
,gfx942,gfx942,gfx942 [#mi300_620]_
,gfx90a,gfx90a,gfx90a
,gfx908,gfx908,gfx908
,,,
FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix:,,
:doc:`PyTorch <rocm-install-on-linux:install/3rd-party/pytorch-install>`,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`TensorFlow <rocm-install-on-linux:install/3rd-party/tensorflow-install>`,"2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1"
:doc:`JAX <rocm-install-on-linux:install/3rd-party/jax-install>`,0.4.26,0.4.26,0.4.26
:doc:`PyTorch <../compatibility/pytorch-compatibility>`,"2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13"
:doc:`TensorFlow <rocm-install-on-linux:install/3rd-party/tensorflow-install>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1"
:doc:`JAX <rocm-install-on-linux:install/3rd-party/jax-install>`,0.4.35,0.4.35,0.4.26
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3
,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix:,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0
`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.14.1
`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0
,,,
THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix:,,
Thrust,2.3.2,2.2.0,2.1.0
CUB,2.3.2,2.2.0,2.1.0
Thrust,2.3.2,2.3.2,2.2.0
CUB,2.3.2,2.3.2,2.2.0
,,,
KFD & USER SPACE [#kfd_support]_,.. _kfd-userspace-support-compatibility-matrix:,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x"
KMD & USER SPACE [#kfd_support]_,.. _kfd-userspace-support-compatibility-matrix:,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x"
,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix:,,
:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.10.0,2.9.0
:doc:`MIOpen <miopen:index>`,3.3.0,3.2.0,3.1.0
:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.0.0,2.5.0
:doc:`rocAL <rocal:index>`,2.1.0,2.0.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,0.8.0,0.6.0,0.5.0
:doc:`rocJPEG <rocjpeg:index>`,0.6.0,N/A,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.1.0,N/A
:doc:`RPP <rpp:index>`,1.9.1,1.8.0,1.5.0
:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.11.0,2.10.0
:doc:`MIOpen <miopen:index>`,3.3.0,3.3.0,3.2.0
:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.1.0,3.0.0
:doc:`rocAL <rocal:index>`,2.1.0,2.1.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,0.8.0,0.8.0,0.6.0
:doc:`rocJPEG <rocjpeg:index>`,0.6.0,0.6.0,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.2.0,0.1.0
:doc:`RPP <rpp:index>`,1.9.1,1.9.1,1.8.0
,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix:,,
:doc:`RCCL <rccl:index>`,2.21.5,2.20.5,2.18.6
:doc:`RCCL <rccl:index>`,2.21.5,2.21.5,2.20.5
,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix:,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.2.0,2.1.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.8.0,0.7.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.16,1.0.14
:doc:`hipfort <hipfort:index>`,0.5.0,0.4.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.0,2.11.1,2.10.16
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.2.0,2.1.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.1,3.0.1
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.1,0.1.0
:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.1.1
:doc:`rocBLAS <rocblas:index>`,4.3.0,4.2.4,4.1.0
:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.30,1.0.26
:doc:`rocRAND <rocrand:index>`,3.2.0,3.1.1,3.0.1
:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.26.2,3.25.0
:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.2.1,3.1.2
:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.5.0,1.4.0
:doc:`Tensile <tensile:index>`,4.42.0,4.41.0,4.40.0
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.2.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.8.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.14
:doc:`hipfort <hipfort:index>`,0.5.0,0.5.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.0,2.11.0
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.2.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.1
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.2,0.2.1
:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.0
:doc:`rocBLAS <rocblas:index>`,4.3.0,4.3.0,4.2.0
:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.31,1.0.28
:doc:`rocRAND <rocrand:index>`,3.2.0,3.2.0,3.1.0
:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.26.0
:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.2.0
:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.5.0
:doc:`Tensile <tensile:index>`,4.42.0,4.42.0,4.41.0
,,,
PRIMITIVES,.. _primitivelibs-support-compatibility-matrix:,,
:doc:`hipCUB <hipcub:index>`,3.3.0,3.2.1,3.1.0
:doc:`hipTensor <hiptensor:index>`,1.4.0,1.3.0,1.2.0
:doc:`rocPRIM <rocprim:index>`,3.3.0,3.2.2,3.1.0
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.1.1,3.0.1
:doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.2.0
:doc:`hipTensor <hiptensor:index>`,1.4.0,1.4.0,1.3.0
:doc:`rocPRIM <rocprim:index>`,3.3.0,3.3.0,3.2.0
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.0.1
,,,
SUPPORT LIBS,,,
`hipother <https://github.com/ROCm/hipother>`_,6.3.42131,6.2.41134,6.1.40091
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.0,6.2.4,6.1.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr]_,20240607.5.7,20240125.3.30
`hipother <https://github.com/ROCm/hipother>`_,6.3.42133,6.3.42131,6.2.41133
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.1,6.3.0,6.2.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr]_,N/A [#ROCT-rocr]_,20240607.1.4246
,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix:,,
:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.6.3,24.4.1
:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.7.1,24.6.2
:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.3.0,7.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.0.60204,1.0.60100
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.4.0,7.3.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.1.0,1.0.60200
,,,
PERFORMANCE TOOLS,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,2.0.1,N/A
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.0,1.11.2,N/A
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60300,2.0.60204,2.0.60100
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.4.0,N/A
:doc:`ROCTracer <roctracer:index>`,4.1.60300,4.1.60204,4.1.60100
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,2.0.1
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.0,0.1.0,1.11.2
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60301,2.0.60300,2.0.60200
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.4.0
:doc:`ROCTracer <roctracer:index>`,4.1.60301,4.1.60300,4.1.60200
,,,
DEVELOPMENT TOOLS,,,
:doc:`HIPIFY <hipify:index>`,18.0.0.24455,18.0.0.24392,17.0.0.24103
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.13.0,0.12.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.76.0,0.71.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,14.2.0,14.1.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.3.0
:doc:`HIPIFY <hipify:index>`,18.0.0.24491,18.0.0.24455,18.0.0.24232
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.13.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.76.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,14.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3
,,,
COMPILERS,.. _compilers-support-compatibility-matrix:,,
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,0.5.0
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.0.0
`Flang <https://github.com/ROCm/flang>`_,18.0.0.24455,18.0.0.24392,17.0.0.24103
:doc:`llvm-project <llvm-project:index>`,18.0.0.24455,18.0.0.24392,17.0.0.24103
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.24455,18.0.0.24392,17.0.0.24103
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1
`Flang <https://github.com/ROCm/flang>`_,18.0.0.24491,18.0.0.24455,18.0.0.24232
:doc:`llvm-project <llvm-project:index>`,18.0.0.24491,18.0.0.24455,18.0.0.24232
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.24491,18.0.0.24455,18.0.0.24232
,,,
RUNTIMES,.. _runtime-support-compatibility-matrix:,,
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42131,6.2.41134,6.1.40091
:doc:`HIP <hip:index>`,6.3.42131,6.2.41134,6.1.40091
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42133,6.3.42131,6.2.41133
:doc:`HIP <hip:index>`,6.3.42133,6.3.42131,6.2.41133
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.13.0

.. rubric:: Footnotes

.. [#red-hat94] RHEL 9.4 is supported only on AMD Instinct MI300A.
.. [#oracle89] Oracle Linux is supported only on AMD Instinct MI300X.
.. [#mi300_624] **For ROCm 6.2.4** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
.. [#mi300_610] **For ROCm 6.1.0** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.4, RHEL 9.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4.
.. [#kfd_support] ROCm provides forward and backward compatibility between the Kernel Fusion Driver (KFD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
.. [#ROCT-rocr] As of ROCm 6.3.0, the ROCT Thunk Interface is now included as part of the ROCr runtime package.
.. [#mi300x] Oracle Linux and Debian are supported only on AMD Instinct MI300X.
|
||||
.. [#mi300_620] **For ROCm 6.2.0** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
|
||||
.. [#kfd_support] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
|
||||
.. [#ROCT-rocr] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package.
|
||||
|
||||
.. _OS-kernel-versions:
|
||||
|
||||
@@ -166,35 +164,25 @@ Use this lookup table to confirm which operating system and kernel versions are
|
||||
:stub-columns: 1
|
||||
|
||||
`Ubuntu <https://ubuntu.com/about/release-cycle#ubuntu-kernel-release-cycle>`_, 24.04.2, "6.8 GA, 6.11 HWE"
|
||||
, 24.04.1, "6.8 GA"
|
||||
, 24.04, "6.8 GA"
|
||||
,,
|
||||
`Ubuntu <https://ubuntu.com/about/release-cycle#ubuntu-kernel-release-cycle>`_, 22.04.5, "5.15 GA, 6.8 HWE"
|
||||
, 22.04.4, "5.15 GA, 6.5 HWE"
|
||||
, 22.04.3, "5.15 GA, 6.2 HWE"
|
||||
, 22.04.2, "5.15 GA, 5.19 HWE"
|
||||
,,
|
||||
`Ubuntu <https://ubuntu.com/about/release-cycle#ubuntu-kernel-release-cycle>`_, 20.04.06, "5.15 HWE"
|
||||
, 20.04.5, "5.15 HWE"
|
||||
,,
|
||||
`Red Hat Enterprise Linux (RHEL) <https://access.redhat.com/articles/3078#RHEL9>`_, 9.5, 5.14.0
|
||||
,9.4, 5.14.0
|
||||
,9.3, 5.14.0
|
||||
,9.2, 5.14.0
|
||||
,,
|
||||
`Red Hat Enterprise Linux (RHEL) <https://access.redhat.com/articles/3078#RHEL8>`_, 8.10, 4.18.0
|
||||
,8.9, 4.18.0
|
||||
,8.8, 4.18.0
|
||||
,,
|
||||
`CentOS <https://access.redhat.com/articles/3078#RHEL7>`_, 7.9, 3.10
|
||||
,,
|
||||
`SUSE Linux Enterprise Server (SLES) <https://www.suse.com/support/kb/doc/?id=000019587#SLE15SP4>`_, 15 SP6, 6.4.0
|
||||
,15 SP5, 5.14.21
|
||||
,15 SP4, 5.14.21
|
||||
,,
|
||||
`Oracle Linux <https://blogs.oracle.com/scoter/post/oracle-linux-and-unbreakable-enterprise-kernel-uek-releases>`_, 8.10, 5.15.0
|
||||
,8.9, 5.15.0
|
||||
`Azure Linux <https://github.com/microsoft/azurelinux/releases>`_, 3.0, 6.6.60
|
||||
,,
|
||||
`Debian <https://www.debian.org/download>`_,12, 6.1
|
||||
|
||||
..
|
||||
Footnotes and ref anchors in below historical tables should be appended with "-past-60", to differentiate from the
|
||||
@@ -222,7 +210,7 @@ Expand for full historical view of:
|
||||
|
||||
.. rubric:: Footnotes
|
||||
|
||||
.. [#oracle89-past-60] Oracle Linux is supported only on AMD Instinct MI300X.
|
||||
.. [#mic300x-past-60] Oracle Linux and Debian are supported only on AMD Instinct MI300X.
|
||||
.. [#mi300_624-past-60] **For ROCm 6.2.4** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
|
||||
.. [#mi300_622-past-60] **For ROCm 6.2.2** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
|
||||
.. [#mi300_621-past-60] **For ROCm 6.2.1** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
|
||||
@@ -232,5 +220,5 @@ Expand for full historical view of:
|
||||
.. [#mi300_610-past-60] **For ROCm 6.1.0** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.4, RHEL 9.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4.
|
||||
.. [#mi300_602-past-60] **For ROCm 6.0.2** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3.
|
||||
.. [#mi300_600-past-60] **For ROCm 6.0.0** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3.
|
||||
.. [#kfd_support-past-60] ROCm provides forward and backward compatibility between the Kernel Fusion Driver (KFD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
|
||||
.. [#ROCT-rocr-past-60] As of ROCm 6.3.0, the ROCT Thunk Interface is now included as part of the ROCr runtime package.
|
||||
.. [#kfd_support-past-60] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
|
||||
.. [#ROCT-rocr-past-60] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package.
|
||||
|
||||
916
docs/compatibility/pytorch-compatibility.rst
Normal file
@@ -0,0 +1,916 @@
|
||||
.. meta::
|
||||
:description: PyTorch compatibility
|
||||
:keywords: GPU, PyTorch compatibility
|
||||
|
||||
********************************************************************************
|
||||
PyTorch compatibility
|
||||
********************************************************************************
|
||||
|
||||
`PyTorch <https://pytorch.org/>`_ is an open-source tensor library designed for
|
||||
deep learning. PyTorch on ROCm provides mixed-precision and large-scale training
|
||||
using `MIOpen <https://github.com/ROCm/MIOpen>`_ and
|
||||
`RCCL <https://github.com/ROCm/rccl>`_ libraries.
|
||||
|
||||
ROCm support for PyTorch is upstreamed into the official PyTorch repository. Due to independent
|
||||
compatibility considerations, this results in two distinct release cycles for PyTorch on ROCm:
|
||||
|
||||
- ROCm PyTorch release:
|
||||
|
||||
- Provides the latest version of ROCm but doesn't immediately support the latest stable PyTorch
|
||||
version.
|
||||
|
||||
- Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
|
||||
pre-installed.
|
||||
|
||||
- ROCm PyTorch repository: `<https://github.com/rocm/pytorch>`__
|
||||
|
||||
- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.
|
||||
|
||||
- Official PyTorch release:
|
||||
|
||||
- Provides the latest stable version of PyTorch but doesn't immediately support the latest ROCm version.
|
||||
|
||||
- Official PyTorch repository: `<https://github.com/pytorch/pytorch>`__
|
||||
|
||||
- See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
|
||||
or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.
|
||||
|
||||
Upstream PyTorch includes an automatic HIPification solution that generates HIP
source code from the CUDA backend. This approach allows PyTorch to support ROCm without requiring
manual code modifications.
|
||||
|
||||
ROCm's development is aligned with the stable release of PyTorch while upstream PyTorch testing uses
|
||||
the stable release of ROCm to maintain consistency.
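For example, you can confirm that an installed PyTorch build targets ROCm with a quick
check. This is a minimal sketch; it assumes PyTorch is already installed and that an AMD GPU
is visible to the driver.

.. code-block:: python

   import torch

   # Report the PyTorch build and the ROCm/HIP runtime it was built against.
   print("PyTorch:", torch.__version__)            # for example, 2.4.0+rocm6.3 on a ROCm wheel
   print("HIP:", torch.version.hip)                # None on a CUDA-only build
   print("GPU available:", torch.cuda.is_available())

   if torch.cuda.is_available():
       print("Device:", torch.cuda.get_device_name(0))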
|
||||
|
||||
.. _pytorch-docker-compat:
|
||||
|
||||
Docker image compatibility
|
||||
================================================================================
|
||||
|
||||
AMD validates and publishes ready-made `PyTorch <https://hub.docker.com/r/rocm/pytorch>`_
|
||||
images with ROCm backends on Docker Hub. The following Docker image tags and
|
||||
associated inventories are validated for `ROCm 6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_.
|
||||
|
||||
.. list-table:: PyTorch Docker image components
|
||||
:header-rows: 1
|
||||
:class: docker-image-compatibility
|
||||
|
||||
* - Docker
|
||||
- PyTorch
|
||||
- Ubuntu
|
||||
- Python
|
||||
- Apex
|
||||
- torchvision
|
||||
- TensorBoard
|
||||
- MAGMA
|
||||
- UCX
|
||||
- OMPI
|
||||
- OFED
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-98ddf20333bd01ff749b8092b1190ee369a75d3b8c71c2fac80ffdcb1a98d529?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
|
||||
- 24.04
|
||||
- `3.12 <https://www.python.org/downloads/release/python-3128/>`_
|
||||
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
|
||||
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
|
||||
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
|
||||
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-402c9b4f1a6b5a81c634a1932b56cbe01abb699cfcc7463d226276997c6cf8ea?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
|
||||
- 22.04
|
||||
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
|
||||
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
|
||||
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
|
||||
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
|
||||
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-e0608b55d408c3bfe5c19fdd57a4ced3e0eb3a495b74c309980b60b156c526dd?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
|
||||
- 22.04
|
||||
- `3.9 <https://www.python.org/downloads/release/python-3918/>`_
|
||||
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
|
||||
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
|
||||
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
|
||||
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-652cf25263d05b1de548222970aeb76e60b12de101de66751264709c0d0ff9d8?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `2.3.0 <https://github.com/ROCm/pytorch/tree/release/2.3>`_
|
||||
- 22.04
|
||||
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
|
||||
- `1.3.0 <https://github.com/ROCm/apex/tree/release/1.3.0>`_
|
||||
- `0.18.0 <https://github.com/pytorch/vision/tree/v0.18.0>`_
|
||||
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
|
||||
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-051976f26beab8f9aa65d999e3ad546c027b39240a0cc3ee81b114a9024f2912?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
|
||||
- 22.04
|
||||
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
|
||||
- `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
|
||||
- `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
|
||||
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
|
||||
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-88c839a364d109d3748c100385bfa100d28090d25118cc723fd0406390ab2f7e?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
|
||||
- 20.04
|
||||
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
|
||||
- `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
|
||||
- `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
|
||||
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
|
||||
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-994424ed07a63113f79dd9aa72159124c00f5fbfe18127151e6658f7d0b6f821?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
|
||||
- 22.04
|
||||
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
|
||||
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
|
||||
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
|
||||
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
|
||||
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
* - .. raw:: html
|
||||
|
||||
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-7b8139fe40a9aeb4bca3aecd15c22c1fa96e867d93479fa3a24fdeeeeafa1219?context=explore"><i class="fab fa-docker fa-lg"></i></a>
|
||||
|
||||
- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
|
||||
- 20.04
|
||||
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
|
||||
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
|
||||
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
|
||||
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
|
||||
- `master <https://bitbucket.org/icl/magma/src/master/>`_
|
||||
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
|
||||
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
|
||||
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
|
||||
|
||||
Critical ROCm libraries for PyTorch
|
||||
================================================================================
|
||||
|
||||
The functionality of PyTorch with ROCm is shaped by its underlying library
|
||||
dependencies. These critical ROCm components affect the capabilities,
|
||||
performance, and feature set available to developers.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - ROCm library
|
||||
- Version
|
||||
- Purpose
|
||||
- Used in
|
||||
* - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
|
||||
- 1.1.0
|
||||
- Enables faster execution of core operations like matrix multiplication
|
||||
(GEMM), convolutions and transformations.
|
||||
- Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
|
||||
``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
|
||||
and ``torch.nn.MultiheadAttention``.
|
||||
* - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
|
||||
- 2.3.0
|
||||
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
|
||||
matrix and vector operations.
|
||||
- Supports operations like matrix multiplication, matrix-vector products,
|
||||
and tensor contractions. Utilized in both dense and batched linear
|
||||
algebra operations.
|
||||
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
|
||||
- 0.10.0
|
||||
- hipBLASLt is an extension of the hipBLAS library, providing additional
|
||||
features like epilogues fused into the matrix multiplication kernel or
|
||||
use of integer tensor cores.
|
||||
- It accelerates operations like ``torch.matmul``, ``torch.mm``, and the
|
||||
matrix multiplications used in convolutional and linear layers.
|
||||
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
|
||||
- 3.3.0
|
||||
- Provides a C++ template library for parallel algorithms for reduction,
|
||||
scan, sort and select.
|
||||
- Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
|
||||
and ``torch.topk``. Operations on sparse tensors or tensors with
|
||||
irregular shapes often involve scanning, sorting, and filtering, which
|
||||
hipCUB handles efficiently.
|
||||
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
|
||||
- 1.0.17
|
||||
- Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
|
||||
- Used in functions like the ``torch.fft`` module.
|
||||
* - `hipRAND <https://github.com/ROCm/hipRAND>`_
|
||||
- 2.11.0
|
||||
- Provides fast random number generation for GPUs.
|
||||
- The ``torch.rand``, ``torch.randn`` and stochastic layers like
|
||||
``torch.nn.Dropout``.
|
||||
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
|
||||
- 2.3.0
|
||||
- Provides GPU-accelerated solvers for linear systems, eigenvalues, and
|
||||
singular value decompositions (SVD).
|
||||
- Supports functions like ``torch.linalg.solve``,
|
||||
``torch.linalg.eig``, and ``torch.linalg.svd``.
|
||||
* - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
|
||||
- 3.1.2
|
||||
- Accelerates operations on sparse matrices, such as sparse matrix-vector
|
||||
or matrix-matrix products.
|
||||
- Sparse tensor operations ``torch.sparse``.
|
||||
* - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
|
||||
- 0.2.2
|
||||
- Accelerates operations on sparse matrices, such as sparse matrix-vector
|
||||
or matrix-matrix products.
|
||||
- Sparse tensor operations ``torch.sparse``.
|
||||
* - `hipTensor <https://github.com/ROCm/hipTensor>`_
|
||||
- 1.4.0
|
||||
- Provides optimized high-performance tensor operations, such as contractions.
|
||||
- Accelerates tensor algebra, especially in deep learning and scientific
|
||||
computing.
|
||||
* - `MIOpen <https://github.com/ROCm/MIOpen>`_
|
||||
- 3.3.0
|
||||
- Optimizes deep learning primitives such as convolutions, pooling,
|
||||
normalization, and activation functions.
|
||||
- Speeds up convolutional neural networks (CNNs), recurrent neural
|
||||
networks (RNNs), and other layers. Used in operations like
|
||||
``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
|
||||
* - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
|
||||
- 2.11.0
|
||||
- Adds graph-level optimizations, ONNX model support, and mixed precision,
and enables ahead-of-time (AOT) compilation.
- Speeds up inference and executes ONNX models for compatibility with
other frameworks, accelerating layers such as ``torch.nn.Conv2d``,
``torch.nn.ReLU``, and ``torch.nn.LSTM``.
|
||||
* - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
|
||||
- 3.1.0
|
||||
- Optimizes acceleration for computer vision and AI workloads like
|
||||
preprocessing, augmentation, and inferencing.
|
||||
- Faster data preprocessing and augmentation pipelines for datasets like
|
||||
ImageNet or COCO and easy to integrate into PyTorch's ``torch.utils.data``
|
||||
and ``torchvision`` workflows.
|
||||
* - `rocAL <https://github.com/ROCm/rocAL>`_
|
||||
- 2.1.0
|
||||
- Accelerates the data pipeline by offloading intensive preprocessing and
|
||||
augmentation tasks. rocAL is part of MIVisionX.
|
||||
- Easy to integrate into PyTorch's ``torch.utils.data`` and
|
||||
``torchvision`` data load workloads.
|
||||
* - `RCCL <https://github.com/ROCm/rccl>`_
|
||||
- 2.21.5
|
||||
- Optimizes multi-GPU communication for operations like AllReduce and
|
||||
Broadcast.
|
||||
- Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
|
||||
Handles communication in multi-GPU setups.
|
||||
* - `rocDecode <https://github.com/ROCm/rocDecode>`_
|
||||
- 0.8.0
|
||||
- Provides hardware-accelerated data decoding capabilities, particularly
|
||||
for image, video, and other dataset formats.
|
||||
- Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
|
||||
and ``torch.distributed``.
|
||||
* - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
|
||||
- 0.6.0
|
||||
- Provides hardware-accelerated JPEG image decoding and encoding.
|
||||
- GPU accelerated ``torchvision.io.decode_jpeg`` and
|
||||
``torchvision.io.encode_jpeg`` and can be integrated in
|
||||
``torch.utils.data`` and ``torchvision``.
|
||||
* - `RPP <https://github.com/ROCm/RPP>`_
|
||||
- 1.9.1
|
||||
- Speeds up data augmentation, transformation, and other preprocessing steps.
|
||||
- Easy to integrate into PyTorch's ``torch.utils.data`` and
|
||||
``torchvision`` data load workloads.
|
||||
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
|
||||
- 3.3.0
|
||||
- Provides a C++ template library for parallel algorithms like sorting,
|
||||
reduction, and scanning.
|
||||
- Utilized in backend operations for tensor computations requiring
|
||||
parallel processing.
|
||||
* - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
|
||||
- 1.6.0
|
||||
- Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix
|
||||
multiplication (GEMM) and accumulation operations with mixed precision
|
||||
support.
|
||||
- Linear layers (``torch.nn.Linear``), convolutional layers
|
||||
(``torch.nn.Conv2d``), attention layers, general tensor operations that
|
||||
involve matrix products, such as ``torch.matmul``, ``torch.bmm``, and
|
||||
more.
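As a rough illustration of how these libraries are exercised transparently, the following
sketch (assuming a working ROCm build of PyTorch and at least one visible GPU) runs a matrix
multiplication and a convolution, which dispatch to hipBLAS/hipBLASLt and MIOpen respectively.

.. code-block:: python

   import torch
   import torch.nn as nn

   device = torch.device("cuda")  # on ROCm builds, "cuda" refers to the AMD GPU backend

   # Matrix multiplication dispatches to hipBLAS/hipBLASLt.
   a = torch.randn(1024, 1024, device=device)
   b = torch.randn(1024, 1024, device=device)
   c = a @ b

   # A 2D convolution dispatches to MIOpen.
   conv = nn.Conv2d(3, 16, kernel_size=3, padding=1).to(device)
   x = torch.randn(8, 3, 224, 224, device=device)
   y = conv(x)

   print(c.shape, y.shape)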
|
||||
|
||||
Supported and unsupported features
|
||||
================================================================================
|
||||
|
||||
The following section maps GPU-accelerated PyTorch features to their supported
|
||||
ROCm and PyTorch versions.
|
||||
|
||||
torch
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
|
||||
PyTorch, providing data structures for multi-dimensional tensors and
|
||||
implementing mathematical operations on them. It also includes utilities for
|
||||
efficient serialization of tensors and arbitrary data types, along with various
|
||||
other tools.
|
||||
|
||||
Tensor data types
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The data type of a tensor is specified using the ``dtype`` attribute or argument, and PyTorch supports a wide range of data types for different use cases.
|
||||
|
||||
The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_'s single data types:
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - ``torch.float8_e4m3fn``
|
||||
- 8-bit floating point, e4m3
|
||||
- 2.3
|
||||
- 5.5
|
||||
* - ``torch.float8_e5m2``
|
||||
- 8-bit floating point, e5m2
|
||||
- 2.3
|
||||
- 5.5
|
||||
* - ``torch.float16`` or ``torch.half``
|
||||
- 16-bit floating point
|
||||
- 0.1.6
|
||||
- 2.0
|
||||
* - ``torch.bfloat16``
|
||||
- 16-bit floating point
|
||||
- 1.6
|
||||
- 2.6
|
||||
* - ``torch.float32`` or ``torch.float``
|
||||
- 32-bit floating point
|
||||
- 0.1.12_2
|
||||
- 2.0
|
||||
* - ``torch.float64`` or ``torch.double``
|
||||
- 64-bit floating point
|
||||
- 0.1.12_2
|
||||
- 2.0
|
||||
* - ``torch.complex32`` or ``torch.chalf``
|
||||
- PyTorch provides native support for 32-bit complex numbers
|
||||
- 1.6
|
||||
- 2.0
|
||||
* - ``torch.complex64`` or ``torch.cfloat``
|
||||
- PyTorch provides native support for 64-bit complex numbers
|
||||
- 1.6
|
||||
- 2.0
|
||||
* - ``torch.complex128`` or ``torch.cdouble``
|
||||
- PyTorch provides native support for 128-bit complex numbers
|
||||
- 1.6
|
||||
- 2.0
|
||||
* - ``torch.uint8``
|
||||
- 8-bit integer (unsigned)
|
||||
- 0.1.12_2
|
||||
- 2.0
|
||||
* - ``torch.uint16``
|
||||
- 16-bit integer (unsigned)
|
||||
- 2.3
|
||||
- Not natively supported
|
||||
* - ``torch.uint32``
|
||||
- 32-bit integer (unsigned)
|
||||
- 2.3
|
||||
- Not natively supported
|
||||
* - ``torch.uint64``
|
||||
- 64-bit integer (unsigned)
|
||||
- 2.3
|
||||
- Not natively supported
|
||||
* - ``torch.int8``
|
||||
- 8-bit integer (signed)
|
||||
- 1.12
|
||||
- 5.0
|
||||
* - ``torch.int16`` or ``torch.short``
|
||||
- 16-bit integer (signed)
|
||||
- 0.1.12_2
|
||||
- 2.0
|
||||
* - ``torch.int32`` or ``torch.int``
|
||||
- 32-bit integer (signed)
|
||||
- 0.1.12_2
|
||||
- 2.0
|
||||
* - ``torch.int64`` or ``torch.long``
|
||||
- 64-bit integer (signed)
|
||||
- 0.1.12_2
|
||||
- 2.0
|
||||
* - ``torch.bool``
|
||||
- Boolean
|
||||
- 1.2
|
||||
- 2.0
|
||||
* - ``torch.quint8``
|
||||
- Quantized 8-bit integer (unsigned)
|
||||
- 1.8
|
||||
- 5.0
|
||||
* - ``torch.qint8``
|
||||
- Quantized 8-bit integer (signed)
|
||||
- 1.8
|
||||
- 5.0
|
||||
* - ``torch.qint32``
|
||||
- Quantized 32-bit integer (signed)
|
||||
- 1.8
|
||||
- 5.0
|
||||
* - ``torch.quint4x2``
|
||||
- Quantized 4-bit integer (unsigned)
|
||||
- 1.8
|
||||
- 5.0
|
||||
|
||||
.. note::
|
||||
|
||||
Unsigned types aside from ``uint8`` currently have only limited support in
eager mode (they primarily exist to assist usage with ``torch.compile``).
|
||||
|
||||
The :doc:`ROCm precision support page <rocm:reference/precision-support>`
lists the native hardware support for each data type.
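For illustration, the following minimal sketch (assuming a ROCm-enabled GPU is available)
allocates tensors with several of the dtypes listed above and converts between them.

.. code-block:: python

   import torch

   # Allocate tensors with explicit dtypes on the GPU.
   x16 = torch.randn(4, 4, dtype=torch.float16, device="cuda")
   xbf = torch.randn(4, 4, dtype=torch.bfloat16, device="cuda")
   xi8 = torch.randint(-128, 127, (4, 4), dtype=torch.int8, device="cuda")

   # Convert between dtypes with Tensor.to().
   x32 = x16.to(torch.float32)
   print(x16.dtype, xbf.dtype, xi8.dtype, x32.dtype)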
|
||||
|
||||
torch.cuda
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
``torch.cuda`` in PyTorch is a module that provides utilities and functions for
|
||||
managing and utilizing AMD and NVIDIA GPUs. It enables GPU-accelerated
|
||||
computations, memory management, and efficient execution of tensor operations,
|
||||
leveraging ROCm and CUDA as the underlying frameworks.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - Device management
|
||||
- Utilities for managing and interacting with GPUs.
|
||||
- 0.4.0
|
||||
- 3.8
|
||||
* - Tensor operations on GPU
|
||||
- Perform tensor operations such as addition and matrix multiplications on
|
||||
the GPU.
|
||||
- 0.4.0
|
||||
- 3.8
|
||||
* - Streams and events
|
||||
- Streams allow overlapping computation and communication for optimized
|
||||
performance, events enable synchronization.
|
||||
- 1.6.0
|
||||
- 3.8
|
||||
* - Memory management
|
||||
- Functions to manage and inspect memory usage like
|
||||
``torch.cuda.memory_allocated()``, ``torch.cuda.max_memory_allocated()``,
|
||||
``torch.cuda.memory_reserved()`` and ``torch.cuda.empty_cache()``.
|
||||
- 0.3.0
|
||||
- 1.9.2
|
||||
* - Running process lists of memory management
|
||||
- Return a human-readable printout of the running processes and their GPU
|
||||
memory use for a given device with functions like
|
||||
``torch.cuda.memory_stats()`` and ``torch.cuda.memory_summary()``.
|
||||
- 1.8.0
|
||||
- 4.0
|
||||
* - Communication collectives
|
||||
- A set of APIs that enable efficient communication between multiple GPUs,
|
||||
allowing for distributed computing and data parallelism.
|
||||
- 1.9.0
|
||||
- 5.0
|
||||
* - ``torch.cuda.CUDAGraph``
|
||||
- Graphs capture sequences of GPU operations to minimize kernel launch
|
||||
overhead and improve performance.
|
||||
- 1.10.0
|
||||
- 5.3
|
||||
* - TunableOp
|
||||
- A mechanism that allows certain operations to be more flexible and
|
||||
optimized for performance. It enables automatic tuning of kernel
|
||||
configurations and other settings to achieve the best possible
|
||||
performance based on the specific hardware (GPU) and workload.
|
||||
- 2.0
|
||||
- 5.4
|
||||
* - NVIDIA Tools Extension (NVTX)
|
||||
- Integration with NVTX for profiling and debugging GPU performance using
|
||||
NVIDIA's Nsight tools.
|
||||
- 1.8.0
|
||||
- ❌
|
||||
* - Lazy loading NVRTC
|
||||
- Delays JIT compilation with NVRTC until the code is explicitly needed.
|
||||
- 1.13.0
|
||||
- ❌
|
||||
* - Jiterator (beta)
|
||||
- Jiterator enables just-in-time compilation of element-wise GPU kernels
from user-provided code strings (``torch.cuda.jiterator``).
|
||||
- 1.13.0
|
||||
- 5.2
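A short sketch of the device and memory-management utilities listed above (it assumes at
least one visible GPU; the tensor size is only illustrative).

.. code-block:: python

   import torch

   # Query devices and inspect GPU memory usage.
   print("Devices:", torch.cuda.device_count())
   print("Current device:", torch.cuda.get_device_name(torch.cuda.current_device()))

   x = torch.empty(256, 1024, 1024, device="cuda")   # roughly 1 GiB of float32
   print("Allocated MiB:", torch.cuda.memory_allocated() / 2**20)
   print("Reserved MiB:", torch.cuda.memory_reserved() / 2**20)

   del x
   torch.cuda.empty_cache()                           # return cached blocks to the driver
   print("Allocated MiB after free:", torch.cuda.memory_allocated() / 2**20)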
|
||||
|
||||
.. Need to validate and extend.
|
||||
|
||||
torch.backends.cuda
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
``torch.backends.cuda`` is a PyTorch module that provides configuration options
|
||||
and flags to control the behavior of CUDA or ROCm operations. It is part of the
|
||||
PyTorch backend configuration system, which allows users to fine-tune how
|
||||
PyTorch interacts with the CUDA or ROCm environment.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - ``cufft_plan_cache``
|
||||
- Manages caching of GPU FFT plans to optimize repeated FFT computations.
|
||||
- 1.7.0
|
||||
- 5.0
|
||||
* - ``matmul.allow_tf32``
|
||||
- Enables or disables the use of TensorFloat-32 (TF32) precision for
|
||||
faster matrix multiplications on GPUs with Tensor Cores.
|
||||
- 1.10.0
|
||||
- ❌
|
||||
* - ``matmul.allow_fp16_reduced_precision_reduction``
|
||||
- Reduced precision reductions (e.g., with fp16 accumulation type) are
|
||||
allowed with fp16 GEMMs.
|
||||
- 2.0
|
||||
- ❌
|
||||
* - ``matmul.allow_bf16_reduced_precision_reduction``
|
||||
- Reduced precision reductions are allowed with bf16 GEMMs.
|
||||
- 2.0
|
||||
- ❌
|
||||
* - ``enable_cudnn_sdp``
|
||||
- Globally enables cuDNN SDPA's kernels within SDPA.
|
||||
- 2.0
|
||||
- ❌
|
||||
* - ``enable_flash_sdp``
|
||||
- Globally enables or disables FlashAttention for SDPA.
|
||||
- 2.1
|
||||
- ❌
|
||||
* - ``enable_mem_efficient_sdp``
|
||||
- Globally enables or disables Memory-Efficient Attention for SDPA.
|
||||
- 2.1
|
||||
- ❌
|
||||
* - ``enable_math_sdp``
|
||||
- Globally enables or disables the PyTorch C++ implementation within SDPA.
|
||||
- 2.1
|
||||
- ❌
|
||||
|
||||
.. Need to validate and extend.
|
||||
|
||||
torch.backends.cudnn
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Supported ``torch`` options:
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - ``allow_tf32``
|
||||
- TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
|
||||
Ampere or newer GPUs.
|
||||
- 1.12.0
|
||||
- ❌
|
||||
* - ``deterministic``
|
||||
- A bool that, if True, causes cuDNN to only use deterministic
|
||||
convolution algorithms.
|
||||
- 1.12.0
|
||||
- 6.0
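For example, the deterministic behavior can be requested through the usual
``torch.backends.cudnn`` interface, which maps to MIOpen on ROCm. This is a minimal sketch;
the flag values shown are illustrative.

.. code-block:: python

   import torch

   # MIOpen is exposed through the cuDNN-compatible backend flags on ROCm.
   print("cuDNN/MIOpen available:", torch.backends.cudnn.is_available())

   # Restrict convolutions to deterministic algorithms (supported per the table above).
   torch.backends.cudnn.deterministic = True
   torch.backends.cudnn.benchmark = False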
|
||||
|
||||
Automatic mixed precision: torch.amp
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
``torch.amp`` provides automatic mixed precision, which automates the process of
using both 16-bit (half-precision, float16) and 32-bit (single-precision,
float32) floating-point types in model training and inference.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - Autocasting
|
||||
- Instances of autocast serve as context managers or decorators that allow
|
||||
regions of your script to run in mixed precision.
|
||||
- 1.9
|
||||
- 2.5
|
||||
* - Gradient scaling
|
||||
- To prevent underflow, “gradient scaling” multiplies the network’s
|
||||
loss(es) by a scale factor and invokes a backward pass on the scaled
|
||||
loss(es). Gradients flowing backward through the network are then
|
||||
scaled by the same factor. In other words, gradient values have a
|
||||
larger magnitude, so they don’t flush to zero.
|
||||
- 1.9
|
||||
- 2.5
|
||||
* - CUDA op-specific behavior
|
||||
- These ops always go through autocasting whether they are invoked as part
|
||||
of a ``torch.nn.Module``, as a function, or as a ``torch.Tensor`` method. If
|
||||
functions are exposed in multiple namespaces, they go through
|
||||
autocasting regardless of the namespace.
|
||||
- 1.9
|
||||
- 2.5
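A minimal autocasting and gradient-scaling sketch is shown below. It assumes PyTorch 2.3 or
newer for the ``torch.amp.GradScaler`` spelling and a single visible GPU; the model and tensor
shapes are placeholders.

.. code-block:: python

   import torch

   model = torch.nn.Linear(1024, 1024).cuda()
   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
   scaler = torch.amp.GradScaler("cuda")          # gradient scaling for float16

   data = torch.randn(64, 1024, device="cuda")
   target = torch.randn(64, 1024, device="cuda")

   # Run the forward pass in mixed precision.
   with torch.autocast(device_type="cuda", dtype=torch.float16):
       loss = torch.nn.functional.mse_loss(model(data), target)

   scaler.scale(loss).backward()                   # backward on the scaled loss
   scaler.step(optimizer)
   scaler.update()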
|
||||
|
||||
Distributed library features
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The PyTorch distributed library includes a collective of parallelism modules, a
|
||||
communications layer, and infrastructure for launching and debugging large
|
||||
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.
|
||||
|
||||
The Distributed Library feature in PyTorch provides tools and APIs for building
|
||||
and running distributed machine learning workflows. It allows training models
|
||||
across multiple processes, GPUs, or nodes in a cluster, enabling efficient use
|
||||
of computational resources and scalability for large-scale tasks.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Features
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - TensorPipe
|
||||
- TensorPipe is a point-to-point communication library integrated into
|
||||
PyTorch for distributed training. It is designed to handle tensor data
|
||||
transfers efficiently between different processes or devices, including
|
||||
those on separate machines.
|
||||
- 1.8
|
||||
- 5.4
|
||||
* - Gloo
|
||||
- Gloo is designed for multi-machine and multi-GPU setups, enabling
|
||||
efficient communication and synchronization between processes. Gloo is
|
||||
one of the default backends for PyTorch's Distributed Data Parallel
|
||||
(DDP) and RPC frameworks, alongside other backends like NCCL and MPI.
|
||||
- 1.0
|
||||
- 2.0
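A minimal distributed data parallel setup sketch follows. It assumes a launch via
``torchrun`` with one process per GPU; on ROCm builds the ``nccl`` backend is provided by RCCL.

.. code-block:: python

   import os
   import torch
   import torch.distributed as dist

   # Launched with: torchrun --nproc_per_node=<num_gpus> script.py
   dist.init_process_group(backend="nccl")          # RCCL backs "nccl" on ROCm
   local_rank = int(os.environ["LOCAL_RANK"])
   torch.cuda.set_device(local_rank)

   model = torch.nn.Linear(1024, 1024).cuda()
   ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])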
|
||||
|
||||
torch.compiler
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Features
|
||||
- Description
|
||||
- Since PyTorch
|
||||
- Since ROCm
|
||||
* - ``torch.compiler`` (AOT Autograd)
|
||||
- Autograd captures not only the user-level code, but also backpropagation,
|
||||
which results in capturing the backwards pass “ahead-of-time”. This
|
||||
enables acceleration of both forwards and backwards pass using
|
||||
``TorchInductor``.
|
||||
- 2.0
|
||||
- 5.3
|
||||
* - ``torch.compiler`` (TorchInductor)
|
||||
- The default ``torch.compile`` deep learning compiler that generates fast
|
||||
code for multiple accelerators and backends. You need to use a backend
|
||||
compiler to make speedups through ``torch.compile`` possible. For AMD,
|
||||
NVIDIA, and Intel GPUs, it leverages OpenAI Triton as the key building block.
|
||||
- 2.0
|
||||
- 5.3
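A small ``torch.compile`` sketch is shown below; the function and tensor shapes are
illustrative, and it assumes a GPU-enabled build with Triton available so TorchInductor can
generate kernels.

.. code-block:: python

   import torch

   def gelu_mlp(x, w1, w2):
       # Two matrix multiplications with a GELU in between.
       return torch.nn.functional.gelu(x @ w1) @ w2

   compiled = torch.compile(gelu_mlp)               # TorchInductor generates Triton kernels

   x = torch.randn(256, 1024, device="cuda")
   w1 = torch.randn(1024, 4096, device="cuda")
   w2 = torch.randn(4096, 1024, device="cuda")
   out = compiled(x, w1, w2)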
|
||||
|
||||
torchaudio
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
|
||||
utilities for processing audio data in PyTorch, such as audio loading,
|
||||
transformations, and feature extraction.
|
||||
|
||||
To ensure GPU-acceleration with ``torchaudio.transforms``, you need to move audio
|
||||
data (waveform tensor) explicitly to GPU using ``.to('cuda')``.
|
||||
|
||||
The following ``torchaudio`` features are GPU-accelerated.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Features
|
||||
- Description
|
||||
- Since torchaudio version
|
||||
- Since ROCm
|
||||
* - ``torchaudio.transforms.Spectrogram``
|
||||
- Generate spectrogram of an input waveform using STFT.
|
||||
- 0.6.0
|
||||
- 4.5
|
||||
* - ``torchaudio.transforms.MelSpectrogram``
|
||||
- Generate the mel-scale spectrogram of raw audio signals.
|
||||
- 0.9.0
|
||||
- 4.5
|
||||
* - ``torchaudio.transforms.MFCC``
|
||||
- Extracts MFCC features.
|
||||
- 0.9.0
|
||||
- 4.5
|
||||
* - ``torchaudio.transforms.Resample``
|
||||
- Resample a signal from one frequency to another.
|
||||
- 0.9.0
|
||||
- 4.5
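For example, a transform can be moved to the GPU together with the waveform. This is a
sketch; ``speech.wav`` is a placeholder input file.

.. code-block:: python

   import torch
   import torchaudio

   waveform, sample_rate = torchaudio.load("speech.wav")   # placeholder input file
   waveform = waveform.to("cuda")                           # move audio data to the GPU

   mel = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate).to("cuda")
   mel_spec = mel(waveform)                                 # computed on the GPU
   print(mel_spec.shape)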
|
||||
|
||||
torchvision
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
|
||||
provides datasets, model architectures, and common image transformations for
|
||||
computer vision.
|
||||
|
||||
The following ``torchvision`` features are GPU-accelerated.
|
||||
|
||||
.. list-table::
|
||||
:header-rows: 1
|
||||
|
||||
* - Features
|
||||
- Description
|
||||
- Since torchvision version
|
||||
- Since ROCm
|
||||
* - ``torchvision.transforms.functional``
|
||||
- Provides GPU-compatible transformations for image preprocessing like
|
||||
resize, normalize, rotate and crop.
|
||||
- 0.2.0
|
||||
- 4.0
|
||||
* - ``torchvision.ops``
|
||||
- GPU-accelerated operations for object detection and segmentation tasks.
|
||||
``torchvision.ops.roi_align``, ``torchvision.ops.nms`` and
|
||||
``box_convert``.
|
||||
- 0.6.0
|
||||
- 3.3
|
||||
* - ``torchvision.models`` with ``.to('cuda')``
|
||||
- ``torchvision`` provides several pre-trained models (ResNet, Faster
|
||||
R-CNN, Mask R-CNN, ...) that can run on CUDA for faster inference and
|
||||
training.
|
||||
- 0.1.6
|
||||
- 2.x
|
||||
* - ``torchvision.io``
|
||||
- Video decoding and frame extraction using GPU acceleration with NVIDIA’s
|
||||
NVDEC and nvJPEG (rocJPEG) on CUDA-enabled GPUs.
|
||||
- 0.4.0
|
||||
- 6.3
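A short sketch of GPU inference with a pre-trained ``torchvision`` model. The random batch
stands in for real images, and the weights enum assumes torchvision 0.13 or newer with the
pre-trained weights downloadable.

.. code-block:: python

   import torch
   import torchvision

   # Run a pre-trained classifier on the GPU.
   model = torchvision.models.resnet50(weights="IMAGENET1K_V2").to("cuda").eval()

   images = torch.rand(8, 3, 224, 224, device="cuda")      # stand-in batch of images
   with torch.no_grad():
       logits = model(images)
   print(logits.argmax(dim=1))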
|
||||
|
||||
torchtext
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The `torchtext <https://pytorch.org/text/stable/index.html>`_ library provides
|
||||
utilities for processing and working with text data in PyTorch, including
|
||||
tokenization, vocabulary management, and text embeddings. torchtext supports
|
||||
preprocessing pipelines and integration with PyTorch models, simplifying the
|
||||
implementation of natural language processing (NLP) tasks.
|
||||
|
||||
To leverage GPU acceleration in torchtext, you need to move tensors
|
||||
explicitly to the GPU using ``.to('cuda')``.
|
||||
|
||||
* torchtext does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
torchtune
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library is used for
authoring, fine-tuning and experimenting with LLMs.
|
||||
|
||||
* Usage: It works out-of-the-box, enabling developers to fine-tune ROCm PyTorch solutions.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
torchserve
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
`torchserve <https://pytorch.org/torchserve/>`_ is a flexible and easy-to-use tool for
serving and scaling PyTorch models in production.
|
||||
|
||||
* torchserve does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
torchrec
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
`torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
|
||||
common sparsity and parallelism primitives needed for large-scale recommender
|
||||
systems.
|
||||
|
||||
* torchrec does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
|
||||
|
||||
* Only official release exists.
|
||||
|
||||
Unsupported PyTorch features
|
||||
----------------------------
|
||||
|
||||
The following are GPU-accelerated PyTorch features not currently supported by ROCm.
|
||||
|
||||
.. list-table::
|
||||
:widths: 30, 60, 10
|
||||
:header-rows: 1
|
||||
|
||||
* - Data type
|
||||
- Description
|
||||
- Since PyTorch
|
||||
* - APEX batch norm
|
||||
- Use APEX batch norm instead of PyTorch batch norm.
|
||||
- 1.6.0
|
||||
* - ``torch.backends.cuda`` / ``matmul.allow_tf32``
|
||||
- A bool that controls whether TensorFloat-32 tensor cores may be used in
|
||||
matrix multiplications.
|
||||
- 1.7
|
||||
* - ``torch.cuda`` / NVIDIA Tools Extension (NVTX)
|
||||
- Integration with NVTX for profiling and debugging GPU performance using
|
||||
NVIDIA's Nsight tools.
|
||||
- 1.7.0
|
||||
* - ``torch.cuda`` / Lazy loading NVRTC
|
||||
- Delays JIT compilation with NVRTC until the code is explicitly needed.
|
||||
- 1.8.0
|
||||
* - ``torch-tensorrt``
|
||||
- Integrates the TensorRT library for optimizing and deploying PyTorch models.
ROCm does not have an equivalent library for TensorRT.
|
||||
- 1.9.0
|
||||
* - ``torch.backends`` / ``cudnn.allow_tf32``
|
||||
- TensorFloat-32 tensor cores may be used in cuDNN convolutions.
|
||||
- 1.10.0
|
||||
* - ``torch.backends.cuda`` / ``matmul.allow_fp16_reduced_precision_reduction``
|
||||
- Reduced precision reductions with fp16 accumulation type are
|
||||
allowed with fp16 GEMMs.
|
||||
- 2.0
|
||||
* - ``torch.backends.cuda`` / ``matmul.allow_bf16_reduced_precision_reduction``
|
||||
- Reduced precision reductions are allowed with bf16 GEMMs.
|
||||
- 2.0
|
||||
* - ``torch.nn.functional`` / ``scaled_dot_product_attention``
|
||||
- Flash attention backend for SDPA to accelerate attention computation in
|
||||
transformer-based models.
|
||||
- 2.0
|
||||
* - ``torch.backends.cuda`` / ``enable_cudnn_sdp``
|
||||
- Globally enables cuDNN SDPA's kernels within SDPA.
|
||||
- 2.0
|
||||
* - ``torch.backends.cuda`` / ``enable_flash_sdp``
|
||||
- Globally enables or disables FlashAttention for SDPA.
|
||||
- 2.1
|
||||
* - ``torch.backends.cuda`` / ``enable_mem_efficient_sdp``
|
||||
- Globally enables or disables Memory-Efficient Attention for SDPA.
|
||||
- 2.1
|
||||
* - ``torch.backends.cuda`` / ``enable_math_sdp``
|
||||
- Globally enables or disables the PyTorch C++ implementation within SDPA.
|
||||
- 2.1
|
||||
* - Dynamic parallelism
|
||||
- PyTorch itself does not directly expose dynamic parallelism as a core
|
||||
feature. Dynamic parallelism allows GPU threads to launch additional
threads, which can be accessed through custom operations via the
|
||||
``torch.utils.cpp_extension`` module.
|
||||
- Not a core feature
|
||||
* - Unified memory support in PyTorch
|
||||
- Unified Memory is not directly exposed in PyTorch's core API, but it can be
|
||||
utilized effectively through custom CUDA extensions or advanced
|
||||
workflows.
|
||||
- Not a core feature
|
||||
|
||||
Use cases and recommendations
|
||||
================================================================================
|
||||
|
||||
* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/train-a-model>` provides
|
||||
guidance on how to leverage the ROCm platform for training AI models. It covers the steps, tools, and best practices
|
||||
for optimizing training workflows on AMD GPUs using PyTorch features.
|
||||
|
||||
* :doc:`Single-GPU fine-tuning and inference </how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference>`
|
||||
describes and demonstrates how to use the ROCm platform for the fine-tuning and inference of
|
||||
machine learning models, particularly large language models (LLMs), on systems with a single AMD
|
||||
Instinct MI300X accelerator. This page provides a detailed guide for setting up, optimizing, and
|
||||
executing fine-tuning and inference workflows in such environments.
|
||||
|
||||
* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference>`
|
||||
describes and demonstrates the fine-tuning and inference of machine learning models on systems
|
||||
with multiple MI300X accelerators.
|
||||
|
||||
* The :doc:`Instinct MI300X workload optimization guide </how-to/tuning-guides/mi300x/workload>` provides detailed
|
||||
guidance on optimizing workloads for the AMD Instinct MI300X accelerator using ROCm. This guide is aimed at helping
|
||||
users achieve optimal performance for deep learning and other high-performance computing tasks on the MI300X
|
||||
accelerator.
|
||||
|
||||
* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
|
||||
describes how PyTorch integrates with ROCm for AI workloads. It outlines the use of PyTorch on the ROCm platform and
|
||||
focuses on how to efficiently leverage AMD GPU hardware for training and inference tasks in AI applications.
|
||||
|
||||
For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.
|
||||
@@ -1,156 +0,0 @@
|
||||
.. meta::
|
||||
:description: How ROCm uses PCIe atomics
|
||||
:keywords: PCIe, PCIe atomics, atomics, BAR memory, AMD, ROCm
|
||||
|
||||
*****************************************************************************
|
||||
How ROCm uses PCIe atomics
|
||||
*****************************************************************************
|
||||
|
||||
ROCm PCIe feature and overview of BAR memory
|
||||
================================================================
|
||||
|
||||
ROCm is an extension of HSA platform architecture, so it shares the queuing model, memory model,
|
||||
signaling and synchronization protocols. Platform atomics are integral to perform queuing and
|
||||
signaling memory operations where there may be multiple-writers across CPU and GPU agents.
|
||||
|
||||
The full list of HSA system architecture platform requirements are here:
|
||||
`HSA Sys Arch Features <http://hsafoundation.com/wp-content/uploads/2021/02/HSA-SysArch-1.2.pdf>`_.
|
||||
|
||||
AMD ROCm Software uses the new PCI Express 3.0 (Peripheral Component Interconnect Express [PCIe]
|
||||
3.0) features for atomic read-modify-write transactions which extends inter-processor synchronization
|
||||
mechanisms to IO to support the defined set of HSA capabilities needed for queuing and signaling
|
||||
memory operations.
|
||||
|
||||
The new PCIe atomic operations operate as completers for ``CAS`` (Compare and Swap), ``FetchADD``,
|
||||
``SWAP`` atomics. The atomic operations are initiated by the I/O device, which supports 32-bit, 64-bit and
128-bit operands whose target addresses have to be naturally aligned to the operation sizes.
|
||||
|
||||
For ROCm the Platform atomics are used in ROCm in the following ways:
|
||||
|
||||
* Update HSA queue's read_dispatch_id: 64 bit atomic add used by the command processor on the
|
||||
GPU agent to update the packet ID it processed.
|
||||
* Update HSA queue's write_dispatch_id: 64 bit atomic add used by the CPU and GPU agent to
|
||||
support multi-writer queue insertions.
|
||||
* Update HSA Signals -- 64bit atomic ops are used for CPU & GPU synchronization.
|
||||
|
||||
The PCIe 3.0 atomic operations feature allows atomic transactions to be requested by, routed through
|
||||
and completed by PCIe components. Routing and completion does not require software support.
|
||||
Component support for each is detectable via the Device Capabilities 2 (DevCap2) register. Upstream
|
||||
bridges need to have atomic operations routing enabled or the atomic operations will fail even though
|
||||
the PCIe endpoint and PCIe I/O devices have the capability for atomic operations.
|
||||
|
||||
To do atomic operations routing capability between two or more Root Ports, each associated Root Port
|
||||
must indicate that capability via the atomic operations routing supported bit in the DevCap2 register.
|
||||
|
||||
If your system has a PCIe Express Switch it needs to support atomic operations routing. Atomic
|
||||
operations requests are permitted only if a component's ``DEVCTL2.ATOMICOP_REQUESTER_ENABLE``
|
||||
field is set. These requests can only be serviced if the upstream components support atomic operation
|
||||
completion and/or routing to a component which does. Atomic operations routing support=1, routing
|
||||
is supported; atomic operations routing support=0, routing is not supported.
|
||||
|
||||
An atomic operation is a non-posted transaction supporting 32-bit and 64-bit address formats, there
|
||||
must be a response for Completion containing the result of the operation. Errors associated with the
|
||||
operation (uncorrectable error accessing the target location or carrying out the atomic operation) are
|
||||
signaled to the requester by setting the Completion Status field in the completion descriptor, they are
|
||||
set to Completer Abort (CA) or Unsupported Request (UR).
|
||||
|
||||
To understand more about how PCIe atomic operations work, see
|
||||
`PCIe atomics <https://pcisig.com/specifications/pciexpress/specifications/ECN_Atomic_Ops_080417.pdf>`_
|
||||
|
||||
`Linux Kernel Patch to pci_enable_atomic_request <https://patchwork.kernel.org/project/linux-pci/patch/1443110390-4080-1-git-send-email-jay@jcornwall.me/>`_
|
||||
|
||||
There are also a number of papers which talk about these new capabilities:
|
||||
|
||||
* `Atomic Read Modify Write Primitives by Intel <https://www.intel.es/content/dam/doc/white-paper/atomic-read-modify-write-primitives-i-o-devices-paper.pdf>`_
|
||||
* `PCI express 3 Accelerator White paper by Intel <https://www.intel.sg/content/dam/doc/white-paper/pci-express3-accelerator-white-paper.pdf>`_
|
||||
* `PCIe Generation 4 Base Specification includes atomic operations <https://astralvx.com/storage/2020/11/PCI_Express_Base_4.0_Rev0.3_February19-2014.pdf>`_
|
||||
* `Xilinx PCIe Ultrascale White paper <https://docs.xilinx.com/v/u/8OZSA2V1b1LLU2rRCDVGQw>`_
|
||||
|
||||
Other I/O devices with PCIe atomics support:
|
||||
|
||||
* Mellanox ConnectX-5 InfiniBand Card
|
||||
* Cray Aries Interconnect
|
||||
* Xilinx 7 Series Devices
|
||||
|
||||
Future bus technology with richer I/O atomics operation Support
|
||||
|
||||
* GenZ
|
||||
|
||||
New PCIe Endpoints with support beyond AMD Ryzen and EPYC CPU; Intel Haswell or newer CPUs
|
||||
with PCIe Generation 3.0 support.
|
||||
|
||||
* Mellanox Bluefield SOC
|
||||
* Cavium Thunder X2
|
||||
|
||||
In ROCm, we also take advantage of PCIe ID based ordering technology for P2P when the GPU
|
||||
originates two writes to two different targets:
|
||||
|
||||
* Write to another GPU memory
|
||||
* Write to system memory to indicate transfer complete
|
||||
|
||||
They are routed off to different ends of the computer but we want to make sure the write to system
|
||||
memory indicating transfer completion occurs AFTER the P2P write to the GPU has completed.
|
||||
|
||||
BAR memory overview
|
||||
----------------------------------------------------------------------------------------------------
|
||||
On a Xeon E5-based system, you can turn on above-4GB PCIe addressing in the BIOS; if so, you need to set the
memory-mapped input/output (MMIO) base address (MMIOH base) and range (MMIO high size) in the BIOS.
|
||||
|
||||
On a Supermicro system, you need to set the following options in the system BIOS:
|
||||
|
||||
* Advanced->PCIe/PCI/PnP configuration-\> Above 4G Decoding = Enabled
|
||||
* Advanced->PCIe/PCI/PnP Configuration-\>MMIOH Base = 512G
|
||||
* Advanced->PCIe/PCI/PnP Configuration-\>MMIO High Size = 256G
|
||||
|
||||
When Large BAR capability is supported, there is a Large BAR VBIOS, which also disables the IO BAR.
|
||||
|
||||
For GFX9 and Vega10, which have a 44-bit physical address and a 48-bit virtual address:
|
||||
|
||||
* BAR0-1 registers: 64bit, prefetchable, GPU memory. 8GB or 16GB depending on Vega10 SKU. Must
|
||||
be placed < 2^44 to support P2P access from other Vega10.
|
||||
* BAR2-3 registers: 64bit, prefetchable, Doorbell. Must be placed \< 2^44 to support P2P access from
|
||||
other Vega10.
|
||||
* BAR4 register: Optional, not a boot device.
|
||||
* BAR5 register: 32bit, non-prefetchable, MMIO. Must be placed \< 4GB.
|
||||
|
||||
Here is how our base address register (BAR) works on GFX 8 GPUs with 40 bit Physical Address Limit ::
|
||||
|
||||
11:00.0 Display controller: Advanced Micro Devices, Inc. [AMD/ATI] Fiji [Radeon R9 FURY / NANO
|
||||
Series] (rev c1)
|
||||
|
||||
Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device 0b35
|
||||
|
||||
Flags: bus master, fast devsel, latency 0, IRQ 119
|
||||
|
||||
Memory at bf40000000 (64-bit, prefetchable) [size=256M]
|
||||
|
||||
Memory at bf50000000 (64-bit, prefetchable) [size=2M]
|
||||
|
||||
I/O ports at 3000 [size=256]
|
||||
|
||||
Memory at c7400000 (32-bit, non-prefetchable) [size=256K]
|
||||
|
||||
Expansion ROM at c7440000 [disabled] [size=128K]
|
||||
|
||||
Legend:
|
||||
|
||||
1 : GPU Frame Buffer BAR -- In this example it happens to be 256M, but typically this will be the size of the
GPU memory (typically 4GB+). This BAR has to be placed \< 2^40 to allow peer-to-peer access from
other GFX8 AMD GPUs. For GFX9 (Vega GPU) the BAR has to be placed \< 2^44 to allow peer-to-peer
access from other GFX9 AMD GPUs.
|
||||
|
||||
2 : Doorbell BAR -- The size of this BAR is typically \< 10MB (currently fixed at 2MB) for this
generation of GPUs. This BAR has to be placed \< 2^40 to allow peer-to-peer access from other current
generation AMD GPUs.
|
||||
|
||||
3 : IO BAR -- This is for legacy VGA and boot device support. Since the GPUs in this project are
not VGA devices (headless), this is not a concern even if the SBIOS does not set it up.
|
||||
|
||||
4 : MMIO BAR -- This is required for the AMD Driver SW to access the configuration registers. Since the
remainder of the BAR available is only 1 DWORD (32bit), this is placed \< 4GB. This is fixed at 256KB.
|
||||
|
||||
5 : Expansion ROM -- This is required for the AMD Driver SW to access the GPU video-bios. This is
|
||||
currently fixed at 128KB.
|
||||
|
||||
For more information, you can review
|
||||
`Overview of Changes to PCI Express 3.0 <https://www.mindshare.com/files/resources/PCIe%203-0.pdf>`_.
|
||||
@@ -1,241 +0,0 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="description" content="GPU memory">
|
||||
<meta name="keywords" content="GPU memory, VRAM, video random access memory, pageable
|
||||
memory, pinned memory, managed memory, AMD, ROCm">
|
||||
</head>
|
||||
|
||||
# GPU memory
|
||||
|
||||
For the HIP reference documentation, see:
|
||||
|
||||
* {doc}`hip:doxygen/html/group___memory`
|
||||
* {doc}`hip:doxygen/html/group___memory_m`
|
||||
|
||||
Host memory exists on the host (e.g. CPU) of the machine in random access memory (RAM).
|
||||
|
||||
Device memory exists on the device (e.g. GPU) of the machine in video random access memory (VRAM).
|
||||
Recent architectures use graphics double data rate (GDDR) synchronous dynamic random-access memory (SDRAM), such as GDDR6, or high-bandwidth memory (HBM) such as HBM2e.
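
If you want to see how much VRAM a device exposes, the runtime can report it directly. The following is a small illustrative sketch (not part of the original page) that assumes a working ROCm install and at least one visible HIP device:

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    size_t free_bytes = 0, total_bytes = 0;
    // Query free and total VRAM on the current device.
    if (hipMemGetInfo(&free_bytes, &total_bytes) != hipSuccess) {
        std::fprintf(stderr, "hipMemGetInfo failed\n");
        return 1;
    }
    std::printf("VRAM: %.1f GiB free of %.1f GiB total\n",
                free_bytes / (1024.0 * 1024.0 * 1024.0),
                total_bytes / (1024.0 * 1024.0 * 1024.0));
    return 0;
}
```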
|
||||
|
||||
## Memory allocation
|
||||
|
||||
Memory can be allocated as pageable, pinned, or managed memory.
The following API calls will result in these allocations:
|
||||
|
||||
| API | Data location | Allocation |
|
||||
|--------------------|---------------|------------|
|
||||
| System allocated | Host | Pageable |
|
||||
| `hipMallocManaged` | Host | Managed |
|
||||
| `hipHostMalloc` | Host | Pinned |
|
||||
| `hipMalloc` | Device | Pinned |
|
||||
|
||||
:::{tip}
|
||||
`hipMalloc` and `hipFree` are blocking calls, however, HIP recently added non-blocking versions `hipMallocAsync` and `hipFreeAsync` which take in a stream as an additional argument.
|
||||
:::
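
To illustrate the non-blocking variants mentioned in the tip, here is a minimal sketch, assuming a HIP/ROCm version that provides the stream-ordered allocator (`hipMallocAsync`/`hipFreeAsync`); the buffer size is arbitrary:

```cpp
#include <hip/hip_runtime.h>

int main() {
    hipStream_t stream;
    hipStreamCreate(&stream);

    void* d_buf = nullptr;
    // Allocation and free are ordered on the stream instead of blocking the host.
    hipMallocAsync(&d_buf, 1 << 20, stream);   // 1 MiB of device memory
    // ... enqueue kernels or copies that use d_buf on the same stream ...
    hipFreeAsync(d_buf, stream);

    // The host only waits here, once, for all queued work to finish.
    hipStreamSynchronize(stream);
    hipStreamDestroy(stream);
    return 0;
}
```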
|
||||
|
||||
### Pageable memory
|
||||
|
||||
Pageable memory is what you typically get when calling `malloc` or `new` in a C++ application.
|
||||
It is unique in that it exists on "pages" (blocks of memory), which can be migrated to other memory storage.
|
||||
Examples include migrating memory between CPU sockets on a motherboard, or a system that runs out of space in RAM and starts paging RAM out to the swap partition of the hard drive.
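
For reference, a pageable buffer from the system allocator can still be used as the source of a `hipMemcpy`; the runtime typically stages it through an internal pinned buffer. A minimal sketch with an illustrative size:

```cpp
#include <hip/hip_runtime.h>
#include <vector>

int main() {
    const size_t n = 1 << 20;
    std::vector<float> host(n, 1.0f);   // pageable host memory from the system allocator

    float* d_ptr = nullptr;
    hipMalloc(reinterpret_cast<void**>(&d_ptr), n * sizeof(float));

    // The copy works, but the pageable source is typically staged internally,
    // which is why pinned memory (next section) transfers faster.
    hipMemcpy(d_ptr, host.data(), n * sizeof(float), hipMemcpyHostToDevice);

    hipFree(d_ptr);
    return 0;
}
```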
|
||||
|
||||
### Pinned memory
|
||||
|
||||
Pinned memory (or page-locked memory, or non-pageable memory) is host memory that is mapped into the address space of all GPUs, meaning that the pointer can be used on both host and device.
|
||||
Accessing host-resident pinned memory in device kernels is generally not recommended for performance, as it can force the data to traverse the host-device interconnect (e.g. PCIe), which is much slower than the on-device bandwidth (>40x on MI200).
|
||||
|
||||
Pinned host memory can be allocated with one of two types of coherence support:
|
||||
|
||||
:::{note}
|
||||
In HIP, pinned memory allocations are coherent by default (`hipHostMallocDefault`).
|
||||
There are additional pinned memory flags (e.g. `hipHostMallocMapped` and `hipHostMallocPortable`).
|
||||
On MI200 these options do not impact performance.
|
||||
<!-- TODO: link to programming_manual#memory-allocation-flags -->
|
||||
For more information, see the section *memory allocation flags* in the HIP Programming Guide: {doc}`hip:how-to/programming_manual`.
|
||||
:::
|
||||
|
||||
Much like how a process can be locked to a CPU core by setting affinity, a pinned memory allocator does this with the memory storage system.
|
||||
On multi-socket systems it is important to ensure that pinned memory is located on the same socket as the owning process, or else each cache line will be moved through the CPU-CPU interconnect, thereby increasing latency and potentially decreasing bandwidth.
|
||||
|
||||
In practice, pinned memory is used to improve transfer times between host and device.
|
||||
For transfer operations, such as `hipMemcpy` or `hipMemcpyAsync`, using pinned memory instead of pageable memory on host can lead to a ~3x improvement in bandwidth.
|
||||
|
||||
:::{tip}
|
||||
If the application needs to move data back and forth between device and host (separate allocations), use pinned memory on the host side.
|
||||
:::
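
A minimal sketch of that recommendation, pairing a pinned host buffer with asynchronous copies on a stream (buffer size and names are illustrative only):

```cpp
#include <hip/hip_runtime.h>

int main() {
    const size_t bytes = 64 << 20;   // 64 MiB, for illustration

    float* h_pinned = nullptr;
    float* d_buf    = nullptr;
    hipHostMalloc(reinterpret_cast<void**>(&h_pinned), bytes, hipHostMallocDefault);  // pinned host memory
    hipMalloc(reinterpret_cast<void**>(&d_buf), bytes);

    hipStream_t stream;
    hipStreamCreate(&stream);

    // Asynchronous copies need pinned host memory to overlap with other work.
    hipMemcpyAsync(d_buf, h_pinned, bytes, hipMemcpyHostToDevice, stream);
    // ... launch kernels on the same stream that consume d_buf ...
    hipMemcpyAsync(h_pinned, d_buf, bytes, hipMemcpyDeviceToHost, stream);
    hipStreamSynchronize(stream);

    hipStreamDestroy(stream);
    hipFree(d_buf);
    hipHostFree(h_pinned);
    return 0;
}
```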
|
||||
|
||||
### Managed memory
|
||||
|
||||
Managed memory refers to universally addressable, or unified memory available on the MI200 series of GPUs.
|
||||
Much like pinned memory, managed memory shares a pointer between host and device and (by default) supports fine-grained coherence; however, managed memory can also automatically migrate pages between host and device.
The allocation is managed by the AMD GPU driver using the Linux HMM (Heterogeneous Memory Management) mechanism.
|
||||
|
||||
If heterogeneous memory management (HMM) is not available, then `hipMallocManaged` will default back to using system memory and will act like pinned host memory.
|
||||
Other managed memory API calls will have undefined behavior.
|
||||
It is therefore recommended to check for managed memory capability with `hipDeviceGetAttribute` and `hipDeviceAttributeManagedMemory` (a sketch follows the list below).
|
||||
|
||||
HIP supports additional calls that work with page migration:
|
||||
|
||||
* `hipMemAdvise`
|
||||
* `hipMemPrefetchAsync`
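
The following sketch ties these pieces together: it checks for managed memory support, allocates with `hipMallocManaged`, and applies the migration hints listed above. It is illustrative only; device 0 and the preferred-location advice are assumptions, not requirements:

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    int device = 0;
    int has_managed = 0;
    hipDeviceGetAttribute(&has_managed, hipDeviceAttributeManagedMemory, device);
    if (!has_managed) {
        std::fprintf(stderr, "Managed memory is not supported on this device\n");
        return 1;
    }

    const size_t n = 1 << 20;
    float* data = nullptr;
    hipMallocManaged(reinterpret_cast<void**>(&data), n * sizeof(float));  // one pointer for host and device

    for (size_t i = 0; i < n; ++i) data[i] = 1.0f;   // touch on the host first

    // Hint that the data should live on the device, then prefetch the pages
    // to device 0 before launching work that reads them.
    hipMemAdvise(data, n * sizeof(float), hipMemAdviseSetPreferredLocation, device);
    hipMemPrefetchAsync(data, n * sizeof(float), device, /*stream=*/0);
    hipDeviceSynchronize();

    hipFree(data);
    return 0;
}
```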
|
||||
|
||||
:::{tip}
|
||||
If the application needs to use data on both host and device regularly, does not want to deal with separate allocations, and is not worried about maxing out the VRAM on MI200 GPUs (64 GB per GCD), use managed memory.
|
||||
:::
|
||||
|
||||
:::{tip}
|
||||
If managed memory performance is poor, check to see if managed memory is supported on your system and if page migration (XNACK) is enabled.
|
||||
:::
|
||||
|
||||
## Access behavior
|
||||
|
||||
Memory allocations for GPUs behave as follows:
|
||||
|
||||
| API | Data location | Host access | Device access |
|
||||
|--------------------|---------------|--------------|----------------------|
|
||||
| System allocated | Host | Local access | Unhandled page fault |
|
||||
| `hipMallocManaged` | Host | Local access | Zero-copy |
|
||||
| `hipHostMalloc` | Host | Local access | Zero-copy* |
|
||||
| `hipMalloc` | Device | Zero-copy | Local access |
|
||||
|
||||
Zero-copy accesses happen over the Infinity Fabric interconnect or PCI-E lanes on discrete GPUs.
|
||||
|
||||
:::{note}
|
||||
While `hipHostMalloc` allocated memory is accessible by a device, the host pointer must be converted to a device pointer with `hipHostGetDevicePointer`.
|
||||
|
||||
Memory allocated through standard system allocators, such as `malloc`, can be accessed by a device by registering the memory via `hipHostRegister`.
|
||||
The device pointer to be used in kernels can be retrieved with `hipHostGetDevicePointer`.
|
||||
Registered memory is treated like `hipHostMalloc` and will have similar performance.
|
||||
|
||||
On devices that support and have [](#xnack) enabled, such as the MI250X, `hipHostRegister` is not required as memory accesses are handled via automatic page migration.
|
||||
:::
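
As an illustration of the note above, this sketch registers a system-allocated buffer and retrieves a device-usable pointer for it (error handling omitted; the buffer size is arbitrary):

```cpp
#include <hip/hip_runtime.h>
#include <cstdlib>

int main() {
    const size_t bytes = 1 << 20;
    void* host_ptr = std::malloc(bytes);          // ordinary system allocation

    // Pin the existing allocation so the device can access it.
    hipHostRegister(host_ptr, bytes, hipHostRegisterDefault);

    void* dev_view = nullptr;
    // Device-usable pointer aliasing the registered host memory (flags must be 0).
    hipHostGetDevicePointer(&dev_view, host_ptr, 0);

    // ... pass dev_view to kernels; accesses are zero-copy over the interconnect ...

    hipHostUnregister(host_ptr);
    std::free(host_ptr);
    return 0;
}
```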
|
||||
|
||||
### XNACK
|
||||
|
||||
Normally, host and device memory are separate and data has to be transferred manually via `hipMemcpy`.
|
||||
|
||||
On a subset of GPUs, such as the MI200, there is an option to automatically migrate pages of memory between host and device.
|
||||
This is important for managed memory, where the locality of the data is important for performance.
|
||||
Depending on the system, page migration may be disabled by default in which case managed memory will act like pinned host memory and suffer degraded performance.
|
||||
|
||||
*XNACK* describes the GPU's ability to retry memory accesses that fail due to a page fault (which would normally lead to a memory access error), and instead retrieve the missing page.
|
||||
|
||||
This also affects memory allocated by the system as indicated by the following table:
|
||||
|
||||
| API | Data location | Host after device access | Device after host access |
|
||||
|--------------------|---------------|--------------------------|--------------------------|
|
||||
| System allocated | Host | Migrate page to host | Migrate page to device |
|
||||
| `hipMallocManaged` | Host | Migrate page to host | Migrate page to device |
|
||||
| `hipHostMalloc` | Host | Local access | Zero-copy |
|
||||
| `hipMalloc` | Device | Zero-copy | Local access |
|
||||
|
||||
To check if page migration is available on a platform, use `rocminfo`:
|
||||
|
||||
```sh
|
||||
$ rocminfo | grep xnack
|
||||
Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-
|
||||
```
|
||||
|
||||
Here, `xnack-` means that XNACK is available but is disabled by default.
|
||||
Turning on XNACK by setting the environment variable `HSA_XNACK=1` gives the expected result, `xnack+`:
|
||||
|
||||
```sh
|
||||
$ HSA_XNACK=1 rocminfo | grep xnack
|
||||
Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack+
|
||||
```
|
||||
|
||||
`hipcc` by default will generate code that runs correctly with XNACK either enabled or disabled.
Setting the `--offload-arch=` option with `xnack+` or `xnack-` forces the code to run only with XNACK enabled or disabled, respectively.
|
||||
|
||||
```sh
|
||||
# Compiled kernels will run regardless if XNACK is enabled or is disabled.
|
||||
hipcc --offload-arch=gfx90a
|
||||
|
||||
# Compiled kernels will only run if XNACK is enabled with HSA_XNACK=1.
|
||||
hipcc --offload-arch=gfx90a:xnack+
|
||||
|
||||
# Compiled kernels will only run if XNACK is disabled with HSA_XNACK=0.
|
||||
hipcc --offload-arch=gfx90a:xnack-
|
||||
```
|
||||
|
||||
:::{tip}
|
||||
If you want to make use of page migration, use managed memory. While pageable memory will migrate correctly, it is not a portable solution and can have performance issues if the accessed data isn't page aligned.
|
||||
:::
|
||||
|
||||
### Coherence
|
||||
|
||||
* *Coarse-grained coherence* means that memory is only considered up to date at kernel boundaries, which can be enforced through `hipDeviceSynchronize`, `hipStreamSynchronize`, or any blocking operation that acts on the null stream (e.g. `hipMemcpy`).
|
||||
For example, cacheable memory is a type of coarse-grained memory where an up-to-date copy of the data can be stored elsewhere (e.g. in an L2 cache).
|
||||
* *Fine-grained coherence* means the coherence is supported while a CPU/GPU kernel is running.
|
||||
This can be useful if both host and device are operating on the same dataspace using system-scope atomic operations (e.g. updating an error code or flag to a buffer).
|
||||
Fine-grained memory implies that up-to-date data may be made visible to others regardless of kernel boundaries as discussed above.
|
||||
|
||||
| API | Flag | Coherence |
|
||||
|-------------------------|------------------------------|----------------|
|
||||
| `hipHostMalloc` | `hipHostMallocDefault` | Fine-grained |
|
||||
| `hipHostMalloc` | `hipHostMallocNonCoherent` | Coarse-grained |
|
||||
|
||||
| API | Flag | Coherence |
|
||||
|-------------------------|------------------------------|----------------|
|
||||
| `hipExtMallocWithFlags` | `hipDeviceMallocDefault` | Coarse-grained |
|
||||
| `hipExtMallocWithFlags` | `hipDeviceMallocFinegrained` | Fine-grained |
|
||||
|
||||
| API | `hipMemAdvise` argument | Coherence |
|
||||
|-------------------------|------------------------------|----------------|
|
||||
| `hipMallocManaged` | | Fine-grained |
|
||||
| `hipMallocManaged` | `hipMemAdviseSetCoarseGrain` | Coarse-grained |
|
||||
| `malloc` | | Fine-grained |
|
||||
| `malloc` | `hipMemAdviseSetCoarseGrain` | Coarse-grained |
|
||||
|
||||
:::{tip}
|
||||
Try to design your algorithms to avoid host-device memory coherence (e.g. system scope atomics). While it can be a useful feature in very specific cases, it is not supported on all systems, and can negatively impact performance by introducing the host-device interconnect bottleneck.
|
||||
:::
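
As a small illustration of the allocation-time choice in the first table above, the following sketch requests one fine-grained and one coarse-grained pinned host buffer; which one is appropriate depends on whether host and device must observe each other's updates while a kernel is running:

```cpp
#include <hip/hip_runtime.h>

int main() {
    const size_t bytes = 4096;

    float* fine = nullptr;
    float* coarse = nullptr;

    // Default pinned allocation: fine-grained coherence (visible while kernels run).
    hipHostMalloc(reinterpret_cast<void**>(&fine), bytes, hipHostMallocDefault);

    // Non-coherent pinned allocation: coarse-grained, only guaranteed up to date
    // at kernel boundaries and synchronization points.
    hipHostMalloc(reinterpret_cast<void**>(&coarse), bytes, hipHostMallocNonCoherent);

    hipHostFree(fine);
    hipHostFree(coarse);
    return 0;
}
```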
|
||||
|
||||
The availability of fine- and coarse-grained memory pools can be checked with `rocminfo`:
|
||||
|
||||
```sh
|
||||
$ rocminfo
|
||||
...
|
||||
*******
|
||||
Agent 1
|
||||
*******
|
||||
Name: AMD EPYC 7742 64-Core Processor
|
||||
...
|
||||
Pool Info:
|
||||
Pool 1
|
||||
Segment: GLOBAL; FLAGS: FINE GRAINED
|
||||
...
|
||||
Pool 3
|
||||
Segment: GLOBAL; FLAGS: COARSE GRAINED
|
||||
...
|
||||
*******
|
||||
Agent 9
|
||||
*******
|
||||
Name: gfx90a
|
||||
...
|
||||
Pool Info:
|
||||
Pool 1
|
||||
Segment: GLOBAL; FLAGS: COARSE GRAINED
|
||||
...
|
||||
```
|
||||
|
||||
## System direct memory access
|
||||
|
||||
In most cases, the default behavior for HIP in transferring data from a pinned host allocation to device will run at the limit of the interconnect.
|
||||
However, there are certain cases where the interconnect is not the bottleneck.
|
||||
|
||||
The primary way to transfer data onto and off of a GPU, such as the MI200, is to use the onboard System Direct Memory Access engine, which is used to feed blocks of memory to the off-device interconnect (either GPU-CPU or GPU-GPU).
|
||||
Each GCD has a separate SDMA engine for host-to-device and device-to-host memory transfers.
|
||||
Importantly, SDMA engines are separate from the computing infrastructure, meaning that memory transfers to and from a device will not impact kernel compute performance, though they do impact memory bandwidth to a limited extent.
|
||||
The SDMA engines are mainly tuned for PCIe-4.0 x16, which means they are designed to operate at bandwidths up to 32 GB/s.
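
One way to observe these transfer paths is to time a large host-to-device copy with HIP events; running the same binary with `HSA_ENABLE_SDMA=0` exercises the blit-kernel path discussed below. This is a rough, illustrative sketch, not a calibrated benchmark:

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    const size_t bytes = size_t(1) << 30;   // 1 GiB, large enough to approach peak bandwidth

    void* h_pinned = nullptr;
    void* d_buf    = nullptr;
    hipHostMalloc(&h_pinned, bytes, hipHostMallocDefault);
    hipMalloc(&d_buf, bytes);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Time one host-to-device transfer on the null stream.
    hipEventRecord(start, 0);
    hipMemcpyAsync(d_buf, h_pinned, bytes, hipMemcpyHostToDevice, 0);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    std::printf("Host-to-device bandwidth: %.1f GB/s\n",
                (bytes / 1.0e9) / (ms / 1.0e3));

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_buf);
    hipHostFree(h_pinned);
    return 0;
}
```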
|
||||
|
||||
:::{note}
|
||||
An important feature of the MI250X platform is the Infinity Fabric™ interconnect between host and device.
|
||||
The Infinity Fabric interconnect supports improved performance over standard PCIe-4.0 (usually ~50% more bandwidth); however, since the SDMA engine does not run at this speed, it will not max out the bandwidth of the faster interconnect.
|
||||
:::
|
||||
|
||||
The bandwidth limitation can be countered by bypassing the SDMA engine and replacing it with a type of copy kernel known as a "blit" kernel.
|
||||
Blit kernels will use the compute units on the GPU, thereby consuming compute resources, which may not always be beneficial.
|
||||
The easiest way to enable blit kernels is to set an environment variable `HSA_ENABLE_SDMA=0`, which will disable the SDMA engine.
|
||||
On systems where the GPU uses a PCIe interconnect instead of an Infinity Fabric interconnect, blit kernels will not impact bandwidth, but will still consume compute resources.
|
||||
The use of SDMA vs blit kernels also applies to MPI data transfers and GPU-GPU transfers.
|
||||
57
docs/conceptual/pcie-atomics.rst
Normal file
@@ -0,0 +1,57 @@
|
||||
.. meta::
|
||||
:description: How ROCm uses PCIe atomics
|
||||
:keywords: PCIe, PCIe atomics, atomics, Atomic operations, AMD, ROCm
|
||||
|
||||
*****************************************************************************
|
||||
How ROCm uses PCIe atomics
|
||||
*****************************************************************************
|
||||
AMD ROCm is an extension of the Heterogeneous System Architecture (HSA). To meet the requirements of an HSA-compliant system, ROCm supports queuing models, memory models, and signaling and synchronization protocols. ROCm can perform atomic Read-Modify-Write (RMW) transactions that extend inter-processor synchronization mechanisms to Input/Output (I/O) devices starting from Peripheral Component Interconnect Express 3.0 (PCIe™ 3.0). It supports the defined HSA capabilities for queuing and signaling memory operations. To learn more about the requirements of an HSA-compliant system, see the
|
||||
`HSA Platform System Architecture Specification <http://hsafoundation.com/wp-content/uploads/2021/02/HSA-SysArch-1.2.pdf>`_.
|
||||
|
||||
ROCm uses platform atomics to perform memory operations like queuing, signaling, and synchronization across multiple CPU, GPU agents, and I/O devices. Platform atomics ensure that atomic operations run synchronously, without interruptions or conflicts, across multiple shared resources.
|
||||
|
||||
Platform atomics in ROCm
|
||||
==============================
|
||||
Platform atomics enable the set of atomic operations that perform RMW actions across multiple processors, devices, and memory locations so that they run synchronously without interruption. An atomic operation is a sequence of computing instructions run as a single, indivisible unit. These instructions are completed in their entirety without any interruptions. If the instructions can't be completed as a unit without interruption, none of the instructions are run. These operations support 32-bit and 64-bit address formats.
|
||||
|
||||
Some of the operations for which ROCm uses platform atomics are:
|
||||
|
||||
* Update the HSA queue's ``read_dispatch_id``. The command processor on the GPU agent uses a 64-bit atomic add operation. It updates the packet ID it processed.
|
||||
* Update the HSA queue's ``write_dispatch_id``. The CPU and GPU agents use a 64-bit atomic add operation. It supports multi-writer queue insertions.
|
||||
* Update HSA Signals. A 64-bit atomic operation is used for CPU & GPU synchronization.
|
||||
|
||||
|
||||
PCIe for atomic operations
|
||||
----------------------------
|
||||
ROCm requires CPUs that support PCIe atomics. Similarly, all connected I/O devices should also support PCIe atomics for optimum compatibility. PCIe supports the ``CAS`` (Compare and Swap), ``FetchADD``, and ``SWAP`` atomic operations across multiple resources. These atomic operations are initiated by the I/O devices that support 32-bit, 64-bit, and 128-bit operands. Likewise, the target memory address where these atomic operations are performed should also be aligned to the size of the operand. This alignment ensures that the operations are performed efficiently and correctly without failure.
|
||||
|
||||
When an atomic operation is successful, the requester receives a response of completion along with the operation result. However, any errors associated with the operation are signaled to the requester by updating the Completion Status field. Issues accessing the target location or running the atomic operation are common errors. Depending upon the error, the Completion Status field is updated to Completer Abort (CA) or Unsupported Request (UR). The field is present in the Completion Descriptor.
|
||||
|
||||
To learn more about the industry standards and specifications of PCIe, see `PCI-SIG Specification <https://pcisig.com/specifications>`_.
|
||||
|
||||
To learn more about PCIe and its capabilities, consult the following white papers:
|
||||
|
||||
* `Atomic Read Modify Write Primitives by Intel <https://www.intel.es/content/dam/doc/white-paper/atomic-read-modify-write-primitives-i-o-devices-paper.pdf>`_
|
||||
* `PCI Express 3 Accelerator White paper by Intel <https://www.intel.sg/content/dam/doc/white-paper/pci-express3-accelerator-white-paper.pdf>`_
|
||||
* `PCIe Generation 4 Base Specification includes atomic operations <https://astralvx.com/storage/2020/11/PCI_Express_Base_4.0_Rev0.3_February19-2014.pdf>`_
|
||||
* `Xilinx PCIe Ultrascale White paper <https://docs.xilinx.com/v/u/8OZSA2V1b1LLU2rRCDVGQw>`_
|
||||
|
||||
Working with PCIe 3.0 in ROCm
|
||||
-------------------------------
|
||||
Starting with PCIe 3.0, atomic operations can be requested, routed through, and completed by PCIe components. Routing and completion do not require software support. Component support for each can be identified by the Device Capabilities 2 (DevCap2) register. Upstream
|
||||
bridges need to have atomic operations routing enabled. If not enabled, the atomic operations will fail even if the
|
||||
PCIe endpoint and PCIe I/O devices can perform atomic operations.
|
||||
|
||||
If your system uses PCIe switches to connect and enable communication between multiple PCIe components, the switches must also support atomic operations routing.
|
||||
|
||||
To enable atomic operations routing between multiple root ports, each root port must support atomic operation routing. This capability can be identified from the atomic operations routing support bit in the DevCap2 register. If the bit has value of 1, routing is supported. Atomic operation requests are permitted only if a component's ``DEVCTL2.ATOMICOP_REQUESTER_ENABLE``
|
||||
field is set. These requests can only be serviced if the upstream components also support atomic operation completion or if the requests can be routed to a component that supports atomic operation completion.
|
||||
|
||||
ROCm uses the PCIe-ID-based ordering technology for peer-to-peer (P2P) data transmission. PCIe-ID-based ordering technology is used when the GPU initiates multiple write operations to different memory locations.
|
||||
|
||||
For more information on changes implemented in PCIe 3.0, see `Overview of Changes to PCI Express 3.0 <https://www.mindshare.com/files/resources/PCIe%203-0.pdf>`_.
@@ -30,19 +30,20 @@ if os.environ.get("READTHEDOCS", "") == "True":
|
||||
project = "ROCm Documentation"
|
||||
author = "Advanced Micro Devices, Inc."
|
||||
copyright = "Copyright (c) 2024 Advanced Micro Devices, Inc. All rights reserved."
|
||||
version = "6.3.0"
|
||||
release = "6.3.0"
|
||||
version = "6.3.1"
|
||||
release = "6.3.1"
|
||||
setting_all_article_info = True
|
||||
all_article_info_os = ["linux", "windows"]
|
||||
all_article_info_author = ""
|
||||
|
||||
# pages with specific settings
|
||||
article_pages = [
|
||||
{"file": "about/release-notes", "os": ["linux", "windows"], "date": "2024-12-03"},
|
||||
{"file": "about/release-notes", "os": ["linux", "windows"], "date": "2024-12-20"},
|
||||
{"file": "how-to/deep-learning-rocm", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-ai/index", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-ai/install", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-ai/train-a-model", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-ai/accelerate-training", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-ai/deploy-your-model", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-ai/hugging-face-models", "os": ["linux"]},
|
||||
{"file": "how-to/rocm-for-hpc/index", "os": ["linux"]},
|
||||
|
||||
150
docs/contribute/building.md
Normal file
@@ -0,0 +1,150 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="description" content="Building ROCm documentation">
|
||||
<meta name="keywords" content="documentation, Visual Studio Code, GitHub, command line,
|
||||
AMD, ROCm">
|
||||
</head>
|
||||
|
||||
# Building documentation
|
||||
|
||||
## GitHub
|
||||
|
||||
If you open a pull request and scroll down to the summary panel,
|
||||
there is a commit status section. Next to the line
|
||||
`docs/readthedocs.com:advanced-micro-devices-demo`, there is a `Details` link.
|
||||
If you click this, it takes you to the Read the Docs build for your pull request.
|
||||
|
||||

|
||||
|
||||
If you don't see this line, click `Show all checks` to get an itemized view.
|
||||
|
||||
## Command line
|
||||
|
||||
You can build our documentation via the command line using Python.
|
||||
|
||||
See the `build.tools.python` setting in the [Read the Docs configuration file](https://github.com/ROCm/ROCm/blob/develop/.readthedocs.yaml) for the Python version used by Read the Docs to build documentation.
|
||||
|
||||
See the [Python requirements file](https://github.com/ROCm/ROCm/blob/develop/docs/sphinx/requirements.txt) for Python packages needed to build the documentation.
|
||||
|
||||
Use the Python Virtual Environment (`venv`) and run the following commands from the project root:
|
||||
|
||||
```sh
|
||||
python3 -mvenv .venv
|
||||
|
||||
.venv/bin/python -m pip install -r docs/sphinx/requirements.txt
|
||||
.venv/bin/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html
|
||||
```
|
||||
|
||||
Navigate to `_build/html/index.html` and open this file in a web browser.
|
||||
|
||||
## Visual Studio Code
|
||||
|
||||
With the help of a few extensions, you can create a productive environment to author and test
|
||||
documentation locally using Visual Studio (VS) Code. Follow these steps to configure VS Code:
|
||||
|
||||
1. Install the required extensions:
|
||||
|
||||
* Python: `(ms-python.python)`
|
||||
* Live Server: `(ritwickdey.LiveServer)`
|
||||
|
||||
2. Add the following entries to `.vscode/settings.json`.
|
||||
|
||||
```json
|
||||
{
|
||||
"liveServer.settings.root": "/.vscode/build/html",
|
||||
"liveServer.settings.wait": 1000,
|
||||
"python.terminal.activateEnvInCurrentTerminal": true
|
||||
}
|
||||
```
|
||||
|
||||
* `liveServer.settings.root`: Sets the root of the output website for live previews. Must be changed
|
||||
alongside the `tasks.json` command.
|
||||
* `liveServer.settings.wait`: Tells the live server to wait before updating, giving Sphinx time to
regenerate the site contents so the preview doesn't refresh before the build is complete.
|
||||
* `python.terminal.activateEnvInCurrentTerminal`: Activates the automatic virtual environment, so you
|
||||
can build the site from the integrated terminal.
|
||||
|
||||
3. Add the following tasks to `.vscode/tasks.json`.
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "Build Docs",
|
||||
"type": "process",
|
||||
"windows": {
|
||||
"command": "${workspaceFolder}/.venv/Scripts/python.exe"
|
||||
},
|
||||
"command": "${workspaceFolder}/.venv/bin/python3",
|
||||
"args": [
|
||||
"-m",
|
||||
"sphinx",
|
||||
"-j",
|
||||
"auto",
|
||||
"-T",
|
||||
"-b",
|
||||
"html",
|
||||
"-d",
|
||||
"${workspaceFolder}/.vscode/build/doctrees",
|
||||
"-D",
|
||||
"language=en",
|
||||
"${workspaceFolder}/docs",
|
||||
"${workspaceFolder}/.vscode/build/html"
|
||||
],
|
||||
"problemMatcher": [
|
||||
{
|
||||
"owner": "sphinx",
|
||||
"fileLocation": "absolute",
|
||||
"pattern": {
|
||||
"regexp": "^(?:.*\\.{3}\\s+)?(\\/[^:]*|[a-zA-Z]:\\\\[^:]*):(\\d+):\\s+(WARNING|ERROR):\\s+(.*)$",
|
||||
"file": 1,
|
||||
"line": 2,
|
||||
"severity": 3,
|
||||
"message": 4
|
||||
}
|
||||
},
|
||||
{
|
||||
"owner": "sphinx",
|
||||
"fileLocation": "absolute",
|
||||
"pattern": {
|
||||
"regexp": "^(?:.*\\.{3}\\s+)?(\\/[^:]*|[a-zA-Z]:\\\\[^:]*):{1,2}\\s+(WARNING|ERROR):\\s+(.*)$",
|
||||
"file": 1,
|
||||
"severity": 2,
|
||||
"message": 3
|
||||
}
|
||||
}
|
||||
],
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
> Implementation detail: two problem matchers had to be defined,
|
||||
> because VS Code doesn't tolerate some problem information being potentially
|
||||
> absent. While a single regex could match all types of errors, if a capture
|
||||
> group remains empty (the line number doesn't show up in all warning/error
|
||||
> messages) but the `pattern` references said empty capture group, VS Code
|
||||
> discards the message completely.
|
||||
|
||||
4. Configure the Python virtual environment (`venv`).
|
||||
|
||||
From the Command Palette, run `Python: Create Environment`. Select `venv` environment and
|
||||
`docs/sphinx/requirements.txt`.
|
||||
|
||||
5. Build the docs.
|
||||
|
||||
Launch the default build task using one of the following options:
|
||||
|
||||
* A hotkey (the default is `Ctrl+Shift+B`)
|
||||
* Issuing the `Tasks: Run Build Task` from the Command Palette
|
||||
|
||||
6. Open the live preview.
|
||||
|
||||
Navigate to the site output within VS Code: right-click on `.vscode/build/html/index.html` and
|
||||
select `Open with Live Server`. The contents should update on every rebuild without having to
|
||||
refresh the browser.
|
||||
|
Before Width: | Height: | Size: 13 KiB |
BIN
docs/data/how-to/rocm-for-ai/2-node-training-master.png
Normal file
|
After Width: | Height: | Size: 139 KiB |
BIN
docs/data/how-to/rocm-for-ai/2-node-training-worker.png
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
docs/data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
|
After Width: | Height: | Size: 35 KiB |
|
After Width: | Height: | Size: 242 KiB |
BIN
docs/data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
Normal file
|
After Width: | Height: | Size: 155 KiB |
BIN
docs/data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
Normal file
|
After Width: | Height: | Size: 242 KiB |
|
Before Width: | Height: | Size: 173 KiB |
BIN
docs/data/rocm-software-stack-6_3_1.jpg
Normal file
|
After Width: | Height: | Size: 340 KiB |
99
docs/how-to/Bar-Memory.rst
Normal file
@@ -0,0 +1,99 @@
|
||||
.. meta::
|
||||
:description: Learn about BAR configuration in AMD GPUs and ways to troubleshoot physical addressing limit
|
||||
:keywords: BAR memory, MMIO, GPU memory, Physical Addressing Limit, AMD, ROCm
|
||||
|
||||
**************************************
|
||||
Troubleshoot BAR access limitation
|
||||
**************************************
|
||||
Direct Memory Access (DMA) to PCIe devices using Base Address Registers (BARs) can be restricted due to physical addressing limits. These restrictions can result in data access failures between the system components. Peer-to-peer (P2P) DMA is used to access resources such as registers and memory between devices. PCIe devices need memory-mapped input/output (MMIO) space for DMA, and these MMIO spaces are defined in the PCIe BARs.
|
||||
|
||||
These BARs are a set of 32-bit or 64-bit registers that are used to define the resources that PCIe devices provide. The CPU and other system devices also use these to access the resources of the PCIe devices. P2P DMA only works when one device can directly access the local BAR memory of another. If the memory address of a BAR memory exceeds the physical addressing limit of a device, the device will not be able to access that BAR. This could be the device's own BAR or the BAR of another device in the system.
|
||||
|
||||
If the BAR memory exceeds the physical addressing limit of the device, the device will not be able to access the remote BAR.
|
||||
|
||||
To handle any BAR access issues that might occur, you need to be aware of the physical address limitations of the devices and understand the :ref:`BAR configuration of AMD GPUs <bar-configuration>`. This information is important when setting up additional MMIO apertures for PCIe devices in the system's physical address space.
|
||||
|
||||
Handling physical address limitation
|
||||
=============================================
|
||||
When a system boots, the system BIOS allocates the physical address space for the components in the system, including system memory and MMIO apertures. On modern 64-bit platforms, there are generally two or more MMIO apertures: one located below 4 GB of physical address space for 32-bit compatibility, and one or more above 4 GB for devices needing more space.
|
||||
|
||||
You can control the memory address of the high MMIO aperture from the system BIOS configuration options. This lets you configure the additional MMIO space to align with the physical addressing limit and allows P2P DMA between the devices. For example, if a PCIe device is limited to 44-bit of physical addressing, you should ensure that the MMIO aperture is set below 44-bit in the system physical address space.
|
||||
|
||||
There are two ways to handle this:
|
||||
|
||||
* Ensure that the high MMIO aperture is within the physical addressing limits of the devices in the system. For example, if the devices have a 44-bit physical addressing limit, set the ``MMIO High Base`` and ``MMIO High size`` options in the BIOS such that the aperture is within the 44-bit address range, and ensure that the ``Above 4G Decoding`` option is Enabled.
|
||||
|
||||
* Enable the Input-Output Memory Management Unit (IOMMU). When the IOMMU is enabled in non-passthrough mode, it will create a virtual I/O address space for each device on the system. It also ensures that all virtual addresses created in that space are within the physical addressing limits of the device. For more information on IOMMU, see :doc:`../conceptual/iommu`.
|
||||
|
||||
.. _bar-configuration:
|
||||
|
||||
BAR configuration for AMD GPUs
|
||||
================================================
|
||||
|
||||
The following table shows how the BARs are configured for AMD GPUs.
|
||||
|
||||
|
||||
.. list-table::
|
||||
:widths: 25 25 50
|
||||
:header-rows: 1
|
||||
|
||||
* - BAR Type
|
||||
- Value
|
||||
- Description
|
||||
* - BAR0-1 registers
|
||||
- 64-bit, Prefetchable, GPU memory
|
||||
- 8 GB or 16 GB depending on GPU. Set to less than 2^44 to support P2P access from other GPUs with a 44-bit physical address limit. Prefetchable memory enables faster reads for high-performance computing (HPC) by fetching contiguous data from the same source before it is requested, in anticipation of future requests.
|
||||
* - BAR2-3 registers
|
||||
- 64-bit, Prefetchable, Doorbell
|
||||
- Set to less than 2^44 to support P2P access from other GPUs with a 44-bit physical address limit. As a Doorbell BAR, it indicates to the GPU that a new operation is in its queue to be processed.
|
||||
* - BAR4 register
|
||||
- Optional
|
||||
- Not a boot device
|
||||
* - BAR5 register
|
||||
- 32-bit, Non-prefetchable, MMIO
|
||||
- Is set to less than 4 GB.
|
||||
|
||||
Example of BAR usage on AMD GPUs
|
||||
-------------------------------------
|
||||
The following is an example configuration of BARs set by the system BIOS on GFX8 GPUs with a 40-bit physical addressing limit:
|
||||
|
||||
.. code:: shell
|
||||
|
||||
11:00.0 Display controller: Advanced Micro Devices, Inc. [AMD/ATI] Fiji [Radeon R9 FURY / NANO
|
||||
Series] (rev c1)
|
||||
|
||||
Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device 0b35
|
||||
|
||||
Flags: bus master, fast devsel, latency 0, IRQ 119
|
||||
|
||||
Memory at bf40000000 (64-bit, prefetchable) [size=256M]
|
||||
|
||||
Memory at bf50000000 (64-bit, prefetchable) [size=2M]
|
||||
|
||||
I/O ports at 3000 [size=256]
|
||||
|
||||
Memory at c7400000 (32-bit, non-prefetchable) [size=256K]
|
||||
|
||||
Expansion ROM at c7440000 [disabled] [size=128K]
|
||||
|
||||
Details of the BARs configured in the example are:
|
||||
|
||||
**GPU Frame Buffer BAR:** ``Memory at bf40000000 (64-bit, prefetchable) [size=256M]``
|
||||
|
||||
The size of the BAR in the example is 256 MB. Generally, it will be the size of the GPU memory (typically 4 GB+). Depending upon the physical address limit and generation of AMD GPUs, the BAR can be set below 2^40, 2^44, or 2^48.
|
||||
|
||||
**Doorbell BAR:** ``Memory at bf50000000 (64-bit, prefetchable) [size=2M]``
|
||||
|
||||
The size of the BAR should typically be less than 10 MB for this generation of GPUs and has been set to 2 MB in the example. This BAR is placed less than 2^40 to allow peer-to-peer access from other generations of AMD GPUs.
|
||||
|
||||
**I/O BAR:** ``I/O ports at 3000 [size=256]``
|
||||
|
||||
This is for legacy VGA and boot device support. Because the GPUs used are not connected to a display (VGA devices), this is not a concern, even if it isn't set up in the system BIOS.
|
||||
|
||||
**MMIO BAR:** ``Memory at c7400000 (32-bit, non-prefetchable) [size=256K]``
|
||||
|
||||
The AMD Driver requires this to access the configuration registers. Since the remainder of the BAR available is only 1 DWORD (32-bit), this is set to less than 4 GB. In the example, it is fixed at 256 KB.
|
||||
|
||||
**Expansion ROM:** ``Expansion ROM at c7440000 [disabled] [size=128K]``
|
||||
|
||||
This is required by the AMD Driver to access the GPU video-BIOS. In the example, it is fixed at 128 KB.
|
||||
@@ -11,20 +11,24 @@ ROCm provides a comprehensive ecosystem for deep learning development, including
|
||||
deep learning frameworks and libraries such as PyTorch, TensorFlow, and JAX. ROCm works closely with these
|
||||
frameworks to ensure that framework-specific optimizations take advantage of AMD accelerator and GPU architectures.
|
||||
|
||||
The following guides cover installation processes for ROCm-aware deep learning frameworks.
|
||||
The following guides provide information on compatibility and supported
|
||||
features for these ROCm-enabled deep learning frameworks.
|
||||
|
||||
* :doc:`PyTorch for ROCm <rocm-install-on-linux:install/3rd-party/pytorch-install>`
|
||||
* :doc:`TensorFlow for ROCm <rocm-install-on-linux:install/3rd-party/tensorflow-install>`
|
||||
* :doc:`JAX for ROCm <rocm-install-on-linux:install/3rd-party/jax-install>`
|
||||
* :doc:`PyTorch compatibility <../compatibility/pytorch-compatibility>`
|
||||
.. * :doc:`TensorFlow compatibility <../compatibility/tensorflow-compatibility>`
|
||||
.. * :doc:`JAX compatibility <../compatibility/jax-compatibility>`
|
||||
|
||||
The following chart steps through typical installation workflows for installing deep learning frameworks for ROCm.
|
||||
This chart steps through typical installation workflows for installing deep learning frameworks for ROCm.
|
||||
|
||||
.. image:: ../data/how-to/framework_install_2024_07_04.png
|
||||
:alt: Flowchart for installing ROCm-aware machine learning frameworks
|
||||
:align: center
|
||||
|
||||
Find information on version compatibility and framework release notes in :doc:`Third-party support matrix
|
||||
<rocm-install-on-linux:reference/3rd-party-support-matrix>`.
|
||||
See the installation instructions to get started.
|
||||
|
||||
* :doc:`PyTorch for ROCm <rocm-install-on-linux:install/3rd-party/pytorch-install>`
|
||||
* :doc:`TensorFlow for ROCm <rocm-install-on-linux:install/3rd-party/tensorflow-install>`
|
||||
* :doc:`JAX for ROCm <rocm-install-on-linux:install/3rd-party/jax-install>`
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -36,3 +40,4 @@ through the following guides.
|
||||
* :doc:`rocm-for-ai/index`
|
||||
|
||||
* :doc:`llm-fine-tuning-optimization/index`
|
||||
|
||||
|
||||
@@ -1,264 +0,0 @@
|
||||
.. meta::
|
||||
:description: GPU-enabled Message Passing Interface
|
||||
:keywords: Message Passing Interface, MPI, AMD, ROCm
|
||||
|
||||
***************************************************************************************************
|
||||
GPU-enabled Message Passing Interface
|
||||
***************************************************************************************************
|
||||
|
||||
The Message Passing Interface (`MPI <https://www.mpi-forum.org>`_) is a standard API for distributed
|
||||
and parallel application development that can scale to multi-node clusters. To facilitate the porting of
|
||||
applications to clusters with GPUs, ROCm enables various technologies. You can use these
|
||||
technologies to add GPU pointers to MPI calls and enable ROCm-aware MPI libraries to deliver optimal
|
||||
performance for both intra-node and inter-node GPU-to-GPU communication.
|
||||
|
||||
The AMD kernel driver exposes remote direct memory access (RDMA) through *PeerDirect* interfaces.
|
||||
This allows network interface cards (NICs) to directly read and write to RDMA-capable GPU device
|
||||
memory, resulting in high-speed direct memory access (DMA) transfers between GPU and NIC. These
|
||||
interfaces are used to optimize inter-node MPI message communication.
|
||||
|
||||
The Open MPI project is an open source implementation of the MPI standard. It's developed and maintained by
|
||||
a consortium of academic, research, and industry partners. To compile Open MPI with ROCm support,
|
||||
refer to the following sections:
|
||||
|
||||
* :ref:`open-mpi-ucx`
|
||||
* :ref:`open-mpi-libfabric`
|
||||
|
||||
.. _open-mpi-ucx:
|
||||
|
||||
ROCm-aware Open MPI on InfiniBand and RoCE networks using UCX
|
||||
================================================================
|
||||
|
||||
The `Unified Communication Framework <https://www.openucx.org/documentation>`_ (UCX), is an
|
||||
open source, cross-platform framework designed to provide a common set of communication
|
||||
interfaces for various network programming models and interfaces. UCX uses ROCm technologies to
|
||||
implement various network operation primitives. UCX is the standard communication library for
|
||||
InfiniBand and RDMA over Converged Ethernet (RoCE) network interconnect. To optimize data
|
||||
transfer operations, many MPI libraries, including Open MPI, can leverage UCX internally.
|
||||
|
||||
UCX and Open MPI have a compile option to enable ROCm support. To install and configure UCX to compile Open MPI for ROCm, use the following instructions.
|
||||
|
||||
1. Set environment variables to install all software components in the same base directory. We use the
|
||||
home directory in our example, but you can specify a different location if you want.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export INSTALL_DIR=$HOME/ompi_for_gpu
|
||||
export BUILD_DIR=/tmp/ompi_for_gpu_build
|
||||
mkdir -p $BUILD_DIR
|
||||
|
||||
2. Install UCX. To view UCX and ROCm version compatibility, refer to the
|
||||
`communication libraries tables <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/3rd-party-support-matrix.html>`_
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export UCX_DIR=$INSTALL_DIR/ucx
|
||||
cd $BUILD_DIR
|
||||
git clone https://github.com/openucx/ucx.git -b v1.15.x
|
||||
cd ucx
|
||||
./autogen.sh
|
||||
mkdir build
|
||||
cd build
|
||||
../configure --prefix=$UCX_DIR \
|
||||
--with-rocm=/opt/rocm
|
||||
make -j $(nproc)
|
||||
make -j $(nproc) install
|
||||
|
||||
3. Install Open MPI.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export OMPI_DIR=$INSTALL_DIR/ompi
|
||||
cd $BUILD_DIR
|
||||
git clone --recursive https://github.com/open-mpi/ompi.git \
|
||||
-b v5.0.x
|
||||
cd ompi
|
||||
./autogen.pl
|
||||
mkdir build
|
||||
cd build
|
||||
../configure --prefix=$OMPI_DIR --with-ucx=$UCX_DIR \
|
||||
--with-rocm=/opt/rocm
|
||||
make -j $(nproc)
|
||||
make install
|
||||
|
||||
.. _rocm-enabled-osu:
|
||||
|
||||
ROCm-enabled OSU benchmarks
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
|
||||
You can use OSU Micro Benchmarks (OMB) to evaluate the performance of various primitives on
|
||||
ROCm-supported AMD GPUs. The ``--enable-rocm`` option exposes this functionality.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export OSU_DIR=$INSTALL_DIR/osu
|
||||
cd $BUILD_DIR
|
||||
wget http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-7.2.tar.gz
|
||||
tar xfz osu-micro-benchmarks-7.2.tar.gz
|
||||
cd osu-micro-benchmarks-7.2
|
||||
./configure --enable-rocm \
|
||||
--with-rocm=/opt/rocm \
|
||||
CC=$OMPI_DIR/bin/mpicc CXX=$OMPI_DIR/bin/mpicxx \
|
||||
LDFLAGS="-L$OMPI_DIR/lib/ -lmpi -L/opt/rocm/lib/ \
|
||||
$(hipconfig -C) -lamdhip64" CXXFLAGS="-std=c++11"
|
||||
make -j $(nproc)
|
||||
|
||||
Intra-node run
|
||||
----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Before running an Open MPI job, you must set the following environment variables to ensure that
|
||||
you're using the correct versions of Open MPI and UCX.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export LD_LIBRARY_PATH=$OMPI_DIR/lib:$UCX_DIR/lib:/opt/rocm/lib
|
||||
export PATH=$OMPI_DIR/bin:$PATH
|
||||
|
||||
To run the OSU bandwidth benchmark between the first two GPU devices (``GPU 0`` and ``GPU 1``)
|
||||
inside the same node, use the following code.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$OMPI_DIR/bin/mpirun -np 2 \
|
||||
-x UCX_TLS=sm,self,rocm \
|
||||
--mca pml ucx \
|
||||
./c/mpi/pt2pt/standard/osu_bw D D
|
||||
|
||||
This measures the unidirectional bandwidth from the first device (``GPU 0``) to the second device
|
||||
(``GPU 1``). To select specific devices, for example ``GPU 2`` and ``GPU 3``, include the following
|
||||
command:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export HIP_VISIBLE_DEVICES=2,3
|
||||
|
||||
To force using a copy kernel instead of a DMA engine for the data transfer, use the following
|
||||
command:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export HSA_ENABLE_SDMA=0
|
||||
|
||||
The following output shows the effective transfer bandwidth measured for inter-die data transfer
|
||||
between ``GPU 2`` and ``GPU 3`` on a system with MI250 GPUs. For messages larger than 67 MB, an effective
|
||||
utilization of about 150 GB/sec is achieved:
|
||||
|
||||
.. image:: ../data/how-to/gpu-enabled-mpi-1.png
|
||||
:width: 400
|
||||
:alt: Inter-GPU bandwidth for various payload sizes
|
||||
|
||||
Collective operations
|
||||
----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Collective operations on GPU buffers are best handled through the Unified Collective Communication
|
||||
(UCC) library component in Open MPI. To accomplish this, you must configure and compile the UCC
|
||||
library with ROCm support.
|
||||
|
||||
.. note::
|
||||
|
||||
You can verify UCC and ROCm version compatibility using the
|
||||
`communication libraries tables <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/3rd-party-support-matrix.html>`_
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export UCC_DIR=$INSTALL_DIR/ucc
|
||||
git clone https://github.com/openucx/ucc.git -b v1.2.x
|
||||
cd ucc
|
||||
./autogen.sh
|
||||
./configure --with-rocm=/opt/rocm \
|
||||
--with-ucx=$UCX_DIR \
|
||||
--prefix=$UCC_DIR
|
||||
make -j && make install
|
||||
|
||||
# Configure and compile Open MPI with UCX, UCC, and ROCm support
|
||||
cd ompi
|
||||
./configure --with-rocm=/opt/rocm \
|
||||
--with-ucx=$UCX_DIR \
|
||||
--with-ucc=$UCC_DIR \
|
||||
--prefix=$OMPI_DIR
|
||||
|
||||
To use the UCC component with an MPI application, you must set additional parameters:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
mpirun --mca pml ucx --mca osc ucx \
|
||||
--mca coll_ucc_enable 1 \
|
||||
--mca coll_ucc_priority 100 -np 64 ./my_mpi_app
|
||||
|
||||
.. _open-mpi-libfabric:
|
||||
|
||||
ROCm-aware Open MPI using libfabric
|
||||
================================================================
|
||||
|
||||
For network interconnects that are not covered in the previous category, such as HPE Slingshot,
|
||||
ROCm-aware communication can often be achieved through the libfabric library. For more information,
|
||||
refer to the `libfabric documentation <https://github.com/ofiwg/libfabric/wiki>`_.
|
||||
|
||||
.. note::
|
||||
|
||||
When using Open MPI v5.0.x with libfabric support, shared memory communication between
|
||||
processes on the same node goes through the *ob1/sm* component. This component has
|
||||
fundamental support for GPU memory that is accomplished by using a staging host buffer.
|
||||
Consequently, the performance of device-to-device shared memory communication is lower than
|
||||
the theoretical peak performance allowed by the GPU-to-GPU interconnect.
|
||||
|
||||
1. Install libfabric. Note that libfabric is often pre-installed. To determine if it's already installed, run:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
module avail libfabric
|
||||
|
||||
Alternatively, you can download and compile libfabric with ROCm support. Note that not all
|
||||
components required to support some networks (e.g., HPE Slingshot) are available in the open source
|
||||
repository. Therefore, using a pre-installed libfabric library is strongly recommended over compiling
|
||||
libfabric manually.
|
||||
|
||||
If a pre-compiled libfabric library is available on your system, you can skip the following step.
|
||||
|
||||
2. Compile libfabric with ROCm support.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export OFI_DIR=$INSTALL_DIR/ofi
|
||||
cd $BUILD_DIR
|
||||
git clone https://github.com/ofiwg/libfabric.git -b v1.19.x
|
||||
cd libfabric
|
||||
./autogen.sh
|
||||
./configure --prefix=$OFI_DIR \
|
||||
--with-rocr=/opt/rocm
|
||||
make -j $(nproc)
|
||||
make install
|
||||
|
||||
Installing Open MPI with libfabric support
|
||||
----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
To build Open MPI with libfabric, use the following code:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export OMPI_DIR=$INSTALL_DIR/ompi
|
||||
cd $BUILD_DIR
|
||||
git clone --recursive https://github.com/open-mpi/ompi.git \
|
||||
-b v5.0.x
|
||||
cd ompi
|
||||
./autogen.pl
|
||||
mkdir build
|
||||
cd build
|
||||
../configure --prefix=$OMPI_DIR --with-ofi=$OFI_DIR \
|
||||
--with-rocm=/opt/rocm
|
||||
make -j $(nproc)
|
||||
make install
|
||||
|
||||
ROCm-aware OSU with Open MPI and libfabric
|
||||
----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Compiling a ROCm-aware version of OSU benchmarks with Open MPI and libfabric uses the same
|
||||
process described in :ref:`rocm-enabled-osu`.
|
||||
|
||||
To run an OSU benchmark using multiple nodes, use the following code:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export LD_LIBRARY_PATH=$OMPI_DIR/lib:$OFI_DIR/lib64:/opt/rocm/lib
|
||||
$OMPI_DIR/bin/mpirun --mca pml ob1 --mca btl_ofi_mode 2 -np 2 \
|
||||
./c/mpi/pt2pt/standard/osu_bw D D
|
||||
@@ -399,9 +399,6 @@ Further reading
|
||||
- To learn how to optimize inference on LLMs, see
|
||||
:doc:`Fine-tuning LLMs and inference optimization </how-to/llm-fine-tuning-optimization/index>`.
|
||||
|
||||
- For a list of other ready-made Docker images for ROCm, see the
|
||||
:doc:`Docker image support matrix <rocm-install-on-linux:reference/docker-image-support-matrix>`.
|
||||
|
||||
- To compare with the previous version of the ROCm vLLM Docker image for performance validation, refer to
|
||||
`LLM inference performance validation on AMD Instinct MI300X (ROCm 6.2.0) <https://rocm.docs.amd.com/en/docs-6.2.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_.
|
||||
|
||||
|
||||
@@ -16,6 +16,8 @@ In this guide, you'll learn about:
|
||||
|
||||
- :doc:`Installing ROCm and machine learning frameworks <install>`
|
||||
|
||||
- :doc:`Scaling model training <scale-model-training>`
|
||||
|
||||
- :doc:`Training a model <train-a-model>`
|
||||
|
||||
- :doc:`Running models from Hugging Face <hugging-face-models>`
|
||||
|
||||
135
docs/how-to/rocm-for-ai/scale-model-training.rst
Normal file
@@ -0,0 +1,135 @@
|
||||
.. meta::
|
||||
:description: How to scale and accelerate model training
|
||||
:keywords: ROCm, AI, LLM, train, fine-tune, deploy, FSDP, DeepSpeed, LLaMA, tutorial
|
||||
|
||||
**********************
|
||||
Scaling model training
|
||||
**********************
|
||||
|
||||
Models at the scale of OpenAI GPT-2 or Meta Llama 2 70B present a fundamental challenge: no single accelerator or GPU
can simultaneously store and process all of the model's parameters during training. PyTorch
provides an answer to this computational constraint through its distributed training frameworks.
|
||||
|
||||
.. _rocm-for-ai-pytorch-distributed:
|
||||
|
||||
PyTorch distributed
|
||||
===================
|
||||
|
||||
Features in ``torch.distributed`` are categorized into three main components:
|
||||
|
||||
- `Distributed data-parallel training
|
||||
<https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html>`_ (DDP)
|
||||
|
||||
- `RPC-Based distributed training <https://pytorch.org/docs/stable/rpc.html>`_ (RPC)
|
||||
|
||||
- `Collective communication <https://pytorch.org/docs/stable/distributed.html>`_
|
||||
|
||||
In this topic, the focus is on the distributed data-parallelism strategy as it’s the most popular. To get started with DDP,
|
||||
you need to first understand how to coordinate the model and its training data across multiple accelerators or GPUs.
|
||||
|
||||
The DDP workflow on multiple accelerators or GPUs is as follows:
|
||||
|
||||
#. Split the current global training batch into small local batches on each GPU. For instance, if you have 8 GPUs and
|
||||
the global batch is set at 32 samples, each of the 8 GPUs will have a local batch size of 4 samples.
|
||||
|
||||
#. Copy the model to every device so each can process its local batches independently.
|
||||
|
||||
#. Run a forward pass, then a backward pass, and output the gradient of the weights with respect to the loss of the
|
||||
model for that local batch. This happens in parallel on multiple devices.
|
||||
|
||||
#. Synchronize the local gradients computed by each device and combine them to update the model weights. The updated
|
||||
weights are then redistributed to each device.
|
||||
|
||||
In DDP training, each process or worker owns a replica of the model and processes a batch of data, and then the reducer uses
|
||||
``allreduce`` to sum up gradients over different workers.
|
||||
|
||||
See the following developer blogs for more in-depth explanations and examples.
|
||||
|
||||
* `Multi GPU training with DDP — PyTorch Tutorials <https://pytorch.org/tutorials/beginner/ddp_series_multigpu.html>`_
|
||||
|
||||
* `Building a decoder transformer model on AMD GPUs — ROCm Blogs
|
||||
<https://rocm.blogs.amd.com/artificial-intelligence/decoder-transformer/README.html#distributed-training-on-multiple-gpus>`_
|
||||
|
||||
.. _rocm-for-ai-pytorch-fsdp:
|
||||
|
||||
PyTorch FSDP
|
||||
------------
|
||||
|
||||
As noted in :ref:`PyTorch distributed <rocm-for-ai-pytorch-distributed>`, DDP model weights and optimizer states
|
||||
are evenly replicated across all workers. Fully Sharded Data Parallel (FSDP) is a type of data parallelism that shards
|
||||
model parameters, optimizer states, and gradients across DDP ranks.
|
||||
|
||||
When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all workers. This makes
|
||||
training some very large models feasible by allowing larger models or batch sizes to fit on-device. However, this
|
||||
comes with the cost of increased communication volume. The communication overhead is reduced by internal optimizations
|
||||
like overlapping communication and computation.
|
||||
|
||||
For a high-level overview of how FSDP works, review `Getting started with Fully Sharded Data Parallel
|
||||
<https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html#how-fsdp-works>`_.
|
||||
|
||||
For detailed training steps, see `PyTorch FSDP examples
|
||||
<https://github.com/pytorch/examples/tree/main/distributed/FSDP>`_.
|
||||
|
||||
.. _rocm-for-ai-deepspeed:
|
||||
|
||||
DeepSpeed
|
||||
---------
|
||||
|
||||
`DeepSpeed <https://deepspeed.ai>`_ offers system innovations that make large-scale deep learning training effective,
|
||||
efficient, and easy to use. Innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, ZeRO-Infinity, and so on fall under
|
||||
the training pillar.
|
||||
|
||||
See `Pre-training a large language model with Megatron-DeepSpeed on multiple AMD GPUs
|
||||
<https://rocm.blogs.amd.com/artificial-intelligence/megatron-deepspeed-pretrain/README.html>`_ for a detailed example of
|
||||
training with DeepSpeed on an AMD accelerator or GPU.
|
||||
|
||||
.. _rocm-for-ai-automatic-mixed-precision:

Automatic mixed precision (AMP)
-------------------------------

As models increase in size, so do the time and memory needed to train them; their cost also increases. Any measure we
can take to reduce training time and memory usage through `automatic mixed precision
<https://pytorch.org/docs/stable/amp.html>`_ (AMP) is highly beneficial for most use cases.

See `Automatic mixed precision in PyTorch using AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/automatic-mixed-precision/README.html#automatic-mixed-precision-in-pytorch-using-amd-gpus>`_
for more information about running AMP on an AMD accelerator.
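
A minimal sketch of AMP inside a PyTorch training step is shown below; the model, data, and optimizer are
placeholders. ``autocast`` runs the forward pass in lower precision where it is safe to do so, and
``GradScaler`` guards against gradient underflow when FP16 is used.

.. code-block:: python

   import torch

   device = "cuda"  # AMD GPUs are exposed through the CUDA device API on ROCm
   model = torch.nn.Linear(512, 512).to(device)  # placeholder model
   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
   scaler = torch.cuda.amp.GradScaler()

   for _ in range(10):
       x = torch.randn(32, 512, device=device)
       target = torch.randn(32, 512, device=device)

       optimizer.zero_grad()
       # Run the forward pass and the loss computation in mixed precision.
       with torch.cuda.amp.autocast():
           loss = torch.nn.functional.mse_loss(model(x), target)

       # Scale the loss to avoid gradient underflow, then step through the scaler.
       scaler.scale(loss).backward()
       scaler.step(optimizer)
       scaler.update()
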
.. _rocm-for-ai-fine-tune:

Fine-tuning your model
======================

ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
example, LoRA, QLoRA, PEFT, and FSDP.

Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.

The following developer blogs showcase examples of fine-tuning a model on an AMD accelerator or GPU.

* Fine-tuning Llama2 with LoRA

  * `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering
    <https://rocm.blogs.amd.com/artificial-intelligence/llama2-lora/README.html>`_

* Fine-tuning Llama2 with QLoRA

  * `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU
    <https://rocm.blogs.amd.com/artificial-intelligence/llama2-Qlora/README.html>`_

* Fine-tuning a BERT-based LLM for a text classification task using JAX

  * `LLM distributed supervised fine-tuning with JAX
    <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_

* Fine-tuning StarCoder using PEFT

  * `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs
    <https://rocm.blogs.amd.com/artificial-intelligence/starcoder-fine-tune/README.html>`_

* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``

  * `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
    single/multi-node GPUs <https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart/finetuning>`_

@@ -1,140 +1,503 @@
.. meta::
   :description: How to use ROCm for AI
   :keywords: ROCm, AI, LLM, train, fine-tune, FSDP, DeepSpeed, LLaMA, tutorial
   :description: How to train a model using ROCm Megatron-LM
   :keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch

****************
Training a model
****************
**************************************
Training a model with ROCm Megatron-LM
**************************************

The following is a brief overview of popular component paths per AI development use-case, such as training, LLMs,
and inferencing.
.. _amd-megatron-lm:

Accelerating model training
===========================
The ROCm Megatron-LM framework is a specialized fork of the robust Megatron-LM, designed to
enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD Instinct™ MI300X
accelerators, AMD Megatron-LM delivers enhanced scalability, performance, and resource utilization for AI
workloads. It is purpose-built to :ref:`support models <amd-megatron-lm-model-support>`
like Meta's Llama 2, Llama 3, and Llama 3.1, enabling developers to train next-generation AI models with greater
efficiency. See the GitHub repository at `<https://github.com/ROCm/Megatron-LM>`__.

To train a large model like GPT2 or Llama 2 70B, a single accelerator or GPU cannot store all the model parameters
required for training. What if you could convert the single-GPU training code to run on multiple accelerators or GPUs?
PyTorch offers distributed training solutions to facilitate this.
For ease of use, AMD provides a ready-to-use Docker image for MI300X accelerators containing essential
components, including PyTorch, PyTorch Lightning, ROCm libraries, and Megatron-LM utilities. It contains the
following software to accelerate training workloads:

.. _rocm-for-ai-pytorch-distributed:
+--------------------------+--------------------------------+
| Software component       | Version                        |
+==========================+================================+
| ROCm                     | 6.1                            |
+--------------------------+--------------------------------+
| PyTorch                  | 2.4.0                          |
+--------------------------+--------------------------------+
| PyTorch Lightning        | 2.4.0                          |
+--------------------------+--------------------------------+
| Megatron Core            | 0.9.0                          |
+--------------------------+--------------------------------+
| Transformer Engine       | 1.5.0                          |
+--------------------------+--------------------------------+
| Flash Attention          | v2.6                           |
+--------------------------+--------------------------------+
| Transformers             | 4.44.0                         |
+--------------------------+--------------------------------+

PyTorch distributed
-------------------
Supported features and models
=============================

As of PyTorch 1.6.0, features in ``torch.distributed`` are categorized into three main components:
Megatron-LM provides the following key features to train large language models efficiently:

- `Distributed data-parallel training
  <https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html>`_ (DDP)
- Transformer Engine (TE)

- `RPC-Based distributed training <https://pytorch.org/docs/stable/rpc.html>`_ (RPC)
- APEX

- `Collective communication <https://pytorch.org/docs/stable/distributed.html>`_
- GEMM tuning

In this guide, the focus is on the distributed data-parallelism strategy as it’s the most popular. To get started with DDP,
let’s first understand how to coordinate the model and its training data across multiple accelerators or GPUs.
- Torch.compile

The DDP workflow on multiple accelerators or GPUs is as follows:
- 3D parallelism: TP + SP + CP

#. Split the current global training batch into small local batches on each GPU. For instance, if you have 8 GPUs and
   the global batch is set at 32 samples, each of the 8 GPUs will have a local batch size of 4 samples.
- Distributed optimizer

#. Copy the model to every device so each device can process its local batches independently.
- Flash Attention (FA) 2

#. Run a forward pass, then a backward pass, and output the gradient of the weights with respect to the loss of the
   model for that local batch. This happens in parallel on multiple devices.
- Fused kernels

#. Synchronize the local gradients computed by each device and combine them to update the model weights. The updated
   weights are then redistributed to each device.
- Pre-training

In DDP training, each process or worker owns a replica of the model and processes a batch of data, then the reducer uses
``allreduce`` to sum up gradients over different workers.
.. _amd-megatron-lm-model-support:

See the following developer blogs for more in-depth explanations and examples.
The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator.

* `Multi GPU training with DDP — PyTorch Tutorials <https://pytorch.org/tutorials/beginner/ddp_series_multigpu.html>`_
* Llama 2 7B

* `Building a decoder transformer model on AMD GPUs — ROCm Blogs
  <https://rocm.blogs.amd.com/artificial-intelligence/decoder-transformer/README.html#distributed-training-on-multiple-gpus>`_
* Llama 2 70B

.. _rocm-for-ai-pytorch-fsdp:
* Llama 3 8B

PyTorch FSDP
------------
* Llama 3 70B

As noted in :ref:`PyTorch distributed <rocm-for-ai-pytorch-distributed>`, in DDP, model weights and optimizer states
are evenly replicated across all workers. Fully Sharded Data Parallel (FSDP) is a type of data parallelism that shards
model parameters, optimizer states, and gradients across DDP ranks.
* Llama 3.1 8B

When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all workers. This makes
the training of some very large models feasible by allowing larger models or batch sizes to fit on-device. However, this
comes with the cost of increased communication volume. The communication overhead is reduced by internal optimizations
like overlapping communication and computation.
* Llama 3.1 70B

For a high-level overview of how FSDP works, review `Getting started with Fully Sharded Data Parallel
<https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html#how-fsdp-works>`_.
Prerequisite system validation steps
====================================

For detailed training steps, refer to the `PyTorch FSDP examples
<https://github.com/pytorch/examples/tree/main/distributed/FSDP>`_.
Complete the following system validation and optimization steps to set up your system before starting training.

.. _rocm-for-ai-deepspeed:
Disable NUMA auto-balancing
---------------------------

DeepSpeed
---------
Generally, application performance can benefit from disabling NUMA auto-balancing. However,
it might be detrimental to performance with certain types of workloads.

`DeepSpeed <https://deepspeed.ai>`_ offers system innovations that make large-scale deep learning training effective,
efficient, and easy to use. Innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, and ZeRO-Infinity fall under
the training pillar.
Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
the output is ``1``, run the following command to disable NUMA auto-balancing.

See `Pre-training a large language model with Megatron-DeepSpeed on multiple AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/megatron-deepspeed-pretrain/README.html>`_ for a detailed example of
training with DeepSpeed on an AMD accelerator or GPU.
.. code-block:: shell

.. _rocm-for-ai-automatic-mixed-precision:
   sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'

Automatic mixed precision (AMP)
See :ref:`mi300x-disable-numa` for more information.

Hardware verification with ROCm
-------------------------------

As models increase in size, so do the time and memory needed to train them; that is, their cost also increases. Any measure we
can take to reduce training time and memory usage through `automatic mixed precision
<https://pytorch.org/docs/stable/amp.html>`_ (AMP) is highly beneficial for most use cases.
Use the command ``rocm-smi --setperfdeterminism 1900`` to set the max clock speed up to 1900 MHz
instead of the default 2100 MHz. This can reduce the chance of a PCC event lowering the attainable
GPU clocks. This setting will not be required for new IFWI releases with the production PRC feature.
You can restore this setting to its default value with the ``rocm-smi -r`` command.

See `Automatic mixed precision in PyTorch using AMD GPUs — ROCm Blogs
<https://rocm.blogs.amd.com/artificial-intelligence/automatic-mixed-precision/README.html#automatic-mixed-precision-in-pytorch-using-amd-gpus>`_
for more information about running AMP on an AMD accelerator.
Run the command:

.. _rocm-for-ai-fine-tune:
.. code-block:: shell

Fine-tuning your model
======================
   rocm-smi --setperfdeterminism 1900

ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
example, LoRA, QLoRA, PEFT, and FSDP.
See :ref:`mi300x-hardware-verification-with-rocm` for more information.

Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.
RCCL Bandwidth Test
-------------------

The following developer blogs showcase examples of how to fine-tune a model on an AMD accelerator or GPU.
ROCm Collective Communications Library (RCCL) is a standalone library of standard collective communication
routines for GPUs. See the :doc:`RCCL documentation <rccl:index>` for more information. Before starting
pre-training, running an RCCL bandwidth test helps ensure that the multi-GPU or multi-node setup is optimized
for efficient distributed training.

* Fine-tuning Llama2 with LoRA
Running the RCCL bandwidth test helps verify that:

  * `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/llama2-lora/README.html>`_
- The GPUs can communicate across nodes or within a single node.

* Fine-tuning Llama2 with QLoRA
- The interconnect (such as InfiniBand, Ethernet, or Infinity Fabric) is functioning as expected and
  provides adequate bandwidth for communication.

  * `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/llama2-Qlora/README.html>`_
- There are no hardware setup or cabling issues that could affect communication between GPUs.

* Fine-tuning a BERT-based LLM for a text classification task using JAX
Tuning and optimizing hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  * `LLM distributed supervised fine-tuning with JAX — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
In distributed training, specific hyperparameters related to distributed communication can be tuned based on
the results of the RCCL bandwidth test. These variables are already set in the Docker image:

* Fine-tuning StarCoder using PEFT
.. code-block:: shell

  * `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs — ROCm Blogs
    <https://rocm.blogs.amd.com/artificial-intelligence/starcoder-fine-tune/README.html>`_
   # force all RCCL streams to be high priority
   export TORCH_NCCL_HIGH_PRIORITY=1

* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``
   # specify which RDMA interfaces to use for communication
   export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7

   # define the Global ID index used in RoCE mode
   export NCCL_IB_GID_INDEX=3

   # avoid a data corruption/mismatch issue that existed in past releases
   export RCCL_MSCCL_ENABLE=0

Running the RCCL Bandwidth Test
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It's recommended that you run the RCCL bandwidth test before launching training. It ensures that system
performance is sufficient to launch training. RCCL is not included in the AMD Megatron-LM Docker
image; follow the instructions in `<https://github.com/ROCm/rccl-tests>`__ to get started.
See :ref:`mi300x-rccl` for more information.

Run on 8 GPUs (``-g 8``), scanning from 8 bytes to 10 GB:

.. code-block:: shell

   ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 8

.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
   :width: 800

Using one MPI process per GPU and ``-g 1`` for performance-oriented runs on both single-node and multi-node is
recommended. So, a run on 8 GPUs looks something like:

.. code-block:: shell

   mpirun -np 8 --bind-to numa ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 1

.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
   :width: 800

Running with one MPI process per GPU ensures a one-to-one mapping of CPUs to GPUs, which can be beneficial
for smaller message sizes. This better represents the real-world use of RCCL in deep learning frameworks like
PyTorch and TensorFlow.

Use the following script to run the RCCL test for four MI300X GPU nodes. Modify paths and node addresses as needed.

.. code-block::

   /home/$USER/ompi_for_gpu/ompi/bin/mpirun -np 32 -H tw022:8,tw024:8,tw010:8,tw015:8 \
     --mca pml ucx \
     --mca btl ^openib \
     -x NCCL_SOCKET_IFNAME=ens50f0np0 \
     -x NCCL_IB_HCA=rdma0:1,rdma1:1,rdma2:1,rdma3:1,rdma4:1,rdma5:1,rdma6:1,rdma7:1 \
     -x NCCL_IB_GID_INDEX=3 \
     -x NCCL_MIN_NCHANNELS=40 \
     -x NCCL_DEBUG=version \
     $HOME/rccl-tests/build/all_reduce_perf -b 8 -e 8g -f 2 -g 1

.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
   :width: 800

.. _mi300x-amd-megatron-lm-training:

Start training on MI300X accelerators
=====================================

The pre-built ROCm Megatron-LM environment allows users to quickly validate system performance, conduct
training benchmarks, and achieve superior performance for models like Llama 2 and Llama 3.1.

Use the following instructions to set up the environment, configure the script to train models, and
reproduce the benchmark results on the MI300X accelerators with the AMD Megatron-LM Docker
image.

.. _amd-megatron-lm-requirements:

Download the Docker image and required packages
-----------------------------------------------

1. Use the following command to pull the Docker image from Docker Hub.

   .. code-block:: shell

      docker pull rocm/megatron-lm:24.12-dev

2. Launch the Docker container.

   .. code-block:: shell

      docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $CACHE_DIR:/root/.cache --name megatron-dev-env rocm/megatron-lm:24.12-dev /bin/bash

3. Clone the ROCm Megatron-LM repository to a local directory and install the required packages on the host machine.

   .. code-block:: shell

      git clone https://github.com/ROCm/Megatron-LM
      cd Megatron-LM

   .. note::

      This release is validated with ``ROCm/Megatron-LM`` commit `bb93ccb <https://github.com/ROCm/Megatron-LM/tree/bb93ccbfeae6363c67b361a97a27c74ab86e7e92>`_.
      Checking out this specific commit is recommended for a stable and reproducible environment.

   .. code-block:: shell

      git checkout bb93ccbfeae6363c67b361a97a27c74ab86e7e92

Prepare training datasets
-------------------------

If you already have the preprocessed data, you can skip this section.

Use the following command to process datasets. GPT data is used as an example. You can change the merge table, add an
end-of-document token, remove sentence splitting, and set the tokenizer type.

.. code-block:: shell

   python tools/preprocess_data.py \
       --input my-corpus.json \
       --output-prefix my-gpt2 \
       --vocab-file gpt2-vocab.json \
       --tokenizer-type GPT2BPETokenizer \
       --merge-file gpt2-merges.txt \
       --append-eod

In this case, the automatically generated output files are named ``my-gpt2_text_document.bin`` and
``my-gpt2_text_document.idx``.

.. image:: ../../data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png
   :width: 800

.. _amd-megatron-lm-environment-setup:

Environment setup
-----------------

In the ``examples/llama`` directory of Megatron-LM, if you're working with Llama 2 7B or Llama 2 70B, use the
``train_llama2.sh`` configuration script. Likewise, if you're working with Llama 3 or Llama 3.1, use
``train_llama3.sh`` and update the configuration script accordingly.

Network interface
^^^^^^^^^^^^^^^^^

To avoid connectivity issues, ensure the correct network interface is set in your training scripts.

1. Run the following command to find the active network interface on your system.

   .. code-block:: shell

      ip a

2. Update the ``NCCL_SOCKET_IFNAME`` and ``GLOO_SOCKET_IFNAME`` variables with your system’s network interface. For
   example:

   .. code-block:: shell

      export NCCL_SOCKET_IFNAME=ens50f0np0

      export GLOO_SOCKET_IFNAME=ens50f0np0

Dataset options
^^^^^^^^^^^^^^^

You can use either mock data or real data for training.

* If you're using a real dataset, update the ``DATA_PATH`` variable to point to the location of your dataset.

  .. code-block:: shell

     DATA_DIR="/root/.cache/data" # Change to where your dataset is stored

     DATA_PATH=${DATA_DIR}/bookcorpus_text_sentence

  .. code-block:: shell

     --data-path $DATA_PATH

  Ensure that the files are accessible inside the Docker container.

* Mock data can be useful for testing and validation. If you're using mock data, replace ``--data-path $DATA_PATH`` with the ``--mock-data`` option.

  .. code-block:: shell

     --mock-data

Tokenizer
^^^^^^^^^

Tokenization is the process of converting raw text into tokens that can be processed by the model. For Llama
models, this typically involves sub-word tokenization, where words are broken down into smaller units based on
a fixed vocabulary. The tokenizer is trained along with the model on a large corpus of text, and it learns a
fixed vocabulary that can represent a wide range of text from different domains. This allows Llama models to
handle a variety of input sequences, including unseen words or domain-specific terms.

To train any of the Llama 2 models that this Docker image supports, use the ``Llama2Tokenizer``.

To train any of the Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``.
Set the Hugging Face model link in the ``TOKENIZER_MODEL`` variable.

For example, if you're using the Llama 3.1 8B model:

.. code-block:: shell

   TOKENIZER_MODEL=meta-llama/Llama-3.1-8B
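
As a quick sanity check outside the training scripts, you can confirm that the tokenizer name resolves by
loading it with the Hugging Face ``transformers`` package. This snippet is illustrative only and assumes you
have access to the gated Meta repository (for example, through a valid ``HF_TOKEN``).

.. code-block:: python

   from transformers import AutoTokenizer

   # Assumes access to the gated meta-llama repository has been granted.
   tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")

   ids = tokenizer("Hello from ROCm Megatron-LM")["input_ids"]
   print(len(ids), ids[:8])
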
Run benchmark tests
-------------------

.. note::

   If you're running **multi-node training**, update the following environment variables. They can
   also be passed as command line arguments.

   * Change ``localhost`` to the master node's hostname:

     .. code-block:: shell

        MASTER_ADDR="${MASTER_ADDR:-localhost}"

   * Set the number of nodes you want to train on (for instance, ``2``, ``4``, ``8``):

     .. code-block:: shell

        NNODES="${NNODES:-1}"

   * Set the rank of each node (0 for master, 1 for the first worker node, and so on):

     .. code-block:: shell

        NODE_RANK="${NODE_RANK:-0}"

* Use this command to run a performance benchmark test of any of the Llama 2 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).

  .. code-block:: shell

     {variables} bash examples/llama/train_llama2.sh

* Use this command to run a performance benchmark test of any of the Llama 3 and Llama 3.1 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).

  .. code-block:: shell

     {variables} bash examples/llama/train_llama3.sh

.. _amd-megatron-lm-benchmark-test-vars:

The benchmark tests support the same set of variables:

+--------------------------+-----------------------+-----------------------+
| Name                     | Options               | Description           |
+==========================+=======================+=======================+
| ``TEE_OUTPUT``           | 0 or 1                | 0: disable training   |
|                          |                       | log                   |
|                          |                       |                       |
|                          |                       | 1: enable training    |
|                          |                       | log                   |
+--------------------------+-----------------------+-----------------------+
| ``MBS``                  |                       | Micro batch size      |
+--------------------------+-----------------------+-----------------------+
| ``BS``                   |                       | Batch size            |
+--------------------------+-----------------------+-----------------------+
| ``TP``                   | 1, 2, 4, 8            | Tensor parallel       |
+--------------------------+-----------------------+-----------------------+
| ``TE_FP8``               | 0 or 1                | Datatype.             |
|                          |                       | If it is set to 1,    |
|                          |                       | FP8.                  |
|                          |                       |                       |
|                          |                       | If it is set to 0,    |
|                          |                       | BF16.                 |
+--------------------------+-----------------------+-----------------------+
| ``NO_TORCH_COMPILE``     | 0 or 1                | If it is set to 1,    |
|                          |                       | enable torch.compile. |
|                          |                       |                       |
|                          |                       | If it is set to 0,    |
|                          |                       | disable torch.compile |
|                          |                       | (default).            |
+--------------------------+-----------------------+-----------------------+
| ``SEQ_LENGTH``           |                       | Input sequence length |
+--------------------------+-----------------------+-----------------------+
| ``GEMM_TUNING``          | 0 or 1                | If it is set to 1,    |
|                          |                       | enable GEMM tuning.   |
|                          |                       |                       |
|                          |                       | If it is set to 0,    |
|                          |                       | disable GEMM tuning.  |
+--------------------------+-----------------------+-----------------------+
| ``USE_FLASH_ATTN``       | 0 or 1                | 0: disable Flash      |
|                          |                       | Attention             |
|                          |                       |                       |
|                          |                       | 1: enable Flash       |
|                          |                       | Attention             |
+--------------------------+-----------------------+-----------------------+
| ``ENABLE_PROFILING``     | 0 or 1                | 0: disable torch      |
|                          |                       | profiling             |
|                          |                       |                       |
|                          |                       | 1: enable torch       |
|                          |                       | profiling             |
+--------------------------+-----------------------+-----------------------+
| ``MODEL_SIZE``           |                       | The size of the       |
|                          |                       | model: 7B/70B, etc.   |
+--------------------------+-----------------------+-----------------------+
| ``TOTAL_ITERS``          |                       | Total number of       |
|                          |                       | iterations            |
+--------------------------+-----------------------+-----------------------+
| ``transformer-impl``     | transformer_engine or | Enable transformer    |
|                          | local                 | engine by default     |
+--------------------------+-----------------------+-----------------------+

Benchmarking examples
^^^^^^^^^^^^^^^^^^^^^

.. tab-set::

   .. tab-item:: Single node training
      :sync: single

      Use this command to run training with the Llama 2 7B model on a single node. You can specify MBS, BS, FP
      datatype, and so on.

      .. code-block:: bash

         TEE_OUTPUT=1 MBS=5 BS=120 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh

      You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.

      See the sample output:

      .. image:: ../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
         :width: 800

   .. tab-item:: Multi node training
      :sync: multi

      Launch the Docker container on each node.

      In this example, run training with the Llama 2 7B model on 2 nodes with specific MBS, BS, FP
      datatype, and so on.

      On the master node:

      .. code-block:: bash

         TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh

      On the worker node:

      .. code-block:: bash

         TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh

      You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.

      Sample output for 2-node training:

      Master node:

      .. image:: ../../data/how-to/rocm-for-ai/2-node-training-master.png
         :width: 800

      Worker node:

      .. image:: ../../data/how-to/rocm-for-ai/2-node-training-worker.png
         :width: 800

* `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
  single/multi-node GPUs <https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart/finetuning>`_

@@ -537,6 +537,8 @@ installation was successful, refer to the
:doc:`rocm-install-on-linux:install/post-install`.
Should verification fail, consult :doc:`/how-to/system-debugging`.

.. _mi300x-hardware-verification-with-rocm:

Hardware verification with ROCm
-------------------------------

@@ -92,7 +92,7 @@ involves configuring tensor parallelism, leveraging advanced features, and
ensuring efficient execution. Here’s how to optimize vLLM performance:

* Tensor parallelism: Configure the
  :ref:`tensor-parallel-size parameter <mi300x-vllm-optimize-tp-gemm>` to distribute
  :ref:`tensor-parallel-size parameter <mi300x-vllm-multiple-gpus>` to distribute
  tensor computations across multiple GPUs. Adjust parameters such as
  ``batch-size``, ``input-len``, and ``output-len`` based on your workload.

@@ -2062,11 +2062,10 @@ collectives.
Multi-node FSDP and RCCL settings
---------------------------------

It's recommended to use high-priority HIP streams with RCCL.

The simplest way to enable this is by using the nightly PyTorch wheels, as the required changes from
`PR #122830 <https://github.com/pytorch/pytorch/pull/122830>`_ were not included in the PyTorch 2.3
release but are available in the nightly builds.
When using PyTorch's FSDP (Fully Sharded Data Parallel) feature, the HIP
streams used by RCCL and the HIP streams used for compute kernels do not
always overlap well. As a workaround, it's recommended to use
high-priority HIP streams with RCCL.

To configure high-priority streams:

@@ -37,17 +37,16 @@ ROCm documentation is organized into the following categories:
:::{grid-item-card} How to
:class-body: rocm-card-banner rocm-hue-12

* [Programming guide](./how-to/hip_programming_guide.rst)
* [Use ROCm for AI](./how-to/rocm-for-ai/index.rst)
* [Use ROCm for HPC](./how-to/rocm-for-hpc/index.rst)
* [Fine-tune LLMs and inference optimization](./how-to/llm-fine-tuning-optimization/index.rst)
* [System optimization](./how-to/system-optimization/index.rst)
* [AMD Instinct MI300X performance validation and tuning](./how-to/tuning-guides/mi300x/index.rst)
* [GPU cluster networking](https://rocm.docs.amd.com/projects/gpu-cluster-networking/en/latest/index.html)
* [System debugging](./how-to/system-debugging.md)
* [Use MPI](./how-to/gpu-enabled-mpi.rst)
* [Use advanced compiler features](./conceptual/compiler-topics.md)
* [Set the number of CUs](./how-to/setting-cus)
* [Set the number of CUs](./how-to/setting-cus)
* [Troubleshoot BAR access limitation](./how-to/Bar-Memory.rst)
* [ROCm examples](https://github.com/amd/rocm-examples)
:::

@@ -55,12 +54,11 @@ ROCm documentation is organized into the following categories:
:class-body: rocm-card-banner rocm-hue-8

* [GPU architecture overview](./conceptual/gpu-arch.md)
* [GPU memory](./conceptual/gpu-memory.md)
* [Input-Output Memory Management Unit (IOMMU)](./conceptual/iommu.rst)
* [File structure (Linux FHS)](./conceptual/file-reorg.md)
* [GPU isolation techniques](./conceptual/gpu-isolation.md)
* [Using CMake](./conceptual/cmake-packages.rst)
* [ROCm & PCIe atomics](./conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst)
* [PCIe atomics in ROCm](./conceptual/pcie-atomics.rst)
* [Inception v3 with PyTorch](./conceptual/ai-pytorch-inception.md)
* [Oversubscription of hardware resources](./conceptual/oversubscription.rst)
:::
@@ -73,6 +71,7 @@ ROCm documentation is organized into the following categories:
* [ROCm tools, compilers, and runtimes](./reference/rocm-tools.md)
* [Accelerator and GPU hardware specifications](./reference/gpu-arch-specs.rst)
* [Precision support](./reference/precision-support.rst)
* [Graph safe support](./reference/graph-safe-support.rst)
:::
<!-- markdownlint-enable MD051 -->

@@ -32,6 +32,21 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
    - L1 Instruction Cache (KiB)
    - VGPR File (KiB)
    - SGPR File (KiB)
  *
    - MI325X
    - CDNA3
    - gfx942
    - 256
    - 304 (38 per XCD)
    - 64
    - 64
    - 256
    - 32 (4 per XCD)
    - 32
    - 16 per 2 CUs
    - 64 per 2 CUs
    - 512
    - 12.5
  *
    - MI300X
    - CDNA3

docs/reference/graph-safe-support.rst (new file, 111 lines)
@@ -0,0 +1,111 @@
.. meta::
   :description: This page lists supported graph safe ROCm libraries.
   :keywords: AMD, ROCm, HIP, hipGRAPH

********************************************************************************
Graph-safe support for ROCm libraries
********************************************************************************

HIP graph-safe libraries operate safely in HIP execution graphs.
:ref:`hip:how_to_HIP_graph` are an alternative way of executing tasks on a GPU
that can provide performance benefits over launching kernels using the standard
method via streams.

Functions and routines from graph-safe libraries shouldn’t result in issues like
race conditions, deadlocks, or unintended dependencies.
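
For illustration only, the sketch below captures a small PyTorch workload into a graph. On ROCm,
PyTorch's ``torch.cuda.CUDAGraph`` maps onto HIP graphs, and any library calls issued during capture should
come from graph-safe libraries. The model and tensor shapes are placeholders.

.. code-block:: python

   import torch

   # Static buffers are required because a captured graph replays the same
   # memory addresses on every launch.
   static_x = torch.randn(64, 64, device="cuda")
   model = torch.nn.Linear(64, 64).cuda()

   # Warm up on a side stream before capture, as recommended by PyTorch.
   s = torch.cuda.Stream()
   s.wait_stream(torch.cuda.current_stream())
   with torch.cuda.stream(s):
       for _ in range(3):
           model(static_x)
   torch.cuda.current_stream().wait_stream(s)

   graph = torch.cuda.CUDAGraph()
   with torch.cuda.graph(graph):
       static_y = model(static_x)

   # Replaying the graph re-executes the captured kernels with minimal launch overhead.
   static_x.copy_(torch.randn(64, 64, device="cuda"))
   graph.replay()
   print(static_y.shape)
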
The following table shows whether a ROCm library is graph-safe.

.. list-table::
   :header-rows: 1

   *
     - ROCm library
     - Graph safe support
   *
     - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
     - ❌
   *
     - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - ✅
   *
     - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - ⚠️
   *
     - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - ✅
   *
     - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - ✅ (see :ref:`details <hipfft:hip-graph-support-for-hipfft>`)
   *
     - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - ✅
   *
     - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - ⚠️ (experimental)
   *
     - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - ✅
   *
     - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
     - ⚠️ (experimental)
   *
     - `hipTensor <https://github.com/ROCm/hipTensor>`_
     - ❌
   *
     - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - ❌
   *
     - `RCCL <https://github.com/ROCm/rccl>`_
     - ✅
   *
     - `rocAL <https://github.com/ROCm/rocAL>`_
     - ❌
   *
     - `rocALUTION <https://github.com/ROCm/rocALUTION>`_
     - ❌
   *
     - `rocBLAS <https://github.com/ROCm/rocBLAS>`_
     - ✅ (see :doc:`details <rocblas:reference/beta-features>`)
   *
     - `rocDecode <https://github.com/ROCm/rocDecode>`_
     - ❌
   *
     - `rocFFT <https://github.com/ROCm/rocFFT>`_
     - ✅ (see :ref:`details <rocfft:hip-graph-support-for-rocfft>`)
   *
     - `rocHPCG <https://github.com/ROCm/rocHPCG>`_
     - ❌
   *
     - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
     - ❌
   *
     - `rocPRIM <https://github.com/ROCm/rocPRIM>`_
     - ✅
   *
     - `rocRAND <https://github.com/ROCm/rocRAND>`_
     - ✅
   *
     - `rocSOLVER <https://github.com/ROCm/rocSOLVER>`_
     - ⚠️ (experimental)
   *
     - `rocSPARSE <https://github.com/ROCm/rocSPARSE>`_
     - ⚠️ (experimental)
   *
     - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - ❌ (see :doc:`details <rocthrust:hipgraph-support>`)
   *
     - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
     - ❌
   *
     - `RPP <https://github.com/ROCm/rpp>`_
     - ⚠️
   *
     - `Tensile <https://github.com/ROCm/Tensile>`_
     - ✅

✅: full support

⚠️: partial support

❌: not supported

@@ -1,7 +1,7 @@
<head>
  <meta charset="UTF-8">
  <meta name="description" content="ROCm API libraries & tools">
  <meta name="keywords" content="ROCm, API, libraries, tools, artificial intelligence, development,
  <meta name="keywords" content="ROCm, API, libraries, tools, AI, artificial intelligence, development,
  Communications, C++ primitives, Fast Fourier transforms, FFTs, random number generators, linear
  algebra, AMD">
</head>

@@ -8,6 +8,7 @@

| Version | Release date |
| ------- | ------------ |
| [6.3.1](https://rocm.docs.amd.com/en/docs-6.3.1/) | December 20, 2024 |
| [6.3.0](https://rocm.docs.amd.com/en/docs-6.3.0/) | December 3, 2024 |
| [6.2.4](https://rocm.docs.amd.com/en/docs-6.2.4/) | November 6, 2024 |
| [6.2.2](https://rocm.docs.amd.com/en/docs-6.2.2/) | September 27, 2024 |

@@ -40,6 +40,8 @@ subtrees:
    title: Installation
  - file: how-to/rocm-for-ai/train-a-model.rst
    title: Train a model
  - file: how-to/rocm-for-ai/scale-model-training.rst
    title: Scale model training
  - file: how-to/rocm-for-ai/hugging-face-models.rst
    title: Run models from Hugging Face
  - file: how-to/rocm-for-ai/deploy-your-model.rst
@@ -92,10 +94,6 @@ subtrees:
    title: System tuning
  - file: how-to/tuning-guides/mi300x/workload.rst
    title: Workload tuning
  - url: https://rocm.docs.amd.com/projects/gpu-cluster-networking/en/${branch}/index.html
    title: GPU cluster networking
  - file: how-to/gpu-enabled-mpi.rst
    title: Use MPI
  - file: how-to/system-debugging.md
  - file: conceptual/compiler-topics.md
    title: Use advanced compiler features
@@ -108,7 +106,9 @@ subtrees:
  - url: https://rocm.docs.amd.com/projects/llvm-project/en/latest/conceptual/openmp.html
    title: OpenMP support
  - file: how-to/setting-cus
    title: Set the number of CUs
    title: Set the number of CUs
  - file: how-to/Bar-Memory.rst
    title: Troubleshoot BAR access limitation
  - url: https://github.com/amd/rocm-examples
    title: ROCm examples

@@ -144,8 +144,6 @@ subtrees:
    title: AMD Instinct MI100/CDNA1 ISA
  - url: https://www.amd.com/system/files/documents/amd-cdna-whitepaper.pdf
    title: White paper
  - file: conceptual/gpu-memory.md
    title: GPU memory
  - file: conceptual/iommu.rst
    title: Input-Output Memory Management Unit (IOMMU)
  - file: conceptual/file-reorg.md
@@ -154,8 +152,8 @@ subtrees:
    title: GPU isolation techniques
  - file: conceptual/cmake-packages.rst
    title: Using CMake
  - file: conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst
    title: ROCm & PCIe atomics
  - file: conceptual/pcie-atomics.rst
    title: PCIe atomics in ROCm
  - file: conceptual/ai-pytorch-inception.md
    title: Inception v3 with PyTorch
  - file: conceptual/oversubscription.rst
@@ -171,6 +169,8 @@ subtrees:
    title: Hardware specifications
  - file: reference/precision-support.rst
    title: Precision support
  - file: reference/graph-safe-support.rst
    title: Graph safe support

- caption: Contribute
  entries:

@@ -1,3 +1,3 @@
rocm-docs-core==1.11.0
rocm-docs-core==1.12.0
sphinx-reredirects
sphinx-sitemap

@@ -90,7 +90,7 @@ requests==2.32.3
    # via
    #   pygithub
    #   sphinx
rocm-docs-core==1.11.0
rocm-docs-core==1.12.0
    # via -r requirements.in
smmap==5.0.1
    # via gitdb

@@ -10,9 +10,9 @@ ROCm is a software stack, composed primarily of open-source software, that
provides the tools for programming AMD Graphics Processing Units (GPUs), from
low-level kernels to high-level end-user applications.

.. image:: data/rocm-software-stack-6_3_0.jpg
.. image:: data/rocm-software-stack-6_3_1.jpg
   :width: 800
   :alt: AMD's ROCm software stack and neighboring technologies.
   :alt: AMD's ROCm software stack and enabling technologies.
   :align: center

Specifically, ROCm provides the tools for

@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest>
  <remote name="rocm-org" fetch="https://github.com/ROCm/" />
  <default revision="refs/tags/rocm-6.3.0"
  <default revision="refs/tags/rocm-6.3.1"
           remote="rocm-org"
           sync-c="true"
           sync-j="4" />

tools/autotag/templates/highlights/6.3.1.md (new file, 61 lines)
@@ -0,0 +1,61 @@
# ROCm 6.3.1 release notes

The release notes provide a summary of notable changes since the previous ROCm release.

- [Release highlights](#release-highlights)

- [Operating system and hardware support changes](#operating-system-and-hardware-support-changes)

- [ROCm components versioning](#rocm-components)

- [Detailed component changes](#detailed-component-changes)

- [ROCm known issues](#rocm-known-issues)

- [ROCm resolved issues](#rocm-resolved-issues)

- [ROCm upcoming changes](#rocm-upcoming-changes)

```{note}
If you’re using Radeon™ PRO or Radeon GPUs in a workstation setting with a
display connected, continue to use ROCm 6.2.3. See the [Use ROCm on Radeon GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/index.html)
documentation to verify compatibility and system requirements.
```
## Release highlights

The following are notable new features and improvements in ROCm 6.3.1. For changes to individual components, see
[Detailed component changes](#detailed-component-changes).

### Per queue resiliency for Instinct MI300 accelerators

The AMDGPU driver now includes enhanced resiliency for misbehaving applications on AMD Instinct MI300 accelerators. This helps isolate the impact of misbehaving applications, ensuring other workloads running on the same accelerator are unaffected.

### ROCm Runfile Installer

ROCm 6.3.1 introduces the ROCm Runfile Installer, with initial support for Ubuntu 22.04. The ROCm Runfile Installer facilitates ROCm installation without using a native Linux package management system, with or without network or internet access. For more information, see the [ROCm Runfile Installer documentation](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.1/install/rocm-runfile-installer.html).

### ROCm documentation updates

ROCm documentation continues to be updated to provide clearer and more comprehensive guidance for a wider variety of user needs and use cases.

* Added documentation on training a model with ROCm Megatron-LM. AMD offers a Docker image for MI300X accelerators
  containing essential components to get started, including ROCm libraries, PyTorch, and Megatron-LM utilities. See
  [Training a model using ROCm Megatron-LM](https://rocm.docs.amd.com/en/latest/how-to/rocm-for-ai/train-a-model.html)
  to get started.

  The new ROCm Megatron-LM training Docker accompanies the [ROCm vLLM inference
  Docker](https://rocm.docs.amd.com/en/latest/how-to/performance-validation/mi300x/vllm-benchmark.html)
  as a set of ready-to-use containerized solutions to get started with using ROCm
  for AI.

* Updated the [Instinct MI300X workload tuning
  guide](https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html) with more current optimization
  strategies. The updated sections include guidance on vLLM optimization, PyTorch TunableOp, and hipBLASLt tuning.

* HIP graph-safe libraries operate safely in HIP execution graphs. [HIP graphs](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/hipgraph.html#how-to-hip-graph) are an alternative way of executing tasks on a GPU that can provide performance benefits over launching kernels using the standard method via streams. A topic that shows whether a [ROCm library is graph-safe](https://advanced-micro-devices-demo--3953.com.readthedocs.build/en/3953/reference/graph-safe-support.html) has been added.

* The [Device memory](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/memory_management/device_memory.html) topic in the HIP memory management section has been updated.

* The HIP documentation has expanded with new resources for developers:
  * [Multi device management](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/multi_device.html)
  * [OpenGL interoperability](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/opengl_interop.html)

tools/autotag/templates/known_issues/6.3.1.md (new file, 8 lines)
@@ -0,0 +1,8 @@
## ROCm known issues

ROCm known issues are noted on {fab}`github` [GitHub](https://github.com/ROCm/ROCm/labels/Verified%20Issue). For known
issues related to individual components, review the [Detailed component changes](#detailed-component-changes).

### PCI Express Qualification Tool failure on Debian 12

The PCI Express Qualification Tool (PEQT) module present in the ROCm Validation Suite (RVS) might fail due to the segmentation issue in Debian 12 (bookworm). This will result in failure to determine the characteristics of the PCIe interconnect between the host platform and the GPU, like support for Gen 3 atomic completers, DMA transfer statistics, link speed, and link width. The standard PCIe command `lspci` can be used as an alternative to view the characteristics of the PCIe bus interconnect with the GPU. This issue is under investigation and will be addressed in a future release. See [GitHub issue #4175](https://github.com/ROCm/ROCm/issues/4175).

tools/autotag/templates/resolved_issues/6.3.1.md (new file, 23 lines)
@@ -0,0 +1,23 @@
## ROCm resolved issues

The following are previously known issues resolved in this release. For resolved issues related to
individual components, review the [Detailed component changes](#detailed-component-changes).

### Instinct MI300 series: backward weights convolution performance issue

Fixed a performance issue affecting certain tensor shapes during backward weights convolution when using FP16 or FP32 data types on Instinct MI300 series accelerators. See [GitHub issue #4080](https://github.com/ROCm/ROCm/issues/4080).

### ROCm Compute Profiler and ROCm Systems Profiler post-upgrade issues

Packaging metadata for ROCm Compute Profiler (`rocprofiler-compute`) and ROCm Systems Profiler
(`rocprofiler-systems`) has been updated to handle the renaming from Omniperf and Omnitrace,
respectively. This fixes minor issues when upgrading from ROCm 6.2 to 6.3. For more information, see the GitHub issues
[#4082](https://github.com/ROCm/ROCm/issues/4082) and
[#4083](https://github.com/ROCm/ROCm/issues/4082).

### Stale file due to OpenCL ICD loader deprecation

When upgrading from ROCm 6.2.x to ROCm 6.3.0, the issue of the removal of the `rocm-icd-loader` package
leaving a stale file in the old `rocm-6.2.x` directory has been resolved. The stale files left during
the upgrade from ROCm 6.2.x to ROCm 6.3.0 will be removed when upgrading to ROCm 6.3.1. For more
information, see [GitHub issue #4084](https://github.com/ROCm/ROCm/issues/4084).

tools/autotag/templates/support/6.3.1.md (new file, 9 lines)
@@ -0,0 +1,9 @@
## Operating system and hardware support changes

ROCm 6.3.1 adds support for Debian 12 (kernel: 6.1). Debian is supported only on AMD Instinct accelerators. See the installation instructions at [Debian native installation](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.1/install/native-install/debian.html).

ROCm 6.3.1 enables support for the AMD Instinct MI325X accelerator. For more information, see [AMD Instinct™ MI325X Accelerators](https://www.amd.com/en/products/accelerators/instinct/mi300/mi325x.html).

See the [Compatibility
matrix](https://rocm.docs.amd.com/en/docs-6.3.1/compatibility/compatibility-matrix.html)
for more information about operating system and hardware compatibility.

tools/autotag/templates/upcoming_changes/6.3.1.md (new file, 13 lines)
@@ -0,0 +1,13 @@
## ROCm upcoming changes

The following changes to the ROCm software stack are anticipated for future releases.

### AMDGPU wavefront size compiler macro deprecation

The `__AMDGCN_WAVEFRONT_SIZE__` macro will be deprecated in an upcoming
release. It is recommended to remove any use of this macro. For more information, see [AMDGPU
support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.1/LLVM/clang/html/AMDGPUSupport.html).

### HIPCC Perl scripts deprecation

The HIPCC Perl scripts (`hipcc.pl` and `hipconfig.pl`) will be removed in an upcoming release.
@@ -66,46 +66,44 @@ endef
# It is a space separated list with zero or more elements.

$(call adddep,amd_smi_lib,${ASAN_DEP})
$(call adddep,aqlprofile,${ASAN_DEP} hsa)
$(call adddep,aqlprofile,${ASAN_DEP} rocr)
$(call adddep,comgr,lightning devicelibs)
$(call adddep,dbgapi,hsa comgr)
$(call adddep,dbgapi,rocr comgr)
$(call adddep,devicelibs,lightning)
$(call adddep,hip_on_rocclr,${ASAN_DEP} hsa comgr hipcc rocprofiler-register)
$(call adddep,hip_on_rocclr,${ASAN_DEP} rocr comgr hipcc rocprofiler-register)
$(call adddep,hipcc,)
$(call adddep,hipify_clang,hip_on_rocclr lightning)
$(call adddep,hsa,${ASAN_DEP} thunk lightning devicelibs rocprofiler-register)
$(call adddep,lightning,)
$(call adddep,omniperf,${ASAN_DEP})
$(call adddep,omnitrace,hipcc hsa hip_on_rocclr rocm_smi_lib rocprofiler roctracer)
$(call adddep,opencl_icd_loader,)
$(call adddep,opencl_on_rocclr,${ASAN_DEP} hsa comgr opencl_icd_loader)
$(call adddep,openmp_extras,thunk lightning devicelibs hsa)
$(call adddep,rdc,${ASAN_DEP} rocm_smi_lib hsa rocprofiler)
$(call adddep,rocclr,${ASAN_DEP} hsa comgr hipcc rocprofiler-register)
$(call adddep,rocm_bandwidth_test,${ASAN_DEP} hsa)
$(call adddep,opencl_on_rocclr,${ASAN_DEP} rocr comgr)
$(call adddep,openmp_extras,lightning devicelibs rocr)
$(call adddep,rocm_bandwidth_test,${ASAN_DEP} rocr)
$(call adddep,rocm_smi_lib,${ASAN_DEP})
$(call adddep,rocm-cmake,${ASAN_DEP})
$(call adddep,rocm-core,${ASAN_DEP})
$(call adddep,rocm-gdb,dbgapi)
$(call adddep,rocminfo,${ASAN_DEP} hsa)
$(call adddep,rocminfo,${ASAN_DEP} rocr)
$(call adddep,rocprofiler-register,${ASAN_DEP})
$(call adddep,rocprofiler-sdk,${ASAN_DEP} hsa aqlprofile opencl_on_rocclr hip_on_rocclr comgr)
$(call adddep,rocprofiler,${ASAN_DEP} hsa roctracer aqlprofile opencl_on_rocclr hip_on_rocclr comgr)
$(call adddep,rocr_debug_agent,${ASAN_DEP} hip_on_rocclr hsa dbgapi)
$(call adddep,roctracer,${ASAN_DEP} hsa hip_on_rocclr)
$(call adddep,thunk,${ASAN_DEP})
$(call adddep,rocprofiler-sdk,${ASAN_DEP} rocr aqlprofile opencl_on_rocclr hip_on_rocclr comgr)
$(call adddep,rocprofiler-systems,${ASAN_DEP} hipcc rocr hip_on_rocclr rocm_smi_lib rocprofiler roctracer rocprofiler-sdk)
$(call adddep,rocprofiler,${ASAN_DEP} rocr roctracer aqlprofile opencl_on_rocclr hip_on_rocclr comgr)
$(call adddep,rocprofiler-compute,${ASAN_DEP})
$(call adddep,rocr,${ASAN_DEP} lightning rocm_smi_lib devicelibs rocprofiler-register)
$(call adddep,rocr_debug_agent,${ASAN_DEP} hip_on_rocclr rocr dbgapi)
$(call adddep,roctracer,${ASAN_DEP} rocr hip_on_rocclr)

# rocm-dev points to all possible last finish components of Stage1 build.
rocm-dev-components :=rdc hipify_clang openmp_extras \
    omniperf omnitrace rocm-core amd_smi_lib hipcc \
    rocm_bandwidth_test rocr_debug_agent rocm-gdb
$(call adddep,rocm-dev,$(filter-out ${NOBUILD},${rocm-dev-components}))
rocm-dev-components :=amd_smi_lib aqlprofile comgr dbgapi devicelibs hip_on_rocclr hipcc hipify_clang \
    lightning rocprofiler-compute opencl_on_rocclr openmp_extras rocm_bandwidth_test rocm_smi_lib \
    rocm-cmake rocm-core rocm-gdb rocminfo rocprofiler-register rocprofiler-sdk rocprofiler-systems \
    rocprofiler rocr rocr_debug_agent roctracer
$(call adddep,rocm-dev,$(filter-out ${NOBUILD} kernel_ubuntu,${rocm-dev-components}))

$(call adddep,amdmigraphx,hip_on_rocclr half rocblas miopen-hip lightning hipcc)
$(call adddep,amdmigraphx,hip_on_rocclr half rocblas miopen-hip lightning hipcc hiptensor)
$(call adddep,composable_kernel,lightning hipcc hip_on_rocclr rocm-cmake)
$(call adddep,half,rocm-cmake)
$(call adddep,hipblas-common,lightning)
$(call adddep,hipblas,hip_on_rocclr rocblas rocsolver lightning hipcc)
$(call adddep,hipblaslt,hip_on_rocclr openmp_extras hipblas lightning hipcc)
$(call adddep,hipblaslt,hip_on_rocclr openmp_extras lightning hipcc hipblas-common rocm-dev)
$(call adddep,hipcub,hip_on_rocclr rocprim lightning hipcc)
$(call adddep,hipfft,hip_on_rocclr openmp_extras rocfft rocrand hiprand lightning hipcc)
$(call adddep,hipfort,rocblas hipblas rocsparse hipsparse rocfft hipfft rocrand hiprand rocsolver hipsolver lightning hipcc)
@@ -115,22 +113,25 @@ $(call adddep,hipsparse,hip_on_rocclr rocsparse lightning hipcc)
$(call adddep,hipsparselt,hip_on_rocclr hipsparse lightning hipcc openmp_extras)
$(call adddep,hiptensor,hip_on_rocclr composable_kernel lightning hipcc)
$(call adddep,miopen-deps,lightning hipcc)
$(call adddep,miopen-hip,composable_kernel half hip_on_rocclr miopen-deps rocblas roctracer lightning hipcc)
$(call adddep,miopen-hip,composable_kernel half hip_on_rocclr miopen-deps hipblas hipblaslt rocrand roctracer lightning hipcc)
$(call adddep,mivisionx,amdmigraphx miopen-hip rpp lightning hipcc)
$(call adddep,rccl,hip_on_rocclr hsa lightning hipcc rocm_smi_lib hipify_clang)
$(call adddep,rccl,rocm-core hip_on_rocclr rocr lightning hipcc rocm_smi_lib hipify_clang)
$(call adddep,rdc,rocm_smi_lib rocprofiler rocmvalidationsuite)
$(call adddep,rocalution,rocblas rocsparse rocrand lightning hipcc)
$(call adddep,rocblas,hip_on_rocclr openmp_extras lightning hipcc)
$(call adddep,rocblas,hip_on_rocclr openmp_extras lightning hipcc hipblaslt)
$(call adddep,rocal,mivisionx)
$(call adddep,rocdecode,hip_on_rocclr lightning hipcc)
$(call adddep,rocdecode,hip_on_rocclr lightning hipcc amdmigraphx)
$(call adddep,rocfft,hip_on_rocclr rocrand hiprand lightning hipcc openmp_extras)
$(call adddep,rocmvalidationsuite,hip_on_rocclr hsa rocblas rocm-core lightning hipcc rocm_smi_lib)
$(call adddep,rocjpeg,hip_on_rocclr lightning hipcc rocm-dev)
$(call adddep,rocmvalidationsuite,hip_on_rocclr rocr hipblas hiprand hipblaslt rocm-core lightning hipcc rocm_smi_lib)
$(call adddep,rocprim,hip_on_rocclr lightning hipcc)
$(call adddep,rocrand,hip_on_rocclr lightning hipcc)
$(call adddep,rocsolver,hip_on_rocclr rocblas rocsparse lightning hipcc)
$(call adddep,rocsolver,hip_on_rocclr rocblas rocsparse rocprim lightning hipcc)
$(call adddep,rocsparse,hip_on_rocclr rocprim lightning hipcc)
$(call adddep,rocthrust,hip_on_rocclr rocprim lightning hipcc)
$(call adddep,rocwmma,hip_on_rocclr rocblas lightning hipcc rocm-cmake rocm_smi_lib)
$(call adddep,rpp,half lightning hipcc openmp_extras)
$(call adddep,transferbench,hip_on_rocclr lightning hipcc)


# -------------------------------------------------------------------------
@@ -189,7 +190,7 @@ else # } {
# Pass in jobserver info using the RMAKE variable
    ${RMAKE}@( if set -x && source $${INFRA_REPO}/envsetup.sh && \
        rm -f $$@.errors $$@ $$@.repackaged && \
        $${INFRA_REPO}/build_$1.sh -c && source $${INFRA_REPO}/ccache-env-mathlib.sh && \
        $${INFRA_REPO}/build_$1.sh -c && \
        time bash -x $${INFRA_REPO}/build_$1.sh $${RELEASE_FLAG} $${SANITIZER_FLAG} && $${INFRA_REPO}/post_inst_pkg.sh "$1" ; \
        then mv $$@.inprogress $$@ ; \
        else mv $$@.inprogress $$@.errors ; echo Error in $1 >&2 ; exit 1 ;\
@@ -216,11 +217,14 @@ $(call peval,$(foreach dep,$(strip ${components}),$(call toplevel,${dep})))
all: $(addprefix T_,$(filter-out ${NOBUILD},${components}))
    @echo All ROCm components built
# Do not document this target
upload: $(addprefix U_,${components})
upload: $(addprefix U_,$(filter-out ${NOBUILD},${components}))
    @echo All ROCm components built and uploaded

upload-rocm-dev: $(addprefix U_,$(filter-out ${NOBUILD},${components}))
    @echo All rocm-dev components built and uploaded

##help rocm-dev: Build a subset of ROCm
rocm-dev: T_rocm-dev
rocm-dev: $(addprefix T_,$(filter-out ${NOBUILD},${components}))
    @echo rocm-dev built

${OUT_DIR}/logs:
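The hunk above defines the Makefile's public entry points: all, upload, upload-rocm-dev, and rocm-dev, each built from per-component T_/U_ stamp targets. For orientation, a few illustrative shell invocations follow; only the target names come from the rules shown here, while the -j setting and the example component are assumptions:

    make -j"$(nproc)" all    # build every component not listed in ${NOBUILD}
    make rocm-dev            # build only the rocm-dev subset
    make T_rocblas           # build a single component plus its declared prerequisites
    make upload              # build everything and upload the resulting packages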
||||
@@ -22,15 +22,15 @@ printUsage() {
    return 0
}

PROJ_NAME="amdsmi"
PACKAGE_ROOT="$(getPackageRoot)"
TARGET="build"

PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE="$(getIncludePath)"

AMDSMI_BUILD_DIR=$(getBuildPath amdsmi)
AMDSMI_PACKAGE_DEB_DIR="$(getPackageRoot)/deb/amdsmi"
AMDSMI_PACKAGE_RPM_DIR="$(getPackageRoot)/rpm/amdsmi"
AMDSMI_BUILD_DIR=$(getBuildPath $PROJ_NAME)
AMDSMI_PACKAGE_DEB_DIR="$PACKAGE_ROOT/deb/$PROJ_NAME"
AMDSMI_PACKAGE_RPM_DIR="$PACKAGE_ROOT/rpm/$PROJ_NAME"
AMDSMI_BUILD_TYPE="debug"
BUILD_TYPE="Debug"

@@ -57,10 +57,9 @@ do
    (-a | --address_sanitizer)
        set_asan_env_vars
        set_address_sanitizer_on
        # TODO - support standard option of passing cmake environment vars - CFLAGS,CXXFLAGS etc., to enable address sanitizer
        ADDRESS_SANITIZER=true ; shift ;;
    (-s | --static)
        SHARED_LIBS="OFF" ; shift ;;
        ack_and_skip_static ;;
    (-o | --outdir)
        TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
    (-p | --package)
||||
@@ -10,7 +10,9 @@ build_amdmigraphx() {
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
|
||||
pip3 install https://github.com/RadeonOpenCompute/rbuild/archive/master.tar.gz
|
||||
if ! command -v rbuild &> /dev/null; then
|
||||
pip3 install https://github.com/RadeonOpenCompute/rbuild/archive/master.tar.gz
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
@@ -20,7 +22,7 @@ build_amdmigraphx() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx1102;gfx942;gfx1200;gfx1201"
|
||||
fi
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
@@ -29,7 +31,7 @@ build_amdmigraphx() {
|
||||
--cxx="${ROCM_PATH}/llvm/bin/clang++" \
|
||||
--cc="${ROCM_PATH}/llvm/bin/clang" \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DCMAKE_MODULE_LINKER_FLAGS="-Wl,--enable-new-dtags -Wl,--rpath,$ROCM_LIB_RPATH" \
|
||||
-DCMAKE_MODULE_LINKER_FLAGS="-Wl,--enable-new-dtags,--build-id=sha1,--rpath,$ROCM_LIB_RPATH" \
|
||||
-DGPU_TARGETS="${GPU_TARGETS}" \
|
||||
-DCMAKE_INSTALL_RPATH=""
|
||||
|
||||
|
||||
@@ -11,7 +11,9 @@ printUsage() {
|
||||
echo " -p, --package <type> Specify packaging format"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
type referred to by pkg_type"
|
||||
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore.
|
||||
No effect of the param on this build"
|
||||
echo " -h, --help Prints this help"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
|
||||
printUsage() {
|
||||
echo
|
||||
echo "Usage: $(basename "${BASH_SOURCE}") [-c|-r|-h] [makeopts]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -c, --clean Removes all clang-ocl build artifacts"
|
||||
echo " -r, --release Build non-debug version clang-ocl (default is debug)"
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
echo " -h, --help Prints this help"
|
||||
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
|
||||
echo
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
TARGET="build"
|
||||
CLANG_OCL_DEST="$(getBinPath)"
|
||||
CLANG_OCL_SRC_ROOT="$CLANG_OCL_ROOT"
|
||||
CLANG_OCL_BUILD_DIR="$(getBuildPath clang-ocl)"
|
||||
|
||||
MAKEARG="$DASH_JAY"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_UTILS="$(getUtilsPath)"
|
||||
CLANG_OCL_PACKAGE_DEB="$PACKAGE_ROOT/deb/clang-ocl"
|
||||
CLANG_OCL_PACKAGE_RPM="$PACKAGE_ROOT/rpm/clang-ocl"
|
||||
BUILD_TYPE="Debug"
|
||||
SHARED_LIBS="ON"
|
||||
CLEAN_OR_OUT=0;
|
||||
MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
|
||||
|
||||
VALID_STR=`getopt -o hcraso:g: --long help,clean,release,clean,static,address_sanitizer,outdir:,gpu_list: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true ;
|
||||
do
|
||||
case "$1" in
|
||||
(-h | --help)
|
||||
printUsage ; exit 0;;
|
||||
(-c | --clean)
|
||||
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
|
||||
(-r | --release)
|
||||
MAKEARG="$MAKEARG BUILD_TYPE=rel" ; BUILD_TYPE="Release" ; shift ;;
|
||||
(-a | --address_sanitizer)
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
(-g | --gpu_list )
|
||||
GPU_LIST=$2; shift 2 ;;
|
||||
--) shift; break;;
|
||||
(*)
|
||||
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
|
||||
esac
|
||||
|
||||
done
|
||||
|
||||
RET_CONFLICT=1
|
||||
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
if [ $RET_CONFLICT -ge 30 ]; then
|
||||
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
exit $RET_CONFLICT
|
||||
fi
|
||||
|
||||
clean_clang-ocl() {
|
||||
echo "Removing clang-ocl"
|
||||
rm -rf $CLANG_OCL_DEST/clang-ocl
|
||||
rm -rf $CLANG_OCL_BUILD_DIR
|
||||
rm -rf $CLANG_OCL_PACKAGE_DEB
|
||||
rm -rf $CLANG_OCL_PACKAGE_RPM
|
||||
}
|
||||
|
||||
build_clang-ocl() {
|
||||
if [ ! -d "$CLANG_OCL_BUILD_DIR" ]; then
|
||||
mkdir -p $CLANG_OCL_BUILD_DIR
|
||||
pushd $CLANG_OCL_BUILD_DIR
|
||||
|
||||
if [ -e $PACKAGE_ROOT/lib/bitcode/opencl.amdgcn.bc ]; then
|
||||
BC_DIR="$ROCM_INSTALL_PATH/lib"
|
||||
else
|
||||
BC_DIR="$ROCM_INSTALL_PATH/amdgcn/bitcode"
|
||||
fi
|
||||
|
||||
cmake \
|
||||
$(rocm_cmake_params) \
|
||||
-DDISABLE_CHECKS="ON" \
|
||||
-DCLANG_BIN="$ROCM_INSTALL_PATH/llvm/bin" \
|
||||
-DBITCODE_DIR="$BC_DIR" \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DCPACK_SET_DESTDIR="OFF" \
|
||||
$CLANG_OCL_SRC_ROOT
|
||||
|
||||
echo "Making clang-ocl:"
|
||||
cmake --build . -- $MAKEARG
|
||||
cmake --build . -- $MAKEARG install
|
||||
cmake --build . -- $MAKEARG package
|
||||
popd
|
||||
fi
|
||||
|
||||
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$CLANG_OCL_PACKAGE_DEB" $CLANG_OCL_BUILD_DIR/rocm-clang-ocl*.deb
|
||||
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$CLANG_OCL_PACKAGE_RPM" $CLANG_OCL_BUILD_DIR/rocm-clang-ocl*.rpm
|
||||
}
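The two copy_if calls above are what stage the generated packages into the deb/ and rpm/ trees. copy_if itself comes from compute_utils.sh and its definition is not part of this diff; the sketch below is only a hypothetical equivalent inferred from the call shape copy_if <KIND> <enabled-generators> <dest-dir> <files...>:

    # Hypothetical stand-in for compute_utils.sh's copy_if (an assumption,
    # not the actual implementation): copy the packages only when the
    # requested kind appears in the semicolon-separated generator list.
    copy_if() {
        local kind="$1" enabled="$2" dest="$3"
        shift 3
        case ";${enabled};" in
            (*";${kind};"*) mkdir -p "$dest" && cp -a "$@" "$dest" ;;
        esac
    }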
|
||||
|
||||
|
||||
print_output_directory() {
|
||||
case ${PKGTYPE} in
|
||||
("deb")
|
||||
echo ${CLANG_OCL_PACKAGE_DEB};;
|
||||
("rpm")
|
||||
echo ${CLANG_OCL_PACKAGE_RPM};;
|
||||
(*)
|
||||
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
|
||||
esac
|
||||
exit
|
||||
}
|
||||
|
||||
case $TARGET in
|
||||
(clean) clean_clang-ocl ;;
|
||||
(build) build_clang-ocl ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
echo "Operation complete"
|
||||
exit 0
|
||||
|
||||
@@ -6,73 +6,53 @@ source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
|
||||
|
||||
set_component_src composable_kernel
|
||||
|
||||
GPU_ARCH_LIST="gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
|
||||
build_miopen_ck() {
|
||||
echo "Start Building Composable Kernel"
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
GPU_ARCH_LIST="gfx908:xnack+;gfx90a:xnack+;gfx942:xnack+"
|
||||
else
|
||||
unset_asan_env_vars
|
||||
set_address_sanitizer_off
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
GPU_ARCH_LIST="gfx942"
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
PYTHON_VERSION_WORKAROUND=''
|
||||
echo "DISTRO_ID: ${DISTRO_ID}"
|
||||
if [ "$DISTRO_ID" = "rhel-8.8" ] || [ "$DISTRO_ID" = "sles-15.5" ] ; then
|
||||
EXTRA_PYTHON_PATH=/opt/Python-3.8.13
|
||||
PYTHON_VERSION_WORKAROUND="-DCK_USE_ALTERNATIVE_PYTHON=${EXTRA_PYTHON_PATH}/bin/python3.8"
|
||||
# For the python interpreter we need to export LD_LIBRARY_PATH.
|
||||
export LD_LIBRARY_PATH=${EXTRA_PYTHON_PATH}/lib:$LD_LIBRARY_PATH
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
mkdir "$BUILD_DIR" && cd "$BUILD_DIR"
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="-DAMDGPU_TARGETS=${GPU_ARCHS}"
|
||||
fi
|
||||
|
||||
if [ "${ASAN_CMAKE_PARAMS}" == "true" ] ; then
|
||||
cmake -DBUILD_DEV=OFF \
|
||||
-DCMAKE_BUILD_TYPE=${BUILD_TYPE:-'RelWithDebInfo'} \
|
||||
-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++ \
|
||||
-DCMAKE_CXX_FLAGS=" -O3 " \
|
||||
-DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}/lib/cmake;${ROCM_PATH%-*}/$ASAN_LIBDIR;${ROCM_PATH%-*}/llvm;${ROCM_PATH%-*}" \
|
||||
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_LIB_RPATH" \
|
||||
-DCMAKE_EXE_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_EXE_RPATH" \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=1 \
|
||||
-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
|
||||
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DCMAKE_PACKAGING_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
|
||||
-DROCM_SYMLINK_LIBS=OFF \
|
||||
-DCPACK_PACKAGING_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DROCM_DISABLE_LDCONFIG=ON \
|
||||
-DROCM_PATH=${ROCM_PATH} \
|
||||
-DCPACK_GENERATOR="${PKGTYPE^^}" \
|
||||
${LAUNCHER_FLAGS} \
|
||||
-DINSTANCES_ONLY=ON \
|
||||
-DENABLE_ASAN_PACKAGING=true \
|
||||
"${GPU_TARGETS}" \
|
||||
"$COMPONENT_SRC"
|
||||
else
|
||||
cmake -DBUILD_DEV=OFF \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++ \
|
||||
-DCMAKE_CXX_FLAGS=" -O3 " \
|
||||
-DCMAKE_PREFIX_PATH=${ROCM_PATH%-*} \
|
||||
-DCMAKE_SHARED_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN' \
|
||||
-DCMAKE_EXE_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN/../lib' \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=1 \
|
||||
-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
|
||||
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DCMAKE_PACKAGING_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
|
||||
-DROCM_SYMLINK_LIBS=OFF \
|
||||
-DCPACK_PACKAGING_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DROCM_DISABLE_LDCONFIG=ON \
|
||||
-DROCM_PATH=${ROCM_PATH} \
|
||||
-DCPACK_GENERATOR="${PKGTYPE^^}" \
|
||||
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
|
||||
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
|
||||
${LAUNCHER_FLAGS} \
|
||||
-DINSTANCES_ONLY=ON \
|
||||
"${GPU_TARGETS}" \
|
||||
"$COMPONENT_SRC"
|
||||
fi
|
||||
cmake \
|
||||
-DBUILD_DEV=OFF \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
${PYTHON_VERSION_WORKAROUND} \
|
||||
-DCPACK_GENERATOR="${PKGTYPE^^}" \
|
||||
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
|
||||
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
|
||||
${LAUNCHER_FLAGS} \
|
||||
-DGPU_ARCHS="${GPU_ARCH_LIST}" \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_CXX_FLAGS=" -O3 " \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
cmake --build . -- -j${PROC} package
|
||||
cmake --build "$BUILD_DIR" -- install
|
||||
mkdir -p $PACKAGE_DIR && cp ./*.${PKGTYPE} $PACKAGE_DIR
|
||||
rm -rf *
|
||||
}
|
||||
|
||||
unset_asan_env_vars() {
|
||||
@@ -176,7 +156,7 @@ clean_miopen_ck() {
|
||||
stage2_command_args "$@"
|
||||
|
||||
case $TARGET in
|
||||
build) build_miopen_ck; build_miopen_ckProf;;
|
||||
build) build_miopen_ck ;;
|
||||
outdir) print_output_directory ;;
|
||||
clean) clean_miopen_ck ;;
|
||||
*) die "Invalid target $TARGET" ;;
|
||||
|
||||
@@ -15,7 +15,7 @@ printUsage() {
|
||||
type referred to by pkg_type"
|
||||
echo " -h, --help Prints this help"
|
||||
echo " -M, --skip_man_pages Do not build the 'docs' target"
|
||||
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
|
||||
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
echo " deb -> Debian format (default)"
|
||||
@@ -65,7 +65,7 @@ do
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ;;
|
||||
ack_and_skip_static ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; ((CLEAN_OR_OUT|=2)) ; shift 1 ;;
|
||||
(-M | --skip_man_pages) DODOCSBUILD=false;;
|
||||
|
||||
@@ -96,6 +96,7 @@ build_devicelibs() {
|
||||
if [ ! -e Makefile ]; then
|
||||
cmake $(rocm_cmake_params) \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DROCM_DEVICE_LIBS_BITCODE_INSTALL_LOC_NEW="$bitcodeInstallLoc/amdgcn" \
|
||||
-DROCM_DEVICE_LIBS_BITCODE_INSTALL_LOC_OLD="amdgcn" \
|
||||
"$DEVICELIBS_ROOT"
|
||||
|
||||
@@ -24,14 +24,15 @@ printUsage() {
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
PROJ_NAME="hip-on-rocclr"
|
||||
|
||||
BUILD_PATH="$(getBuildPath hip-on-rocclr)"
|
||||
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
|
||||
|
||||
TARGET="build"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_SRC="$(getSrcPath)"
|
||||
PACKAGE_DEB="$PACKAGE_ROOT/deb/hip-on-rocclr"
|
||||
PACKAGE_RPM="$PACKAGE_ROOT/rpm/hip-on-rocclr"
|
||||
PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
|
||||
PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
|
||||
PREFIX_PATH="$PACKAGE_ROOT"
|
||||
CORE_BUILD_DIR="$(getBuildPath hsa-core)"
|
||||
ROCclr_BUILD_DIR="$(getBuildPath rocclr)"
|
||||
@@ -52,7 +53,7 @@ MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
OFFLOAD_ARCH=()
|
||||
|
||||
DEFAULT_OFFLOAD_ARCH=(gfx900 gfx906 gfx908 gfx90a gfx940 gfx941 gfx942 gfx1030 gfx1031 gfx1033 gfx1034 gfx1035 gfx1100 gfx1101 gfx1102 gfx1103)
|
||||
DEFAULT_OFFLOAD_ARCH=(gfx900 gfx906 gfx908 gfx90a gfx940 gfx941 gfx942 gfx1030 gfx1031 gfx1033 gfx1034 gfx1035 gfx1100 gfx1101 gfx1102 gfx1200 gfx1201)
|
||||
|
||||
VALID_STR=`getopt -o hcrast:o: --long help,clean,release,address_sanitizer,static,offload-arch=:,outdir: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
@@ -168,9 +169,11 @@ build_catch_tests() {
|
||||
export ROCM_PATH="$ROCM_INSTALL_PATH"
|
||||
cmake \
|
||||
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DHIP_PLATFORM=amd \
|
||||
-DROCM_PATH="$ROCM_INSTALL_PATH" \
|
||||
-DOFFLOAD_ARCH_STR="$OFFLOAD_ARCH_STR" \
|
||||
$(rocm_cmake_params) \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DCPACK_RPM_DEBUGINFO_PACKAGE=FALSE \
|
||||
-DCPACK_DEBIAN_DEBUGINFO_PACKAGE=FALSE \
|
||||
@@ -206,6 +209,8 @@ package_samples() {
|
||||
export ROCM_PATH="$ROCM_INSTALL_PATH"
|
||||
cmake \
|
||||
-DROCM_PATH="$ROCM_INSTALL_PATH" \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
$(rocm_cmake_params) \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DCMAKE_MODULE_PATH="$CMAKE_PATH/hip" \
|
||||
-DCPACK_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
|
||||
|
||||
tools/rocm-build/build_hipblas-common.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"

set_component_src hipBLAS-common

build_hipblas-common() {
    echo "Start build"

    cd $COMPONENT_SRC
    mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"

    init_rocm_common_cmake_params
    cmake \
        "${rocm_math_common_cmake_params[@]}" \
        "$COMPONENT_SRC"
    cmake --build "$BUILD_DIR" -- install
    cmake --build "$BUILD_DIR" -- package

    rm -rf _CPack_Packages/ && find -name '*.o' -delete
    mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR

    show_build_cache_stats
}

clean_hipblas-common() {
    echo "Cleaning hipBLAS-common build directory: ${BUILD_DIR} ${PACKAGE_DIR}"
    rm -rf "$BUILD_DIR" "$PACKAGE_DIR"
    echo "Done!"
}

stage2_command_args "$@"

case $TARGET in
    build) build_hipblas-common ;;
    outdir) print_output_directory ;;
    clean) clean_hipblas-common ;;
    *) die "Invalid target $TARGET" ;;
esac
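Like the other stage-2 scripts in this diff, build_hipblas-common.sh relies on init_rocm_common_cmake_params to fill a bash array that is later expanded as "${rocm_math_common_cmake_params[@]}". The helper's real contents are not shown here, so the flags in the sketch below are assumptions; the point is the array-expansion idiom, which keeps every -D option a single word even when its value contains spaces or semicolons:

    # Illustration only: a minimal stand-in for init_rocm_common_cmake_params.
    init_rocm_common_cmake_params() {
        rocm_math_common_cmake_params=(
            "-DCMAKE_INSTALL_PREFIX=${ROCM_PATH:-/opt/rocm}"
            "-DCPACK_PACKAGING_INSTALL_PREFIX=${ROCM_PATH:-/opt/rocm}"
            "-DCMAKE_BUILD_TYPE=${BUILD_TYPE:-Release}"
        )
    }

    # Usage, as in the script above:
    init_rocm_common_cmake_params
    cmake "${rocm_math_common_cmake_params[@]}" "$COMPONENT_SRC"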
@@ -10,6 +10,12 @@ build_hipblas() {
|
||||
echo "Start build"
|
||||
|
||||
CXX="g++"
|
||||
CXX_FLAG=
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
CXX="amdclang++"
|
||||
CXX_FLAG="-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++"
|
||||
fi
|
||||
|
||||
CLIENTS_SAMPLES="ON"
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
@@ -17,6 +23,8 @@ build_hipblas() {
|
||||
CLIENTS_SAMPLES="OFF"
|
||||
fi
|
||||
|
||||
SHARED_LIBS="ON"
|
||||
|
||||
echo "C compiler: $CC"
|
||||
echo "CXX compiler: $CXX"
|
||||
echo "FC compiler: $FC"
|
||||
@@ -33,11 +41,12 @@ build_hipblas() {
|
||||
${LAUNCHER_FLAGS} \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DUSE_CUDA=OFF \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_BENCHMARKS=ON \
|
||||
-DBUILD_CLIENTS_SAMPLES="${CLIENTS_SAMPLES}" \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
|
||||
${CXX_FLAG} \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
cmake --build "$BUILD_DIR" -- -j${PROC}
|
||||
|
||||
@@ -8,6 +8,10 @@ set_component_src hipBLASLt
|
||||
build_hipblaslt() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
@@ -40,7 +44,6 @@ build_hipblaslt() {
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_BENCHMARKS=ON \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
|
||||
@@ -9,10 +9,12 @@ printUsage() {
|
||||
echo "Options:"
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -h, --help Prints this help"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
echo " -r, --release Makes a release build"
|
||||
echo " -h, --help Prints this help"
|
||||
echo
|
||||
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
|
||||
echo
|
||||
|
||||
return 0
|
||||
@@ -25,25 +27,31 @@ PROJ_NAME=$API_NAME
|
||||
TARGET="build"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
BUILD_TYPE="Debug"
|
||||
|
||||
SHARED_LIBS="ON"
|
||||
BUILD_DIR=$(getBuildPath $API_NAME)
|
||||
PACKAGE_DEB=$(getPackageRoot)/deb/$API_NAME
|
||||
PACKAGE_RPM=$(getPackageRoot)/rpm/$API_NAME
|
||||
PACKAGE_SRC="$(getSrcPath)"
|
||||
|
||||
while [ "$1" != "" ];
|
||||
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,address_sanitizer,static,outdir,wheel:,package: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true ;
|
||||
do
|
||||
case $1 in
|
||||
case "$1" in
|
||||
(-a | --address_sanitizer)
|
||||
ack_and_ignore_asan ;;
|
||||
(-c | --clean)
|
||||
TARGET="clean" ;;
|
||||
(-o | --outdir)
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 1 ;;
|
||||
(-r | --release)
|
||||
BUILD_TYPE="RelWithDebInfo" ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ;;
|
||||
(-h | --help)
|
||||
printUsage ; exit 0 ;;
|
||||
--) shift; break;;
|
||||
(*)
|
||||
echo "Invalid option [$1]" >&2; printUsage; exit 1 ;;
|
||||
esac
|
||||
@@ -79,6 +87,7 @@ build() {
|
||||
fi
|
||||
|
||||
cmake \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
$(rocm_cmake_params) \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DHIPCC_BACKWARD_COMPATIBILITY=OFF \
|
||||
@@ -87,7 +96,7 @@ build() {
|
||||
popd
|
||||
|
||||
cmake --build "$BUILD_DIR" -- $MAKEOPTS
|
||||
|
||||
|
||||
echo "Installing and Packaging hipcc"
|
||||
cmake --build "$BUILD_DIR" -- $MAKEOPTS install
|
||||
cmake --build "$BUILD_DIR" -- $MAKEOPTS package
|
||||
|
||||
@@ -9,6 +9,10 @@ set_component_src hipCUB
|
||||
build_hipcub() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
@@ -22,7 +26,7 @@ build_hipcub() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
CXX=$(set_build_variables CXX)\
|
||||
|
||||
@@ -21,7 +21,7 @@ build_hipfft() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
cmake \
|
||||
|
||||
@@ -8,11 +8,18 @@ set_component_src hipfort
|
||||
build_hipfort() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
|
||||
cmake --trace \
|
||||
cmake \
|
||||
-DCPACK_PACKAGING_INSTALL_PREFIX=${ROCM_PATH}\
|
||||
-DHIPFORT_INSTALL_DIR="${ROCM_PATH}" \
|
||||
-DCMAKE_PREFIX_PATH="${ROCM_PATH}/llvm;${ROCM_PATH}" \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCPACK_SET_DESTDIR="OFF" \
|
||||
-DCPACK_RPM_PACKAGE_RELOCATABLE="ON" \
|
||||
-DHIPFORT_COMPILER="${ROCM_PATH}/${ROCM_LLVMDIR}/bin/flang" \
|
||||
-DCMAKE_Fortran_FLAGS="-Mfree" \
|
||||
-DHIPFORT_COMPILER_FLAGS="-cpp" \
|
||||
|
||||
@@ -22,12 +22,12 @@ printUsage() {
|
||||
TARGET="build"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
HIPIFY_CLANG_BUILD_DIR="$(getBuildPath $HIPIFY_ROOT)"
|
||||
HIPIFY_CLANG_DIST_DIR="$HIPIFY_CLANG_BUILD_DIR/dist"
|
||||
BUILD_TYPE="Debug"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
HIPIFY_CLANG_HASH=""
|
||||
LIGHTNING_PATH="$ROCM_INSTALL_PATH/llvm"
|
||||
ADDRESS_SANITIZER=false
|
||||
INSTALL_CLANG_HEADERS="OFF"
|
||||
DEB_PATH="$(getDebPath hipify)"
|
||||
RPM_PATH="$(getRpmPath hipify)"
|
||||
SHARED_LIBS="ON"
|
||||
@@ -53,7 +53,7 @@ do
|
||||
set_address_sanitizer_on
|
||||
ADDRESS_SANITIZER=true ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
ack_and_skip_static ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
--) shift; break;;
|
||||
@@ -74,7 +74,6 @@ fi
|
||||
clean_hipify() {
|
||||
echo "Cleaning hipify-clang"
|
||||
rm -rf "$HIPIFY_CLANG_BUILD_DIR"
|
||||
rm -rf "$HIPIFY_CLANG_DIST_DIR"
|
||||
rm -rf "$DEB_PATH"
|
||||
rm -rf "$RPM_PATH"
|
||||
}
|
||||
@@ -101,16 +100,16 @@ package_hipify() {
|
||||
build_hipify() {
|
||||
echo "Building hipify-clang binaries"
|
||||
mkdir -p "$HIPIFY_CLANG_BUILD_DIR"
|
||||
mkdir -p "$HIPIFY_CLANG_DIST_DIR"
|
||||
|
||||
pushd "$HIPIFY_CLANG_BUILD_DIR"
|
||||
cmake \
|
||||
-DCMAKE_BUILD_TYPE="$BUILD_TYPE" \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DCMAKE_INSTALL_PREFIX="$HIPIFY_CLANG_DIST_DIR" \
|
||||
-DCMAKE_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
|
||||
-DCPACK_PACKAGING_INSTALL_PREFIX=$ROCM_INSTALL_PATH \
|
||||
-DCMAKE_PREFIX_PATH="$LIGHTNING_PATH" \
|
||||
-DADDRESS_SANITIZER="$ADDRESS_SANITIZER" \
|
||||
-DHIPIFY_INSTALL_CLANG_HEADERS="$INSTALL_CLANG_HEADERS" \
|
||||
$HIPIFY_ROOT
|
||||
|
||||
cmake --build . -- $MAKEOPTS install
|
||||
|
||||
@@ -21,6 +21,11 @@ done
|
||||
build_hiprand() {
|
||||
echo "Start build"
|
||||
|
||||
SHARED_LIBS="ON"
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
SHARED_LIBS="OFF"
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
@@ -34,17 +39,20 @@ build_hiprand() {
|
||||
|
||||
mkdir "$BUILD_DIR" && cd "$BUILD_DIR"
|
||||
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
CXX=$(set_build_variables CXX)\
|
||||
cmake \
|
||||
${LAUNCHER_FLAGS} \
|
||||
$(rocm_common_cmake_params) \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DAMDGPU_TARGETS=${GPU_TARGETS} \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DBUILD_TEST=ON \
|
||||
-DBUILD_BENCHMARK=ON \
|
||||
-DBUILD_CRUSH_TEST=ON \
|
||||
@@ -60,7 +68,6 @@ build_hiprand() {
|
||||
|
||||
rm -rf _CPack_Packages/ && find -name '*.o' -delete
|
||||
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
|
||||
|
||||
}
|
||||
|
||||
clean_hiprand() {
|
||||
|
||||
@@ -9,14 +9,23 @@ set_component_src hipSOLVER
|
||||
build_hipsolver() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
CXX_FLAG="-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++"
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
|
||||
CXX="g++"
|
||||
CXX="amdclang++"
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
fi
|
||||
|
||||
SHARED_LIBS="ON"
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
SHARED_LIBS="OFF"
|
||||
fi
|
||||
|
||||
echo "C compiler: $CC"
|
||||
echo "CXX compiler: $CXX"
|
||||
echo "FC compiler: $FC"
|
||||
@@ -30,13 +39,15 @@ build_hipsolver() {
|
||||
init_rocm_common_cmake_params
|
||||
cmake \
|
||||
-DUSE_CUDA=OFF \
|
||||
-DCMAKE_CXX_COMPILER=${CXX} \
|
||||
${LAUNCHER_FLAGS} \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_BENCHMARKS=ON \
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
|
||||
${CXX_FLAG} \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
cmake --build "$BUILD_DIR" -- -j${PROC}
|
||||
|
||||
@@ -10,14 +10,26 @@ set_component_src hipSPARSE
|
||||
build_hipsparse() {
|
||||
echo "Start build"
|
||||
|
||||
CXX="g++"
|
||||
CXX_FLAG=
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
CXX="${ROCM_PATH}/llvm/bin/clang++"
|
||||
CXX_FLAG="-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++"
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
|
||||
CXX="g++"
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
fi
|
||||
|
||||
SHARED_LIBS="ON"
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
SHARED_LIBS="OFF"
|
||||
fi
|
||||
|
||||
echo "C compiler: $CC"
|
||||
echo "CXX compiler: $CXX"
|
||||
|
||||
@@ -25,15 +37,16 @@ build_hipsparse() {
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
cmake \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
${LAUNCHER_FLAGS} \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DUSE_CUDA=OFF \
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DCMAKE_MODULE_PATH="${ROCM_PATH}/lib/cmake/hip;${ROCM_PATH}/hip/cmake" \
|
||||
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
|
||||
${CXX_FLAG} \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
cmake --build "$BUILD_DIR" -- -j${PROC}
|
||||
|
||||
@@ -21,6 +21,10 @@ done
|
||||
build_hipsparselt() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
@@ -50,7 +54,6 @@ build_hipsparselt() {
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_BENCHMARKS=ON \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
|
||||
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
@@ -9,6 +9,10 @@ set_component_src hipTensor
|
||||
build_hiptensor() {
|
||||
echo "Start build hipTensor"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
@@ -18,7 +22,6 @@ build_hiptensor() {
|
||||
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
|
||||
printUsage() {
|
||||
echo
|
||||
echo "Usage: $(basename "${BASH_SOURCE}") [options ...] [make options]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of type referred to by pkg_type"
|
||||
echo " -h, --help Prints this help"
|
||||
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
|
||||
echo
|
||||
echo
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
TARGET="build"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_SRC="$(getSrcPath)"
|
||||
PACKAGE_LIB="$(getLibPath)"
|
||||
PACKAGE_BIN="$(getBinPath)"
|
||||
PACKAGE_DEB="$(getPackageRoot)/deb/rocr"
|
||||
PACKAGE_RPM="$(getPackageRoot)/rpm/rocr"
|
||||
MAKEARG=""
|
||||
CORE_BUILD_DIR="$(getBuildPath hsa-core)"
|
||||
ROCR_DEV_BUILD_DIR="$(getBuildPath hsa-rocr-dev)"
|
||||
PREFIX_PATH="$PACKAGE_ROOT"
|
||||
BUILD_TYPE="Debug"
|
||||
SHARED_LIBS="ON"
|
||||
CLEAN_OR_OUT=0;
|
||||
MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
|
||||
unset HIP_DEVICE_LIB_PATH
|
||||
unset ROCM_PATH
|
||||
|
||||
VALID_STR=`getopt -o hcraso: --long help,clean,release,static,address_sanitizer,outdir: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true ;
|
||||
do
|
||||
case "$1" in
|
||||
(-h | --help)
|
||||
printUsage ; exit 0;;
|
||||
(-c | --clean)
|
||||
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
|
||||
(-r | --release)
|
||||
BUILD_TYPE="RelWithDebInfo" ; shift ;;
|
||||
(-a | --address_sanitizer)
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
--) shift; break;;
|
||||
(*)
|
||||
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
|
||||
esac
|
||||
|
||||
done
|
||||
|
||||
RET_CONFLICT=1
|
||||
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
if [ $RET_CONFLICT -ge 30 ]; then
|
||||
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
exit $RET_CONFLICT
|
||||
fi
|
||||
|
||||
clean_hsa() {
|
||||
echo "Cleaning HSA"
|
||||
|
||||
rm -rf "$CORE_BUILD_DIR"
|
||||
rm -rf "$PACKAGE_RPM"
|
||||
rm -rf "$PACKAGE_DEB"
|
||||
rm -f "$PACKAGE_ROOT"/lib/libhsa-runtime*
|
||||
rm -rf "$PACKAGE_ROOT/lib/cmake/hsa-runtime64"
|
||||
rm -rf "$PACKAGE_ROOT/include/hsa"
|
||||
rm -rf "$PACKAGE_ROOT/share/doc/hsa-runtime64"
|
||||
rm -rf "$PACKAGE_ROOT/hsa"
|
||||
}
|
||||
|
||||
|
||||
build_hsa_core() {
|
||||
echo "Build HSA"
|
||||
local coreMakeOpts="$DASH_JAY -C $CORE_BUILD_DIR"
|
||||
|
||||
echo "$HSA_CORE_ROOT"
|
||||
|
||||
if [ ! -d "$CORE_BUILD_DIR" ]; then
|
||||
mkdir -p "$CORE_BUILD_DIR"
|
||||
pushd "$CORE_BUILD_DIR"
|
||||
print_lib_type $SHARED_LIBS
|
||||
|
||||
cmake $(rocm_cmake_params) \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DENABLE_LDCONFIG=OFF \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DADDRESS_SANITIZER="$ADDRESS_SANITIZER" \
|
||||
"$HSA_CORE_ROOT"
|
||||
popd
|
||||
fi
|
||||
time cmake --build "$CORE_BUILD_DIR" -- $coreMakeOpts
|
||||
time cmake --build "$CORE_BUILD_DIR" -- $coreMakeOpts install
|
||||
time cmake --build "$CORE_BUILD_DIR" -- $coreMakeOpts package
|
||||
|
||||
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" $CORE_BUILD_DIR/hsa-rocr*.deb
|
||||
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" $CORE_BUILD_DIR/hsa-rocr*.rpm
|
||||
}
|
||||
|
||||
print_output_directory() {
|
||||
case ${PKGTYPE} in
|
||||
("deb")
|
||||
echo ${PACKAGE_DEB};;
|
||||
("rpm")
|
||||
echo ${PACKAGE_RPM};;
|
||||
(*)
|
||||
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
|
||||
esac
|
||||
exit
|
||||
}
|
||||
|
||||
case $TARGET in
|
||||
(clean) clean_hsa ;;
|
||||
(build) build_hsa_core;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
echo "Operation complete"
|
||||
@@ -11,7 +11,6 @@ printUsage() {
|
||||
echo "Usage: $(basename "${BASH_SOURCE}") [options ...]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -t, --alt Build the 'alt' variant"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -d, --debug Build a debug version of llvm (excludes packaging)"
|
||||
echo " -r, --release Build a release version of the package"
|
||||
@@ -33,14 +32,14 @@ printUsage() {
|
||||
return 0
|
||||
}
|
||||
|
||||
PROJ_NAME="lightning"
|
||||
ROCM_LLVM_LIB_RPATH='\$ORIGIN'
|
||||
ROCM_LLVM_EXE_RPATH='\$ORIGIN/../lib:\$ORIGIN/../../../lib'
|
||||
|
||||
PACKAGE_OUT="$(getPackageRoot)"
|
||||
|
||||
BUILD_PATH="$(getBuildPath lightning)"
|
||||
DEB_PATH="$(getDebPath lightning)"
|
||||
RPM_PATH="$(getRpmPath lightning)"
|
||||
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
|
||||
DEB_PATH="$(getDebPath $PROJ_NAME)"
|
||||
RPM_PATH="$(getRpmPath $PROJ_NAME)"
|
||||
INSTALL_PATH="${ROCM_INSTALL_PATH}/lib/llvm"
|
||||
LLVM_ROOT_LCL="${LLVM_ROOT}"
|
||||
ROCM_WHEEL_DIR="${BUILD_PATH}/_wheel"
|
||||
@@ -48,20 +47,12 @@ ROCM_WHEEL_DIR="${BUILD_PATH}/_wheel"
|
||||
TARGET="all"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
BUILD_TYPE="Release"
|
||||
case "${JOB_NAME}" in
|
||||
( *"rel"* | \
|
||||
*"afar"* | \
|
||||
*"nfar"* )
|
||||
ENABLE_ASSERTIONS=0 ;;
|
||||
( * )
|
||||
ENABLE_ASSERTIONS=1 ;;
|
||||
esac
|
||||
ENABLE_ASSERTIONS=0
|
||||
SHARED_LIBS="ON"
|
||||
|
||||
BUILD_LLVM_DYLIB="OFF"
|
||||
|
||||
FLANG_NEW=0
|
||||
BUILD_ALT=0
|
||||
CLEAN_OR_OUT=0;
|
||||
PKGTYPE="deb"
|
||||
MAKETARGET="deb"
|
||||
@@ -74,10 +65,10 @@ BUILD_MANPAGES="ON"
|
||||
STATIC_FLAG=
|
||||
|
||||
SANITIZER_AMDGPU=1
|
||||
HSA_INC_PATH="$WORK_ROOT/ROCR-Runtime/src/inc"
|
||||
COMGR_INC_PATH="$WORK_ROOT/llvm-project/amd/comgr/include"
|
||||
HSA_INC_PATH="$WORK_ROOT/ROCR-Runtime/runtime/hsa-runtime/inc/"
|
||||
COMGR_INC_PATH="$COMGR_ROOT/include"
|
||||
|
||||
VALID_STR=`getopt -o htcV:v:draAswlo:BPNM --long help,alt,clean,assert_llvm_ver_major:,assert_llvm_ver_minor:,debug,release,address_sanitizer,no_address_sanitizer,static,build_llvm_static,wheel,build,package,skip_lit_tests,skip_man_pages,outdir: -- "$@"`
|
||||
VALID_STR=`getopt -o hcV:v:draAswlo:BPNM --long help,clean,assert_llvm_ver_major:,assert_llvm_ver_minor:,debug,release,address_sanitizer,no_address_sanitizer,static,build_llvm_static,wheel,build,package,skip_lit_tests,skip_man_pages,outdir: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
set_dwarf_version(){
|
||||
@@ -96,11 +87,10 @@ set_dwarf_version(){
|
||||
|
||||
while true ;
|
||||
do
|
||||
#echo "processing $1"
|
||||
case "$1" in
|
||||
(-h | --help)
|
||||
printUsage ; exit 0;;
|
||||
(-t | --alt)
|
||||
BUILD_ALT=1 ; shift ;;
|
||||
(-c | --clean)
|
||||
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
|
||||
(-V | --assert_llvm_ver_major)
|
||||
@@ -115,7 +105,7 @@ do
|
||||
set_dwarf_version
|
||||
SANITIZER_AMDGPU=1 ;
|
||||
HSA_INC_PATH="$WORK_ROOT/hsa/runtime/opensrc/hsa-runtime/inc" ;
|
||||
COMGR_INC_PATH="$WORK_ROOT/external/llvm-project/amd/comgr/include" ; shift ;;
|
||||
COMGR_INC_PATH="$COMGR_ROOT/include" ; shift ;;
|
||||
(-A | --no_address_sanitizer)
|
||||
SANITIZER_AMDGPU=0 ;
|
||||
unset HSA_INC_PATH ;
|
||||
@@ -155,21 +145,9 @@ LLVM_PROJECTS="clang;lld;clang-tools-extra"
|
||||
ENABLE_RUNTIMES="compiler-rt;libunwind"
|
||||
BOOTSTRAPPING_BUILD_LIBCXX=0
|
||||
BUILD_AMDCLANG="ON"
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
BUILD_PATH="${BUILD_PATH}-alt"
|
||||
DEB_PATH="${DEB_PATH}-alt"
|
||||
RPM_PATH="${RPM_PATH}-alt"
|
||||
INSTALL_PATH="${INSTALL_PATH}/alt"
|
||||
LLVM_ROOT_LCL="${LLVM_ALT_ROOT}"
|
||||
BUILD_AMDCLANG="OFF"
|
||||
BUILD_MANPAGES="OFF"
|
||||
SANITIZER_AMDGPU=0
|
||||
unset HSA_INC_PATH
|
||||
unset COMGR_INC_PATH
|
||||
else
|
||||
ENABLE_RUNTIMES="$ENABLE_RUNTIMES;libcxx;libcxxabi";
|
||||
BOOTSTRAPPING_BUILD_LIBCXX=1
|
||||
fi
|
||||
|
||||
ENABLE_RUNTIMES="$ENABLE_RUNTIMES;libcxx;libcxxabi"
|
||||
BOOTSTRAPPING_BUILD_LIBCXX=1
|
||||
|
||||
clean_lightning() {
|
||||
rm -rf "$ROCM_WHEEL_DIR"
|
||||
@@ -188,22 +166,14 @@ setup_llvm_info() {
|
||||
local LLVM_URL_BRANCH
|
||||
|
||||
if [[ "${JOB_NAME}" == *rel* ]]; then
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
LLVM_URL_BRANCH=$(git rev-parse HEAD)
|
||||
else
|
||||
LLVM_URL_NAME="https://github.com/RadeonOpenCompute/llvm-project"
|
||||
LLVM_BRANCH_NAME="roc-${ROCM_VERSION}"
|
||||
LLVM_URL_BRANCH="${LLVM_URL_NAME} ${LLVM_BRANCH_NAME}"
|
||||
fi
|
||||
else
|
||||
LLVM_REMOTE_NAME=$(git remote)
|
||||
LLVM_URL_NAME=$(git config --get remote."${LLVM_REMOTE_NAME}".url)
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
LLVM_BRANCH_NAME=$(repo manifest | sed -n 's/.*path="external\/llvm-project-alt\/llvm-project".* upstream="\([^"]*\)".*/\1/p' )
|
||||
else
|
||||
LLVM_REMOTE_NAME=$(git remote)
|
||||
LLVM_URL_NAME=$(git config --get remote."${LLVM_REMOTE_NAME}".url)
|
||||
LLVM_BRANCH_NAME=$(repo manifest | sed -n 's/.*path="external\/llvm-project".* upstream="\([^"]*\)".*/\1/p' )
|
||||
fi
|
||||
LLVM_URL_BRANCH="${LLVM_URL_NAME} ${LLVM_BRANCH_NAME}"
|
||||
LLVM_URL_BRANCH="${LLVM_URL_NAME} ${LLVM_BRANCH_NAME}"
|
||||
fi
|
||||
|
||||
LLVM_COMMIT_GITDATE=$(git show -s --format=@%ct | xargs | date -f - --utc +%y%U%w)
|
||||
@@ -283,24 +253,27 @@ build_lightning() {
|
||||
mkdir -p "$BUILD_PATH"
|
||||
pushd "$BUILD_PATH"
|
||||
|
||||
eval EXTRA_LLVM_CMAKE_PARAMS_ARRAY=($EXTRA_LLVM_CMAKE_PARAMS)
|
||||
|
||||
if [ ! -e Makefile ]; then
|
||||
echo "Building LLVM CMake environment"
|
||||
if [ -e "$LLVM_ROOT_LCL/../flang/AFARrelease" ]; then
|
||||
LLVM_PROJECTS="$LLVM_PROJECTS;mlir"
|
||||
if [ -e "$LLVM_ROOT_LCL/../flang/EnableFlangBuild" ]; then
|
||||
FLANG_NEW=1
|
||||
LLVM_PROJECTS="$LLVM_PROJECTS;flang;mlir"
|
||||
LLVM_PROJECTS="$LLVM_PROJECTS;flang"
|
||||
ENABLE_RUNTIMES="$ENABLE_RUNTIMES;openmp";
|
||||
else
|
||||
|
||||
if [[ "${JOB_NAME}" != *afar* ]] && [ -e "$LLVM_ROOT_LCL/../flang/DoROCmRelease" ]; then
|
||||
FLANG_NEW=1
|
||||
LLVM_PROJECTS="$LLVM_PROJECTS;flang;mlir"
|
||||
else
|
||||
echo "NOT building project flang"
|
||||
fi
|
||||
if [[ "${JOB_NAME}" != *afar* ]] && [ -e "$LLVM_ROOT_LCL/../flang/DoROCmRelease" ]; then
|
||||
FLANG_NEW=1
|
||||
LLVM_PROJECTS="$LLVM_PROJECTS;flang"
|
||||
else
|
||||
echo "NOT building project flang"
|
||||
fi
|
||||
fi
|
||||
set -x
|
||||
cmake $(rocm_cmake_params) ${GEN_NINJA} \
|
||||
${STATIC_FLAG} \
|
||||
${PYTHON_VERSION_WORKAROUND} \
|
||||
-DCMAKE_INSTALL_PREFIX="$INSTALL_PATH" \
|
||||
-DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" \
|
||||
-DLLVM_ENABLE_PROJECTS="$LLVM_PROJECTS" \
|
||||
@@ -342,6 +315,7 @@ build_lightning() {
|
||||
-DCLANG_LINK_FLANG_LEGACY=ON \
|
||||
-DCMAKE_CXX_STANDARD=17 \
|
||||
-DFLANG_INCLUDE_DOCS=OFF \
|
||||
"${EXTRA_LLVM_CMAKE_PARAMS_ARRAY[@]}" \
|
||||
"$LLVM_ROOT_LCL"
|
||||
set +x
|
||||
echo "CMake complete"
|
||||
@@ -368,18 +342,10 @@ build_lightning() {
|
||||
esac
|
||||
|
||||
if [ $SKIP_LIT_TESTS -eq 0 ]; then
|
||||
if [ $RHEL_BUILD -eq 1 ] && [ $BUILD_ALT != 1 ]; then
|
||||
if [ $FLANG_NEW -eq 1 ]; then
|
||||
cmake --build . -- $MAKEOPTS check-lld check-mlir
|
||||
else
|
||||
cmake --build . -- $MAKEOPTS check-lld
|
||||
fi
|
||||
elif [ "$DISTRO_NAME" != "sles" ] && [ $BUILD_ALT != 1 ]; then
|
||||
if [ $FLANG_NEW -eq 1 ]; then
|
||||
cmake --build . -- $MAKEOPTS check-llvm check-clang check-lld check-mlir
|
||||
else
|
||||
cmake --build . -- $MAKEOPTS check-llvm check-clang check-lld
|
||||
fi
|
||||
if [ $RHEL_BUILD -eq 1 ]; then
|
||||
cmake --build . -- $MAKEOPTS check-lld check-mlir
|
||||
elif [ "$DISTRO_NAME" != "sles" ]; then
|
||||
cmake --build . -- $MAKEOPTS check-llvm check-clang check-lld check-mlir
|
||||
fi
|
||||
fi
|
||||
cmake --build . -- $MAKEOPTS clang-tidy
|
||||
@@ -396,23 +362,15 @@ package_lightning_dynamic(){
|
||||
|
||||
get_llvm_version
|
||||
local llvmParsedVersion="${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}"
|
||||
local packageName="rocm-llvm"
|
||||
local packageSummary="ROCm compiler"
|
||||
local packageSummaryLong="ROCm compiler based on LLVM $llvmParsedVersion"
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm/"
|
||||
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
local packageName="rocm-llvm-alt"
|
||||
local packageSummary="Proprietary ROCm compiler"
|
||||
local packageSummaryLong="ROCm compiler, including proprietary optimizations, based on LLVM $llvmParsedVersion"
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm/alt"
|
||||
else
|
||||
local packageName="rocm-llvm"
|
||||
local packageSummary="ROCm compiler"
|
||||
local packageSummaryLong="ROCm compiler based on LLVM $llvmParsedVersion"
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm/"
|
||||
|
||||
if [ "$BUILD_LLVM_DYLIB" == "ON" ] ; then
|
||||
local packageNameCore="rocm-llvm-core"
|
||||
local packageSummaryCore="ROCm core compiler dylibs"
|
||||
local packageSummaryLongCore="ROCm compiler based on LLVM $llvmParsedVersion"
|
||||
fi
|
||||
if [ "$BUILD_LLVM_DYLIB" == "ON" ] ; then
|
||||
local packageNameCore="rocm-llvm-core"
|
||||
local packageSummaryCore="ROCm core compiler dylibs"
|
||||
local packageSummaryLongCore="ROCm compiler based on LLVM $llvmParsedVersion"
|
||||
fi
|
||||
|
||||
local packageArch="amd64"
|
||||
@@ -433,9 +391,6 @@ package_lightning_dynamic(){
|
||||
local prermFile="$packageDeb/DEBIAN/prerm"
|
||||
local specFile="$packageDir/$packageName.spec"
|
||||
local debDependencies="python3, libc6, libstdc++6|libstdc++8, libstdc++-5-dev|libstdc++-7-dev|libstdc++-11-dev, libgcc-5-dev|libgcc-7-dev|libgcc-11-dev, rocm-core"
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
debDependencies="${debDependencies}, rocm-llvm"
|
||||
fi
|
||||
local debRecommends="gcc, g++, gcc-multilib, g++-multilib"
|
||||
|
||||
local packageRpm="$packageDir/rpm"
|
||||
@@ -508,42 +463,33 @@ package_lightning_dynamic(){
|
||||
debDependencies="${debDependencies}, ${packageNameCore}"
|
||||
fi
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ] ; then
|
||||
cp -r "$LLVM_ROOT_LCL/LICENSE.TXT" "$packageDeb/$licenseDir"
|
||||
else
|
||||
cp -r "$LLVM_PROJECT_ALT_ROOT/EULA" "$packageDeb/$licenseDir"
|
||||
cp -r "$LLVM_PROJECT_ALT_ROOT/DISCLAIMER.txt" "$packageDeb/$licenseDir"
|
||||
fi
|
||||
cp -r "$LLVM_ROOT_LCL/LICENSE.TXT" "$packageDeb/$licenseDir"
|
||||
cp -r "$distBin" "$packageDeb/$installPath/bin"
|
||||
cp -r "$distInc" "$packageDeb/$installPath/include"
|
||||
cp -r "$distLib" "$packageDeb/$installPath/lib"
|
||||
if [ "$BUILD_MANPAGES" == "ON" ]; then
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
for i in "${man_pages[@]}"; do
|
||||
gzip -f "$distMan/man1/$i"
|
||||
for i in "${man_pages[@]}"; do
|
||||
gzip -f "$distMan/man1/$i"
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
ln -sf "clang.1.gz" "$distMan/man1/$i"
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
ln -sf "clang.1.gz" "$distMan/man1/$i"
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
cp -r "$distMan" "$packageDeb/$installPath/share"
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
touch "$postinstFile" "$prermFile"
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\"" >> $postinstFile
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$packageDeb/$installPath/bin/$i" ]; then
|
||||
echo "ln -s \"../lib/llvm/bin/$i\" \"$ROCM_INSTALL_PATH/bin/$i\"" >> $postinstFile
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\"" >> $prermFile
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\"" >> $prermFile
|
||||
chmod 0555 "$postinstFile" "$prermFile"
|
||||
cp -P "$backwardsCompatibleSymlink" "$packageDeb/$ROCM_INSTALL_PATH"
|
||||
fi
|
||||
touch "$postinstFile" "$prermFile"
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\"" >> $postinstFile
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$packageDeb/$installPath/bin/$i" ]; then
|
||||
echo "ln -s \"../lib/llvm/bin/$i\" \"$ROCM_INSTALL_PATH/bin/$i\"" >> $postinstFile
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\"" >> $prermFile
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\"" >> $prermFile
|
||||
chmod 0555 "$postinstFile" "$prermFile"
|
||||
cp -P "$backwardsCompatibleSymlink" "$packageDeb/$ROCM_INSTALL_PATH"
|
||||
|
||||
echo "Package: $packageName" > $controlFile
|
||||
echo "Architecture: $packageArch" >> $controlFile
|
||||
@@ -613,16 +559,12 @@ package_lightning_dynamic(){
|
||||
echo "Release: ${JOB_DESIGNATOR}${SLES_BUILD_ID_PREFIX}${BUILD_ID}%{?dist}" >> $specFile
|
||||
echo "Summary: $packageSummary" >> $specFile
|
||||
echo "Group: System Environment/Libraries" >> $specFile
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
echo "License: AMD Proprietary" >> $specFile
|
||||
else
|
||||
echo "License: ASL 2.0 with exceptions" >> $specFile
|
||||
fi
|
||||
echo "License: ASL 2.0 with exceptions" >> $specFile
|
||||
echo "Requires: $rpmRequires" >> $specFile
|
||||
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
echo "%define _build_id_links none" >> $specFile
|
||||
fi
|
||||
# The following is commented as Centos 7 has a version of rpm
|
||||
# that does not understand it. When we no longer support Centos 7
|
||||
# then we should have a correct recommends line.
|
||||
#echo "Recommends: $rpmRecommends" >> $specFile
|
||||
|
||||
echo "%description" >> $specFile
|
||||
echo "$packageSummaryLong" >> $specFile
|
||||
@@ -638,28 +580,20 @@ package_lightning_dynamic(){
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/share/man" >> $specFile
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "cp -R $LLVM_ROOT_LCL/LICENSE.TXT \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
echo "cp -P $backwardsCompatibleSymlink \$RPM_BUILD_ROOT/$ROCM_INSTALL_PATH" >> $specFile
|
||||
else
|
||||
echo "cp -R $LLVM_PROJECT_ALT_ROOT/EULA \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
echo "cp -R $LLVM_PROJECT_ALT_ROOT/DISCLAIMER.txt \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
fi
|
||||
|
||||
echo "cp -R $LLVM_ROOT_LCL/LICENSE.TXT \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
echo "cp -P $backwardsCompatibleSymlink \$RPM_BUILD_ROOT/$ROCM_INSTALL_PATH" >> $specFile
|
||||
echo "cp -R $distBin \$RPM_BUILD_ROOT/$installPath" >> $specFile
|
||||
echo "cp -R $distInc \$RPM_BUILD_ROOT/$installPath" >> $specFile
|
||||
echo "cp -R $distLib \$RPM_BUILD_ROOT/$installPath" >> $specFile
|
||||
if [ "$BUILD_MANPAGES" == "ON" ]; then
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
for i in "${man_pages[@]}"; do
|
||||
echo "gzip -f $distMan/man1/$i" >> $specFile
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
echo "ln -sf clang.1.gz \"$distMan/man1/$i\"" >> $specFile
|
||||
done
|
||||
fi
|
||||
fi
|
||||
for i in "${man_pages[@]}"; do
|
||||
echo "gzip -f $distMan/man1/$i" >> $specFile
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
echo "ln -sf clang.1.gz \"$distMan/man1/$i\"" >> $specFile
|
||||
done
|
||||
fi
|
||||
fi
|
||||
echo "cp -R $distMan \$RPM_BUILD_ROOT/$installPath/share" >> $specFile
|
||||
|
||||
@@ -676,25 +610,20 @@ package_lightning_dynamic(){
|
||||
echo "$ROCM_INSTALL_PATH" >> $specFile
|
||||
|
||||
echo "%post" >> $specFile
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\"" >> $specFile
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "ln -sf ../lib/llvm/bin/$i \"$ROCM_INSTALL_PATH/bin/$i\"" >> $specFile
|
||||
fi
|
||||
done
|
||||
fi
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\"" >> $specFile
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "ln -sf ../lib/llvm/bin/$i \"$ROCM_INSTALL_PATH/bin/$i\"" >> $specFile
|
||||
fi
|
||||
done
|
||||
|
||||
echo "%preun" >> $specFile
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\"" >> $specFile
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\"" >> $specFile
|
||||
fi
|
||||
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\"" >> $specFile
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\"" >> $specFile
|
||||
echo "%postun" >> $specFile
|
||||
|
||||
rpmbuild --define "_topdir $packageRpm" -ba $specFile
|
||||
@@ -711,32 +640,17 @@ package_lightning_static() {
|
||||
get_llvm_version
|
||||
local llvmParsedVersion="${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}"
|
||||
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
local packageName="rocm-llvm-alt"
|
||||
local packageSummary="Proprietary ROCm core compiler"
|
||||
local packageSummaryLong="ROCm core compiler, including proprietary optimizations based on LLVM $llvmParsedVersion"
|
||||
if [ "$PACKAGEEXT" = "deb" ]; then
|
||||
local packageNameExtra="rocm-llvm-alt-dev"
|
||||
else
|
||||
local packageNameExtra="rocm-llvm-alt-devel"
|
||||
fi
|
||||
local packageSummaryExtra="Proprietary ROCm compiler dev tools"
|
||||
local packageSummaryLongExtra="ROCm compiler dev tools and documentation, including proprietary optimizations, based on LLVM $llvmParsedVersion"
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm/alt"
|
||||
local packageName="rocm-llvm"
|
||||
local packageSummary="ROCm core compiler"
|
||||
local packageSummaryLong="ROCm core compiler based on LLVM $llvmParsedVersion"
|
||||
if [ "$PACKAGEEXT" = "deb" ]; then
|
||||
local packageNameExtra="rocm-llvm-dev"
|
||||
else
|
||||
local packageName="rocm-llvm"
|
||||
local packageSummary="ROCm core compiler"
|
||||
local packageSummaryLong="ROCm core compiler based on LLVM $llvmParsedVersion"
|
||||
if [ "$PACKAGEEXT" = "deb" ]; then
|
||||
local packageNameExtra="rocm-llvm-dev"
|
||||
else
|
||||
local packageNameExtra="rocm-llvm-devel"
|
||||
fi
|
||||
local packageSummaryExtra="ROCm compiler dev tools"
|
||||
local packageSummaryLongExtra="ROCm compiler dev tools and documentation, based on LLVM $llvmParsedVersion"
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm/"
|
||||
|
||||
local packageNameExtra="rocm-llvm-devel"
|
||||
fi
|
||||
local packageSummaryExtra="ROCm compiler dev tools"
|
||||
local packageSummaryLongExtra="ROCm compiler dev tools and documentation, based on LLVM $llvmParsedVersion"
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm/"
|
||||
|
||||
local packageArch="amd64"
|
||||
local packageVersion="${llvmParsedVersion}.${LLVM_COMMIT_GITDATE}"
|
||||
@@ -746,7 +660,6 @@ package_lightning_static() {
|
||||
local distLib="$INSTALL_PATH/lib"
|
||||
local distMan="$INSTALL_PATH/share/man"
|
||||
local licenseDir="$ROCM_INSTALL_PATH/share/doc/$packageName"
|
||||
local licenseDirExtra="$ROCM_INSTALL_PATH/share/doc/$packageNameExtra"
|
||||
local packageDir="$BUILD_PATH/package"
|
||||
local backwardsCompatibleSymlink="$ROCM_INSTALL_PATH/llvm"
|
||||
|
||||
@@ -756,9 +669,6 @@ package_lightning_static() {
|
||||
local prermFile="$packageDeb/DEBIAN/prerm"
|
||||
local specFile="$packageDir/$packageName.spec"
|
||||
local debDependencies="python3, libc6, libstdc++6|libstdc++8, libstdc++-5-dev|libstdc++-7-dev|libstdc++-11-dev, libgcc-5-dev|libgcc-7-dev|libgcc-11-dev, rocm-core"
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
debDependencies="${debDependencies}, rocm-llvm"
|
||||
fi
|
||||
local debRecommends="gcc, g++, gcc-multilib, g++-multilib"
|
||||
|
||||
local packageRpm="$packageDir/rpm"
|
||||
@@ -767,10 +677,6 @@ package_lightning_static() {
|
||||
local specFileExtra="$packageDir/$packageNameExtra.spec"
|
||||
local rpmRequires="rocm-core"
|
||||
local rpmRequiresExtra="rocm-core, $packageName"
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
rpmRequires+=", rocm-llvm"
|
||||
rpmRequiresExtra+=", rocm-llvm-devel"
|
||||
fi
|
||||
local rpmRecommends="gcc, gcc-c++, devtoolset-7-gcc-c++"
|
||||
|
||||
rm -rf "$packageDir"
|
||||
@@ -807,12 +713,7 @@ package_lightning_static() {
|
||||
mkdir -p "$DEB_PATH"
|
||||
mkdir -p "$packageDeb/$licenseDir"
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ] ; then
|
||||
cp -r "$LLVM_ROOT_LCL/LICENSE.TXT" "$packageDeb/$licenseDir"
|
||||
else
|
||||
cp -r "$LLVM_PROJECT_ALT_ROOT/EULA" "$packageDeb/$licenseDir"
|
||||
cp -r "$LLVM_PROJECT_ALT_ROOT/DISCLAIMER.txt" "$packageDeb/$licenseDir"
|
||||
fi
|
||||
cp -r "$LLVM_ROOT_LCL/LICENSE.TXT" "$packageDeb/$licenseDir"
|
||||
|
||||
mkdir -p "$packageDeb/$installPath/bin"
|
||||
for i in "${core_bin[@]}"; do
|
||||
@@ -838,36 +739,32 @@ package_lightning_static() {
|
||||
done
|
||||
|
||||
if [ "$BUILD_MANPAGES" == "ON" ]; then
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
mkdir -p "$packageDeb/$installPath/share/man1"
|
||||
for i in "${core_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
gzip -f "$distMan/man1/$i"
|
||||
cp -d "$distMan/man1/${i}.gz" "$packageDeb/$installPath/share/man1/"
|
||||
fi
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
ln -sf "clang.1.gz" "$distMan/man1/$i"
|
||||
cp -d "$distMan/man1/${i}" "$packageDeb/$installPath/share/man1/"
|
||||
done
|
||||
mkdir -p "$packageDeb/$installPath/share/man1"
|
||||
for i in "${core_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
gzip -f "$distMan/man1/$i"
|
||||
cp -d "$distMan/man1/${i}.gz" "$packageDeb/$installPath/share/man1/"
|
||||
fi
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
ln -sf "clang.1.gz" "$distMan/man1/$i"
|
||||
cp -d "$distMan/man1/${i}" "$packageDeb/$installPath/share/man1/"
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
touch "$postinstFile" "$prermFile"
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\"" >> $postinstFile
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$packageDeb/$installPath/bin/$i" ]; then
|
||||
echo "ln -s \"../lib/llvm/bin/$i\" \"$ROCM_INSTALL_PATH/bin/$i\"" >> $postinstFile
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\"" >> $prermFile
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\"" >> $prermFile
|
||||
chmod 0555 "$postinstFile" "$prermFile"
|
||||
cp -P "$backwardsCompatibleSymlink" "$packageDeb/$ROCM_INSTALL_PATH"
|
||||
fi
|
||||
touch "$postinstFile" "$prermFile"
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\"" >> $postinstFile
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$packageDeb/$installPath/bin/$i" ]; then
|
||||
echo "ln -s \"../lib/llvm/bin/$i\" \"$ROCM_INSTALL_PATH/bin/$i\"" >> $postinstFile
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\"" >> $prermFile
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\"" >> $prermFile
|
||||
chmod 0555 "$postinstFile" "$prermFile"
|
||||
cp -P "$backwardsCompatibleSymlink" "$packageDeb/$ROCM_INSTALL_PATH"
|
||||
|
||||
{
|
||||
echo "Package: $packageName"
|
||||
@@ -892,14 +789,6 @@ package_lightning_static() {
|
||||
mkdir -p "$packageDeb/$installPath"
|
||||
mkdir "${controlFile%/*}"
|
||||
mkdir -p "$DEB_PATH"
|
||||
mkdir -p "$packageDeb/$licenseDirExtra"
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ] ; then
|
||||
cp -r "$LLVM_ROOT_LCL/LICENSE.TXT" "$packageDeb/$licenseDirExtra"
|
||||
else
|
||||
cp -r "$LLVM_PROJECT_ALT_ROOT/EULA" "$packageDeb/$licenseDirExtra"
|
||||
cp -r "$LLVM_PROJECT_ALT_ROOT/DISCLAIMER.txt" "$packageDeb/$licenseDirExtra"
|
||||
fi
|
||||
|
||||
mkdir -p "$packageDeb/$installPath/bin"
|
||||
for i in "$distBin"/*; do
|
||||
@@ -922,21 +811,16 @@ package_lightning_static() {
|
||||
fi
|
||||
|
||||
if [ "$BUILD_MANPAGES" == "ON" ]; then
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
mkdir -p "$packageDeb/$installPath/share/man1"
|
||||
for i in "${dev_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
gzip -f "$distMan/man1/$i"
|
||||
cp -d "$distMan/man1/${i}.gz" "$packageDeb/$installPath/share/man1/"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
mkdir -p "$packageDeb/$installPath/share/man1"
|
||||
for i in "${dev_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
gzip -f "$distMan/man1/$i"
|
||||
cp -d "$distMan/man1/${i}.gz" "$packageDeb/$installPath/share/man1/"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
debDependencies="${debDependencies}, ${packageName}"
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
debDependencies="${debDependencies}, rocm-llvm-dev"
|
||||
fi
|
||||
|
||||
echo "Package: $packageNameExtra" > $controlFile
|
||||
echo "Architecture: $packageArch" >> $controlFile
|
||||
@@ -979,13 +863,8 @@ package_lightning_static() {
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/bin" >> $specFile
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "cp -R $LLVM_ROOT_LCL/LICENSE.TXT \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
echo "cp -P $backwardsCompatibleSymlink \$RPM_BUILD_ROOT/$ROCM_INSTALL_PATH" >> $specFile
|
||||
else
|
||||
echo "cp -R $LLVM_PROJECT_ALT_ROOT/EULA \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
echo "cp -R $LLVM_PROJECT_ALT_ROOT/DISCLAIMER.txt \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
fi
|
||||
echo "cp -R $LLVM_ROOT_LCL/LICENSE.TXT \$RPM_BUILD_ROOT/$licenseDir" >> $specFile
|
||||
echo "cp -P $backwardsCompatibleSymlink \$RPM_BUILD_ROOT/$ROCM_INSTALL_PATH" >> $specFile
|
||||
|
||||
for i in "${core_bin[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
@@ -995,9 +874,7 @@ package_lightning_static() {
|
||||
|
||||
echo "cp -d \"$distBin/flang\" \$RPM_BUILD_ROOT/$installPath/bin/" >> $specFile
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "cp -d \"$distBin\"/*.cfg \$RPM_BUILD_ROOT/$installPath/bin/" >> $specFile
|
||||
fi
|
||||
echo "cp -d \"$distBin\"/*.cfg \$RPM_BUILD_ROOT/$installPath/bin/" >> $specFile
|
||||
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/lib/clang" >> $specFile
|
||||
echo "cp -R \"$distLib/clang/\" \$RPM_BUILD_ROOT/$installPath/lib/" >> $specFile
|
||||
@@ -1014,20 +891,18 @@ package_lightning_static() {
|
||||
done
|
||||
|
||||
if [ "$BUILD_MANPAGES" == "ON" ]; then
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/share/man/man1" >> $specFile
|
||||
for i in "${core_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
echo "gzip -f $distMan/man1/$i" >> $specFile
|
||||
echo "cp -d $distMan/man1/${i}.gz \$RPM_BUILD_ROOT/$installPath/share/man/man1/" >> $specFile
|
||||
fi
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
echo "ln -sf clang.1.gz \"$distMan/man1/$i\"" >> $specFile
|
||||
echo "cp -d $distMan/man1/${i} \$RPM_BUILD_ROOT/$installPath/share/man/man1/" >> $specFile
|
||||
done
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/share/man/man1" >> $specFile
|
||||
for i in "${core_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
echo "gzip -f $distMan/man1/$i" >> $specFile
|
||||
echo "cp -d $distMan/man1/${i}.gz \$RPM_BUILD_ROOT/$installPath/share/man/man1/" >> $specFile
|
||||
fi
|
||||
done
|
||||
if [ -f "$distMan/man1/clang.1.gz" ]; then
|
||||
for i in "${amd_man_pages[@]}"; do
|
||||
echo "ln -sf clang.1.gz \"$distMan/man1/$i\"" >> $specFile
|
||||
echo "cp -d $distMan/man1/${i} \$RPM_BUILD_ROOT/$installPath/share/man/man1/" >> $specFile
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -1039,24 +914,20 @@ package_lightning_static() {
|
||||
echo "$ROCM_INSTALL_PATH"
|
||||
|
||||
echo "%post"
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "mkdir -p \"$ROCM_INSTALL_PATH/bin\""
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "ln -sf ../lib/llvm/bin/$i \"$ROCM_INSTALL_PATH/bin/$i\""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
echo "mkdir -p \"\$RPM_INSTALL_PREFIX0/bin\""
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "ln -sf ../lib/llvm/bin/$i \"\$RPM_INSTALL_PREFIX0/bin/$i\""
|
||||
fi
|
||||
done
|
||||
|
||||
echo "%preun"
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "rm -f \"$ROCM_INSTALL_PATH/bin/$i\""
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"$ROCM_INSTALL_PATH/bin\""
|
||||
fi
|
||||
for i in "${amd_compiler_commands[@]}"; do
|
||||
if [ -f "$distBin/$i" ]; then
|
||||
echo "rm -f \"\$RPM_INSTALL_PREFIX0/bin/$i\""
|
||||
fi
|
||||
done
|
||||
echo "rmdir --ignore-fail-on-non-empty \"\$RPM_INSTALL_PREFIX0/bin\""
|
||||
|
||||
echo "%postun"
|
||||
} >> "$specFile"
|
||||
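Purely for illustration: the rewritten %post and %preun scriptlets above reference \$RPM_INSTALL_PREFIX0 — the install-time value of the spec's first Prefix: tag — instead of the hard-coded $ROCM_INSTALL_PATH, so the forwarding symlinks follow a relocated install (e.g. rpm --prefix). Assuming a hypothetical "amdclang" entry in amd_compiler_commands (the real list is not shown here), the generated scriptlets end up roughly like:

    %post
    mkdir -p "$RPM_INSTALL_PREFIX0/bin"
    # one line per compiler command present in the dist bin directory
    ln -sf ../lib/llvm/bin/amdclang "$RPM_INSTALL_PREFIX0/bin/amdclang"

    %preun
    rm -f "$RPM_INSTALL_PREFIX0/bin/amdclang"
    rmdir --ignore-fail-on-non-empty "$RPM_INSTALL_PREFIX0/bin"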
@@ -1071,16 +942,13 @@ package_lightning_static() {
|
||||
echo "Release: ${JOB_DESIGNATOR}${SLES_BUILD_ID_PREFIX}${BUILD_ID}%{?dist}" >> $specFileExtra
|
||||
echo "Summary: $packageSummaryExtra" >> $specFileExtra
|
||||
echo "Group: System Environment/Libraries" >> $specFileExtra
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
echo "License: AMD Proprietary" >> $specFileExtra
|
||||
else
|
||||
echo "License: ASL 2.0 with exceptions" >> $specFileExtra
|
||||
fi
|
||||
echo "License: ASL 2.0 with exceptions" >> $specFileExtra
|
||||
echo "Prefix: $ROCM_INSTALL_PATH" >> $specFileExtra
|
||||
echo "Requires: $rpmRequiresExtra" >> $specFileExtra
|
||||
|
||||
if [ $BUILD_ALT -eq 1 ]; then
|
||||
echo "%define _build_id_links none" >> $specFileExtra
|
||||
fi
|
||||
# The following is commented out because CentOS 7 ships a version of rpm
# that does not understand it. When we no longer support CentOS 7,
# we should restore a correct Recommends line.
|
||||
#echo "Recommends: $rpmRecommends" >> $specFileExtra
|
||||
|
||||
echo "%description" >> $specFileExtra
|
||||
echo "$packageSummaryLongExtra" >> $specFileExtra
|
||||
@@ -1093,15 +961,8 @@ package_lightning_static() {
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/bin" >> $specFileExtra
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/include" >> $specFileExtra
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/lib" >> $specFileExtra
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$licenseDirExtra" >> $specFileExtra
|
||||
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "cp -R $LLVM_ROOT_LCL/LICENSE.TXT \$RPM_BUILD_ROOT/$licenseDirExtra" >> $specFileExtra
|
||||
echo "cp -P $backwardsCompatibleSymlink \$RPM_BUILD_ROOT/$ROCM_INSTALL_PATH" >> $specFileExtra
|
||||
else
|
||||
echo "cp -R $LLVM_PROJECT_ALT_ROOT/EULA \$RPM_BUILD_ROOT/$licenseDirExtra" >> $specFileExtra
|
||||
echo "cp -R $LLVM_PROJECT_ALT_ROOT/DISCLAIMER.txt \$RPM_BUILD_ROOT/$licenseDirExtra" >> $specFileExtra
|
||||
fi
|
||||
echo "cp -P $backwardsCompatibleSymlink \$RPM_BUILD_ROOT/$ROCM_INSTALL_PATH" >> $specFileExtra
|
||||
|
||||
for i in "$distBin"/*; do
|
||||
bin=$(basename "$i")
|
||||
@@ -1122,15 +983,13 @@ package_lightning_static() {
|
||||
fi
|
||||
|
||||
if [ "$BUILD_MANPAGES" == "ON" ]; then
|
||||
if [ $BUILD_ALT -eq 0 ]; then
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/share/man/man1" >> $specFileExtra
|
||||
for i in "${extra_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
echo "gzip -f $distMan/man1/$i" >> $specFileExtra
|
||||
echo "cp -d \"$distMan/man1/${i}.gz\" \$RPM_BUILD_ROOT/$installPath/share/man/man1/" >> $specFileExtra
|
||||
fi
|
||||
done
|
||||
fi
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT/$installPath/share/man/man1" >> $specFileExtra
|
||||
for i in "${dev_man_pages[@]}"; do
|
||||
if [ -f "$distMan/man1/$i" ]; then
|
||||
echo "gzip -f $distMan/man1/$i" >> $specFileExtra
|
||||
echo "cp -d \"$distMan/man1/${i}.gz\" \$RPM_BUILD_ROOT/$installPath/share/man/man1/" >> $specFileExtra
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "%clean" >> $specFileExtra
|
||||
@@ -1266,9 +1125,7 @@ print_output_directory() {
|
||||
build() {
|
||||
mkdir -p "${INSTALL_PATH}"
|
||||
build_lightning
|
||||
if [ $BUILD_ALT -eq 0 ] ; then
|
||||
create_compiler_config_files
|
||||
fi
|
||||
create_compiler_config_files
|
||||
}
|
||||
|
||||
create_wheel_package() {
|
||||
|
||||
@@ -12,6 +12,10 @@ RPM_PATH=$PACKAGE_DIR
|
||||
build_miopen_hip() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
git config --global --add safe.directory "$COMPONENT_SRC"
|
||||
checkout_lfs
|
||||
@@ -26,10 +30,12 @@ build_miopen_hip() {
|
||||
cmake \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DMIOPEN_BACKEND=HIP \
|
||||
-DMIOPEN_OFFLINE_COMPILER_PATHS_V2=1 \
|
||||
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
|
||||
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
|
||||
-DCMAKE_PREFIX_PATH="${ROCM_PATH};${ROCM_PATH}/hip;${HOME}/miopen-deps" \
|
||||
-DHIP_OC_COMPILER="${ROCM_PATH}/bin/clang-ocl" \
|
||||
-DMIOPEN_TEST_DISCRETE=OFF \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
cmake --build "$BUILD_DIR" -- -j${PROC}
|
||||
|
||||
@@ -9,6 +9,10 @@ BUILD_DEV=ON
|
||||
build_mivisionx() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
mkdir -p $BUILD_DIR && cd $BUILD_DIR
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
@@ -21,7 +25,7 @@ build_mivisionx() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100"
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
cmake \
|
||||
@@ -40,8 +44,7 @@ build_mivisionx() {
|
||||
cpack -G ${PKGTYPE^^}
|
||||
|
||||
rm -rf _CPack_Packages/ && find -name '*.o' -delete
|
||||
mkdir -p $PACKAGE_DIR
|
||||
cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
|
||||
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
|
||||
|
||||
show_build_cache_stats
|
||||
}
|
||||
|
||||
@@ -21,14 +21,15 @@ printUsage() {
|
||||
|
||||
return 0
|
||||
}
|
||||
PROJ_NAME="opencl-on-rocclr"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
|
||||
BUILD_PATH="$(getBuildPath opencl-on-rocclr)"
|
||||
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
|
||||
|
||||
TARGET="build"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_DEB="$PACKAGE_ROOT/deb/opencl-on-rocclr"
|
||||
PACKAGE_RPM="$PACKAGE_ROOT/rpm/opencl-on-rocclr"
|
||||
PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
|
||||
PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
|
||||
CORE_BUILD_DIR="$(getBuildPath hsa-core)"
|
||||
ROCclr_BUILD_DIR="$(getBuildPath rocclr)"
|
||||
BUILD_TYPE="Debug"
|
||||
@@ -54,7 +55,7 @@ do
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
ack_and_skip_static ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
--) shift; break;;
|
||||
@@ -148,7 +149,7 @@ print_output_directory() {
|
||||
case $TARGET in
|
||||
(clean) clean_opencl_on_rocclr ;;
|
||||
(build) build_opencl_on_rocclr ; package_opencl_on_rocclr ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ printUsage() {
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
echo " -s, --static Component/Build does not support static builds just accepting this param for configuring package deps"
|
||||
echo " -h, --help Prints this help"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
@@ -23,20 +24,25 @@ printUsage() {
|
||||
return 0
|
||||
}
|
||||
|
||||
packageMajorVersion="17.60"
|
||||
PROJ_NAME="openmp-extras"
|
||||
packageMajorVersion="18.63"
|
||||
packageMinorVersion="0"
|
||||
packageVersion="${packageMajorVersion}.${packageMinorVersion}.${ROCM_LIBPATCH_VERSION}"
|
||||
BUILD_PATH="$(getBuildPath openmp-extras)"
|
||||
DEB_PATH="$(getDebPath openmp-extras)"
|
||||
RPM_PATH="$(getRpmPath openmp-extras)"
|
||||
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
|
||||
DEB_PATH="$(getDebPath $PROJ_NAME)"
|
||||
RPM_PATH="$(getRpmPath $PROJ_NAME)"
|
||||
TARGET="build"
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
STATIC_PKG_DEPS="OFF"
|
||||
|
||||
export INSTALL_PREFIX=${ROCM_INSTALL_PATH}
|
||||
|
||||
while [ "$1" != "" ];
|
||||
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,address_sanitizer,static,outdir,package: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true ;
|
||||
do
|
||||
case $1 in
|
||||
case "$1" in
|
||||
-c | --clean )
|
||||
TARGET="clean" ;;
|
||||
-p | --package )
|
||||
@@ -52,8 +58,11 @@ do
|
||||
export SANITIZER=1 ;;
|
||||
-o | --outdir )
|
||||
shift 1; PKGTYPE=$1 ; TARGET="outdir" ;;
|
||||
-s | --static )
|
||||
export STATIC_PKG_DEPS="ON" ;;
|
||||
-h | --help )
|
||||
printUsage ; exit 0 ;;
|
||||
--) shift; break;;
|
||||
*)
|
||||
MAKEARG=$@ ; break ;;
|
||||
esac
|
||||
@@ -124,6 +133,23 @@ build_openmp_extras() {
|
||||
export AOMP_JENKINS_BUILD_LIST="extras openmp pgmath flang flang_runtime"
|
||||
echo "BEGIN Build of openmp-extras"
|
||||
"$AOMP_REPOS"/aomp/bin/build_aomp.sh $MAKEARG
|
||||
|
||||
local llvm_ver=`$INSTALL_PREFIX/lib/llvm/bin/clang --print-resource-dir | sed 's^/llvm/lib/clang/^ ^' | awk '{print $2}'`
|
||||
if [ ! -e $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp.h ] ; then
|
||||
if [ ! -h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp.h ] ; then
|
||||
ln -s ../../../../include/omp.h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp.h
|
||||
fi
|
||||
fi
|
||||
if [ ! -e $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/ompt.h ] ; then
|
||||
if [ ! -h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/ompt.h ] ; then
|
||||
ln -s ../../../../include/ompt.h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/ompt.h
|
||||
fi
|
||||
fi
|
||||
if [ ! -e $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp-tools.h ] ; then
|
||||
if [ ! -h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp-tools.h ] ; then
|
||||
ln -s ../../../../include/omp-tools.h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp-tools.h
|
||||
fi
|
||||
fi
|
||||
popd
|
||||
}
|
||||
|
||||
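Purely as an illustration (not part of the change): the three nearly identical checks added above create a header symlink only when neither a regular file nor an existing, possibly dangling, symlink is present. Assuming llvm_ver is computed as in the hunk, the same logic can be written as a loop:

    inc_dir="$ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include"
    for hdr in omp.h ompt.h omp-tools.h; do
        # skip when a file exists or a symlink (even a dangling one) is already in place
        if [ ! -e "$inc_dir/$hdr" ] && [ ! -h "$inc_dir/$hdr" ]; then
            ln -s "../../../../include/$hdr" "$inc_dir/$hdr"
        fi
    done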
@@ -133,20 +159,30 @@ package_openmp_extras_deb() {
|
||||
local packageArch="amd64"
|
||||
local packageMaintainer="Openmp Extras Support <openmp-extras.support@amd.com>"
|
||||
local packageSummary="OpenMP Extras provides openmp and flang libraries."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 15 and is used for offloading to Radeon GPUs."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 17 and is used for offloading to Radeon GPUs."
|
||||
local debDependencies="rocm-llvm, rocm-device-libs, rocm-core"
|
||||
local debRecommends="gcc, g++"
|
||||
local controlFile="$packageDeb/openmp-extras/DEBIAN/control"
|
||||
|
||||
if [ "$packageName" == "openmp-extras-runtime" ]; then
|
||||
packageType="runtime"
|
||||
debDependencies="rocm-core, hsa-rocr"
|
||||
if [ "$STATIC_PKG_DEPS" == "OFF" ]; then
|
||||
debDependencies="rocm-core, hsa-rocr"
|
||||
else
|
||||
echo "static package dependency configuration for runtime" ;
|
||||
debDependencies="rocm-core, hsa-rocr-static-dev"
|
||||
fi
|
||||
else
|
||||
local debProvides="openmp-extras"
|
||||
local debConflicts="openmp-extras"
|
||||
local debReplaces="openmp-extras"
|
||||
packageType="devel"
|
||||
debDependencies="$debDependencies, openmp-extras-runtime, hsa-rocr-dev"
|
||||
if [ "$STATIC_PKG_DEPS" == "OFF" ]; then
|
||||
debDependencies="$debDependencies, openmp-extras-runtime, hsa-rocr-dev"
|
||||
else
|
||||
echo "Enabled static package dependency configuration for dev" ;
|
||||
debDependencies="$debDependencies, openmp-extras-runtime, hsa-rocr-static-dev"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -f "$BUILD_PATH"/build/installed_files.txt ] && [ ! -d "$INSTALL_PREFIX"/openmp-extras/devel ]; then
|
||||
@@ -209,6 +245,9 @@ package_openmp_extras_deb() {
|
||||
cp -r "$AOMP_REPOS"/aomp/examples/fortran "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
|
||||
cp -r "$AOMP_REPOS"/aomp/examples/openmp "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
|
||||
cp -r "$AOMP_REPOS"/aomp/examples/tools "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
|
||||
if [ -e "$AOMP_REPOS/aomp/examples/Makefile.help" ]; then
|
||||
cp "$AOMP_REPOS"/aomp/examples/Makefile* "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
|
||||
fi
|
||||
clean_examples "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
|
||||
fi
|
||||
|
||||
@@ -260,7 +299,7 @@ package_openmp_extras_asan_deb() {
|
||||
local packageArch="amd64"
|
||||
local packageMaintainer="Openmp Extras Support <openmp-extras.support@amd.com>"
|
||||
local packageSummary="AddressSanitizer OpenMP Extras provides instrumented openmp and flang libraries."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 15 and is used for offloading to Radeon GPUs."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 17 and is used for offloading to Radeon GPUs."
|
||||
local debDependencies="hsa-rocr-asan, rocm-core-asan"
|
||||
local debRecommends="gcc, g++"
|
||||
local controlFile="$packageDeb/openmp-extras/DEBIAN/control"
|
||||
@@ -317,23 +356,26 @@ package_openmp_extras_rpm() {
|
||||
local packageRpm="$packageDir/rpm"
|
||||
local specFile="$packageDir/$packageName.spec"
|
||||
local packageSummary="OpenMP Extras provides openmp and flang libraries."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 15 and is used for offloading to Radeon GPUs."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 17 and is used for offloading to Radeon GPUs."
|
||||
local rpmRequires="rocm-llvm, rocm-device-libs, rocm-core"
|
||||
if [ "$packageName" == "openmp-extras-runtime" ]; then
|
||||
packageType="runtime"
|
||||
rpmRequires="rocm-core, hsa-rocr"
|
||||
if [ "$STATIC_PKG_DEPS" == "OFF" ]; then
|
||||
rpmRequires="rocm-core, hsa-rocr"
|
||||
else
|
||||
rpmRequires="rocm-core, hsa-rocr-static-devel"
|
||||
fi
|
||||
else
|
||||
local rpmProvides="openmp-extras"
|
||||
local rpmObsoletes="openmp-extras"
|
||||
packageType="devel"
|
||||
rpmRequires="$rpmRequires, openmp-extras-runtime, hsa-rocr-devel"
|
||||
if [ "$STATIC_PKG_DEPS" == "OFF" ]; then
|
||||
rpmRequires="$rpmRequires, openmp-extras-runtime, hsa-rocr-devel"
|
||||
else
|
||||
rpmRequires="$rpmRequires, openmp-extras-runtime, hsa-rocr-static-devel"
|
||||
fi
|
||||
fi
|
||||
|
||||
rm -f "$AOMP_REPOS"/aomp/examples/*.sh
|
||||
rm -f "$AOMP_REPOS"/aomp/examples/fortran/*.sh
|
||||
rm -f "$AOMP_REPOS"/aomp/examples/openmp/*.sh
|
||||
|
||||
|
||||
if [ "$packageType" == "runtime" ]; then
|
||||
rm -rf "$packageDir"
|
||||
rm -rf "$RPM_PATH"
|
||||
@@ -354,6 +396,7 @@ package_openmp_extras_rpm() {
|
||||
echo "Group: System Environment/Libraries"
|
||||
echo "License: MIT and ASL 2.0 and ASL 2.0 with exceptions"
|
||||
echo "Vendor: Advanced Micro Devices, Inc."
|
||||
echo "Prefix: $INSTALL_PREFIX"
|
||||
echo "Requires: $rpmRequires"
|
||||
echo "%if %is_devel"
|
||||
echo "Provides: $rpmProvides"
|
||||
@@ -435,6 +478,9 @@ package_openmp_extras_rpm() {
|
||||
echo " cp -r $AOMP_REPOS/aomp/examples/fortran \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
|
||||
echo " cp -r $AOMP_REPOS/aomp/examples/openmp \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
|
||||
echo " cp -r $AOMP_REPOS/aomp/examples/tools \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
|
||||
if [ -e "$AOMP_REPOS/aomp/examples/Makefile.help" ]; then
|
||||
echo " cp $AOMP_REPOS/aomp/examples/Makefile* \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
|
||||
fi
|
||||
clean_examples \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples
|
||||
echo "%endif"
|
||||
echo "%clean"
|
||||
@@ -461,7 +507,7 @@ package_openmp_extras_asan_rpm() {
|
||||
local packageRpm="$packageDir/rpm"
|
||||
local specFile="$packageDir/$packageName.spec"
|
||||
local packageSummary="AddressSanitizer OpenMP Extras provides instrumented openmp and flang libraries."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 15 and is used for offloading to Radeon GPUs."
|
||||
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 17 and is used for offloading to Radeon GPUs."
|
||||
local rpmRequires="hsa-rocr-asan, rocm-core-asan"
|
||||
local asanLibDir="runtime"
|
||||
|
||||
@@ -527,7 +573,6 @@ package_openmp_extras_asan_rpm() {
|
||||
mv $packageRpm/RPMS/x86_64/*.rpm $RPM_PATH
|
||||
}
|
||||
|
||||
|
||||
package_openmp_extras() {
|
||||
local DISTRO_NAME=$(cat /etc/os-release | grep -e ^NAME=)
|
||||
local installPath="$ROCM_INSTALL_PATH/lib/llvm"
|
||||
@@ -563,21 +608,23 @@ package_tests_deb(){
|
||||
local packageArch="amd64"
|
||||
local packageMaintainer="Openmp Extras Support <openmp-extras.support@amd.com>"
|
||||
local packageSummary="Tests for openmp-extras."
|
||||
local packageSummaryLong="Tests for openmp-extras $packageMajorVersion-$packageMinorVersion is based on LLVM 15 and is used for offloading to Radeon GPUs."
|
||||
local debDependencies="rocm-core"
|
||||
local debRecommends="gcc, g++"
|
||||
local packageSummaryLong="Tests for openmp-extras $packageMajorVersion-$packageMinorVersion is based on LLVM 17 and is used for offloading to Radeon GPUs."
|
||||
local debDependencies="openmp-extras-dev, rocm-core"
|
||||
local debRecommends=""
|
||||
local controlFile="$packageDeb/openmp-extras/DEBIAN/control"
|
||||
local installPath="$ROCM_INSTALL_PATH/share/openmp-extras/tests"
|
||||
local packageName="openmp-extras-tests"
|
||||
|
||||
rm -rf "$packageDir"
|
||||
|
||||
mkdir -p $packageDeb/openmp-extras$installPath; mkdir -p $packageDeb/openmp-extras$copyPath/bin
|
||||
mkdir -p $packageDeb/openmp-extras"$installPath"
|
||||
if [ -e $(dirname $controlFile) ]; then
|
||||
rm $(dirname $controlFile)
|
||||
fi
|
||||
mkdir -p "$(dirname $controlFile)"
|
||||
cp -r "$AOMP_REPOS/aomp/test/smoke" "$packageDeb$installPath"
|
||||
cp -r "$AOMP_REPOS/aomp/." "$packageDeb/openmp-extras/$installPath"
|
||||
rm -rf "$packageDeb"/openmp-extras"$installPath"/.git "$packageDeb"/openmp-extras"$installPath"/.github
|
||||
cp "$OUT_DIR/build/lightning/bin/FileCheck" "$packageDeb/openmp-extras/$installPath/bin"
|
||||
{
|
||||
echo "Package: $packageName"
|
||||
echo "Architecture: $packageArch"
|
||||
@@ -603,11 +650,12 @@ package_tests_rpm(){
|
||||
local packageName="openmp-extras-tests"
|
||||
local specFile="$packageDir/$packageName.spec"
|
||||
local packageSummary="Tests for openmp-extras."
|
||||
local packageSummaryLong="Tests for openmp-extras $packageVersion is based on LLVM 15 and is used for offloading to Radeon GPUs."
|
||||
local packageSummaryLong="Tests for openmp-extras $packageVersion is based on LLVM 18 and is used for offloading to Radeon GPUs."
|
||||
|
||||
rm -rf "$packageDir"
|
||||
mkdir -p "$packageRpm$installPath"
|
||||
mkdir -p "$packageRpm/openmp-extras/$installPath"
|
||||
{
|
||||
echo "AutoReqProv: no"
|
||||
echo "Name: $packageName"
|
||||
echo "Version: $packageVersion"
|
||||
echo "Release: ${CPACK_RPM_PACKAGE_RELEASE}%{?dist}"
|
||||
@@ -615,7 +663,10 @@ package_tests_rpm(){
|
||||
echo "Group: System Environment/Libraries"
|
||||
echo "License: Advanced Micro Devices, Inc."
|
||||
echo "Vendor: Advanced Micro Devices, Inc."
|
||||
echo "Prefix: $INSTALL_PREFIX"
|
||||
echo "Requires: $rpmRequires"
|
||||
echo "%define debug_package %{nil}"
|
||||
# Redefining __os_install_post to remove stripping
|
||||
echo "%define __os_install_post %{nil}"
|
||||
echo "%description"
|
||||
echo "$packageSummaryLong"
|
||||
@@ -625,18 +676,21 @@ package_tests_rpm(){
|
||||
echo "%build"
|
||||
|
||||
echo "%install"
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT$copyPath/share/aomp/tests"
|
||||
echo "cp -R $AOMP_REPOS/aomp/test/smoke \$RPM_BUILD_ROOT$copyPath/share/aomp/tests"
|
||||
echo "mkdir -p \$RPM_BUILD_ROOT$installPath"
|
||||
echo "cp -R $AOMP_REPOS/aomp/. \$RPM_BUILD_ROOT$installPath"
|
||||
echo "rm -rf \$RPM_BUILD_ROOT$installPath/.git \$RPM_BUILD_ROOT$installPath/.github"
|
||||
echo "cp $OUT_DIR/build/lightning/bin/FileCheck \$RPM_BUILD_ROOT$installPath/bin"
|
||||
echo 'find $RPM_BUILD_ROOT \! -type d | sed "s|$RPM_BUILD_ROOT||"> files.list'
|
||||
|
||||
echo "%clean"
|
||||
echo "rm -rf \$RPM_BUILD_ROOT"
|
||||
|
||||
echo "%files -f files.list"
|
||||
echo "$installPath"
|
||||
echo "%defattr(-,root,root,-)"
|
||||
|
||||
echo "%postun"
|
||||
echo "rm -rf $ROCM_INSTALL_PATH/share/aomp"
|
||||
echo "rm -rf $installPath"
|
||||
} > $specFile
|
||||
rpmbuild --define "_topdir $packageRpm" -ba $specFile
|
||||
mv $packageRpm/RPMS/x86_64/*.rpm $RPM_PATH
|
||||
@@ -665,7 +719,7 @@ print_output_directory() {
|
||||
|
||||
case $TARGET in
|
||||
(clean) clean_openmp_extras ;;
|
||||
(build) build_openmp_extras; package_openmp_extras ;;
|
||||
(build) build_openmp_extras; package_openmp_extras; package_tests ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
@@ -10,6 +10,10 @@ ENABLE_ADDRESS_SANITIZER=false
|
||||
build_rccl() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
mkdir -p $ROCM_PATH/.info/
|
||||
echo $ROCM_VERSION | tee $ROCM_PATH/.info/version
|
||||
|
||||
@@ -23,7 +27,7 @@ build_rccl() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
@@ -13,7 +13,7 @@ printUsage() {
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
|
||||
type referred to by pkg_type"
|
||||
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
|
||||
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
|
||||
echo " -h, --help Prints this help"
|
||||
echo
|
||||
return 0
|
||||
@@ -41,7 +41,7 @@ RDC_PKG_NAME_ROOT="rdc"
|
||||
RDC_PKG_NAME="${RDC_PKG_NAME_ROOT}"
|
||||
GRPC_PROTOC_ROOT="${RDC_BUILD_DIR}/grpc"
|
||||
GRPC_SEARCH_ROOT="/usr/grpc"
|
||||
GRPC_DESIRED_VERSION="1.59.1" # do not include 'v'
|
||||
GRPC_DESIRED_VERSION="1.61.0"
|
||||
|
||||
RDC_LIB_RPATH='$ORIGIN'
|
||||
RDC_LIB_RPATH=$RDC_LIB_RPATH:'$ORIGIN/..'
|
||||
@@ -70,7 +70,7 @@ do
|
||||
(-d | --documentation )
|
||||
BUILD_DOCS="yes" ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
ack_and_skip_static ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
(-p | --package)
|
||||
@@ -111,49 +111,20 @@ find_grpc() {
|
||||
GRPC_PROTOC_ROOT=$GRPC_SEARCH_ROOT
|
||||
}
|
||||
|
||||
build_grpc() {
|
||||
if find_grpc; then
|
||||
return 0
|
||||
fi
|
||||
echo "GRPC SEARCH FAILED! Building from scratch..."
|
||||
|
||||
mkdir -p $PACKAGE_ROOT/build
|
||||
pushd $PACKAGE_ROOT/build
|
||||
|
||||
if [ ! -d $PACKAGE_ROOT/build/grpc/.git ]; then
|
||||
git clone \
|
||||
--shallow-submodules \
|
||||
--recurse-submodules \
|
||||
$DASH_JAY \
|
||||
-b v${GRPC_DESIRED_VERSION} \
|
||||
--depth 1 \
|
||||
https://github.com/grpc/grpc
|
||||
fi
|
||||
|
||||
cd grpc
|
||||
mkdir -p cmake/build
|
||||
cd cmake/build
|
||||
|
||||
cmake \
|
||||
-DgRPC_INSTALL=ON \
|
||||
-DgRPC_BUILD_TESTS=OFF \
|
||||
-DBUILD_SHARED_LIBS=ON \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_INSTALL_PREFIX=${GRPC_PROTOC_ROOT} \
|
||||
../..
|
||||
cmake --build . -- $DASH_JAY
|
||||
cmake --build . -- install
|
||||
|
||||
cp ../../LICENSE ${GRPC_PROTOC_ROOT}
|
||||
popd
|
||||
}
|
||||
|
||||
rdc_backwards_compat_cmake_params() {
|
||||
grep -q "RDC_CLIENT_INSTALL_PREFIX" "$RDC_ROOT/CMakeLists.txt" &&
|
||||
echo "-DRDC_CLIENT_INSTALL_PREFIX=$PACKAGE_ROOT"
|
||||
}
|
||||
|
||||
build_rdc() {
|
||||
if ! find_grpc; then
|
||||
echo "ERROR: GRPC SEARCH FAILED!"
|
||||
echo "You are expected to have gRPC [${GRPC_DESIRED_VERSION}] in [${GRPC_SEARCH_ROOT}]"
|
||||
# Compiling gRPC as part of the RDC build takes too long and times out the build job
|
||||
return 1
|
||||
fi
|
||||
echo "gRPC [${GRPC_DESIRED_VERSION}] found!"
|
||||
|
||||
echo "Building RDC"
|
||||
echo "RDC_BUILD_DIR: ${RDC_BUILD_DIR}"
|
||||
echo "GRPC_PROTOC_ROOT: ${GRPC_PROTOC_ROOT}"
|
||||
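With the in-job gRPC compile dropped (see the build target below), build_rdc now fails fast unless gRPC $GRPC_DESIRED_VERSION is already installed under $GRPC_SEARCH_ROOT. The find_grpc implementation itself is elided in this hunk; a minimal stand-in check, assuming a standard CMake install layout, might look like:

    # hypothetical pre-flight check, not the real find_grpc
    GRPC_SEARCH_ROOT="/usr/grpc"
    GRPC_DESIRED_VERSION="1.61.0"
    version_file="${GRPC_SEARCH_ROOT}/lib/cmake/grpc/gRPCConfigVersion.cmake"
    if [ -f "$version_file" ] && grep -q "${GRPC_DESIRED_VERSION}" "$version_file"; then
        echo "gRPC ${GRPC_DESIRED_VERSION} found in ${GRPC_SEARCH_ROOT}"
    else
        echo "ERROR: install gRPC ${GRPC_DESIRED_VERSION} under ${GRPC_SEARCH_ROOT} before building RDC" >&2
        exit 1
    fi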
@@ -226,7 +197,7 @@ verifyEnvSetup
|
||||
case $TARGET in
|
||||
(clean) clean_rdc ;;
|
||||
(clean_grpc) clean_grpc ;;
|
||||
(build) build_grpc; build_rdc ;;
|
||||
(build) build_rdc ;;
|
||||
(outdir) print_output_directory ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
@@ -7,11 +7,6 @@ set_component_src rocAL
|
||||
|
||||
build_rocal() {
|
||||
|
||||
if [ "$DISTRO_ID" = "mariner-2.0" ] ; then
|
||||
echo "Not building rocal for ${DISTRO_ID}. Exiting..."
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "Start build"
|
||||
|
||||
# Enable ASAN
|
||||
|
||||
@@ -10,6 +10,10 @@ set_component_src rocALUTION
|
||||
build_rocalution() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
cd $COMPONENT_SRC
|
||||
|
||||
CXX="g++"
|
||||
@@ -27,7 +31,7 @@ build_rocalution() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
cmake \
|
||||
@@ -35,7 +39,6 @@ build_rocalution() {
|
||||
${LAUNCHER_FLAGS} \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DAMDGPU_TARGETS=${GPU_TARGETS} \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_BENCHMARKS=ON \
|
||||
|
||||
@@ -12,6 +12,11 @@ stage2_command_args "$@"
|
||||
build_rocblas() {
|
||||
echo "Start build"
|
||||
|
||||
SHARED_LIBS="ON"
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
SHARED_LIBS="OFF"
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
@@ -26,7 +31,7 @@ build_rocblas() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
@@ -36,8 +41,8 @@ build_rocblas() {
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DROCM_DIR="${ROCM_PATH}" \
|
||||
${LAUNCHER_FLAGS} \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DCMAKE_PREFIX_PATH="${DEPS_DIR};${ROCM_PATH}" \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_BENCHMARKS=ON \
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
|
||||
@@ -1,127 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
|
||||
|
||||
printUsage() {
|
||||
echo
|
||||
echo "Usage: $(basename "${BASH_SOURCE}") [options ...] [make options]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -h, --help Prints this help"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
echo " -a, --address_sanitizer Enable address sanitizer"
|
||||
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
|
||||
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of type referred to by pkg_type"
|
||||
echo
|
||||
echo "Possible values for <type>:"
|
||||
echo " deb -> Debian format (default)"
|
||||
echo " rpm -> RPM format"
|
||||
echo
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
MAKEOPTS="$DASH_JAY"
|
||||
|
||||
BUILD_PATH="$(getBuildPath rocclr)"
|
||||
|
||||
TARGET="build"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_DEB="$(getPackageRoot)/deb/rocclr"
|
||||
PACKAGE_RPM="$(getPackageRoot)/rpm/rocclr"
|
||||
CORE_BUILD_DIR="$(getBuildPath hsa-core)"
|
||||
BUILD_TYPE="Debug"
|
||||
SHARED_LIBS="ON"
|
||||
CLEAN_OR_OUT=0;
|
||||
MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
|
||||
|
||||
VALID_STR=`getopt -o hcraso: --long help,clean,release,static,address_sanitizer,outdir: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
while true ;
|
||||
do
|
||||
case "$1" in
|
||||
(-h | --help)
|
||||
printUsage ; exit 0;;
|
||||
(-c | --clean)
|
||||
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
|
||||
(-r | --release)
|
||||
BUILD_TYPE="Release" ; shift ;;
|
||||
(-a | --address_sanitizer)
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
--) shift; break;;
|
||||
(*)
|
||||
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
|
||||
esac
|
||||
|
||||
done
|
||||
|
||||
RET_CONFLICT=1
|
||||
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
if [ $RET_CONFLICT -ge 30 ]; then
|
||||
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
|
||||
exit $RET_CONFLICT
|
||||
fi
|
||||
|
||||
|
||||
clean_rocclr() {
|
||||
rm -rf "$BUILD_PATH"
|
||||
rm -rf "$PACKAGE_DEB"
|
||||
rm -rf "$PACKAGE_RPM"
|
||||
}
|
||||
|
||||
build_rocclr() {
|
||||
if [ "$SHARED_LIBS" = "ON" ]; then
|
||||
echo "rocclr not a standalone repo. skipping build" >&2
|
||||
echo "rocclr not a standalone repo. skipping build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ ! -e "$CLR_ROOT/CMakeLists.txt" ]; then
|
||||
_ROCclr_CMAKELIST_DIR="$CLR_ROOT"
|
||||
elif [ ! -e "$ROCclr_ROOT/CMakeLists.txt" ]; then
|
||||
echo "No $ROCclr_ROOT/CMakeLists.txt file, skipping rocclr" >&2
|
||||
echo "No $ROCclr_ROOT/CMakeLists.txt file, skipping rocclr"
|
||||
exit 0
|
||||
else
|
||||
_ROCclr_CMAKELIST_DIR="$ROCclr_ROOT"
|
||||
fi
|
||||
echo "$_ROCclr_CMAKELIST_DIR"
|
||||
mkdir -p "$BUILD_PATH"
|
||||
pushd "$BUILD_PATH"
|
||||
print_lib_type $SHARED_LIBS
|
||||
if [ ! -e Makefile ]; then
|
||||
echo "Building ROCclr CMake environment"
|
||||
|
||||
cmake -DUSE_COMGR_LIBRARY=ON \
|
||||
$(rocm_cmake_params) \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DLLVM_INCLUDES="$LLVM_ROOT/include" \
|
||||
$(rocm_common_cmake_params) \
|
||||
"$_ROCclr_CMAKELIST_DIR"
|
||||
|
||||
echo "CMake complete"
|
||||
fi
|
||||
|
||||
echo "Building ROCclr"
|
||||
cmake --build . -- $MAKEOPTS "VERBOSE=1"
|
||||
|
||||
popd
|
||||
}
|
||||
|
||||
case $TARGET in
|
||||
(clean) clean_rocclr ;;
|
||||
(build) build_rocclr ;;
|
||||
(outdir) exit ;;
|
||||
(*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
|
||||
echo "Operation complete"
|
||||
@@ -4,14 +4,14 @@ source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
|
||||
set_component_src rocDecode
|
||||
BUILD_DEV=ON
|
||||
build_rocdecode() {
|
||||
if [ "$DISTRO_ID" = "centos-7" ] || [ "$DISTRO_ID" = "sles-15.4" ] ; then
|
||||
echo "Not building rocDecode for ${DISTRO_ID}. Exiting..."
|
||||
return 0
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
echo "Start build"
|
||||
mkdir -p $BUILD_DIR && cd $BUILD_DIR
|
||||
python3 ${COMPONENT_SRC}/rocDecode-setup.py --developer OFF
|
||||
# python3 ${COMPONENT_SRC}/rocDecode-setup.py --developer OFF
|
||||
|
||||
cmake -DROCM_DEP_ROCMCORE=ON ${COMPONENT_SRC}
|
||||
make -j8
|
||||
|
||||
@@ -21,7 +21,7 @@ build_rocfft() {
|
||||
if [ -n "$GPU_ARCHS" ]; then
|
||||
GPU_TARGETS="$GPU_ARCHS"
|
||||
else
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
|
||||
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
fi
|
||||
|
||||
CXX="${ROCM_PATH}/bin/hipcc" \
|
||||
@@ -34,7 +34,6 @@ build_rocfft() {
|
||||
-DBUILD_CLIENTS_SAMPLES=ON \
|
||||
-DBUILD_CLIENTS_TESTS=ON \
|
||||
-DBUILD_CLIENTS_RIDER=ON \
|
||||
-DCPACK_SET_DESTDIR=OFF \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
cmake --build "$BUILD_DIR" -- -j${PROC}
|
||||
|
||||
tools/rocm-build/build_rocjpeg.sh (new executable file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
|
||||
set_component_src rocJPEG
|
||||
BUILD_DEV=ON
|
||||
build_rocjpeg() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
mkdir -p $BUILD_DIR && cd $BUILD_DIR
|
||||
# python3 ../rocJPEG-setup.py
|
||||
|
||||
cmake -DROCM_DEP_ROCMCORE=ON "$COMPONENT_SRC"
|
||||
make -j8
|
||||
make install
|
||||
make package
|
||||
|
||||
cmake --build "$BUILD_DIR" -- -j${PROC}
|
||||
cpack -G ${PKGTYPE^^}
|
||||
rm -rf _CPack_Packages/ && find -name '*.o' -delete
|
||||
mkdir -p $PACKAGE_DIR
|
||||
cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
|
||||
show_build_cache_stats
|
||||
}
|
||||
clean_rocjpeg() {
|
||||
echo "Cleaning rocJPEG build directory: ${BUILD_DIR} ${PACKAGE_DIR}"
|
||||
rm -rf "$BUILD_DIR" "$PACKAGE_DIR"
|
||||
echo "Done!"
|
||||
}
|
||||
stage2_command_args "$@"
|
||||
case $TARGET in
|
||||
build) build_rocjpeg ;;
|
||||
outdir) print_output_directory ;;
|
||||
clean) clean_rocjpeg ;;
|
||||
*) die "Invalid target $TARGET" ;;
|
||||
esac
|
||||
@@ -24,10 +24,11 @@ printUsage() {
|
||||
return 0
|
||||
}
|
||||
|
||||
PROJ_NAME="rocm-core"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
ROCM_CORE_BUILD_DIR="$(getBuildPath rocm_core)"
|
||||
ROCM_CORE_PACKAGE_DEB="$(getPackageRoot)/deb/rocm-core"
|
||||
ROCM_CORE_PACKAGE_RPM="$(getPackageRoot)/rpm/rocm-core"
|
||||
ROCM_CORE_PACKAGE_DEB="$(getPackageRoot)/deb/$PROJ_NAME"
|
||||
ROCM_CORE_PACKAGE_RPM="$(getPackageRoot)/rpm/$PROJ_NAME"
|
||||
ROCM_CORE_MAKE_OPTS="$DASH_JAY -C $ROCM_CORE_BUILD_DIR"
|
||||
BUILD_TYPE="Debug"
|
||||
TARGET="build"
|
||||
|
||||
@@ -48,7 +48,7 @@ CLEAN_OR_OUT=0;
|
||||
MAKETARGET="deb"
|
||||
PKGTYPE="deb"
|
||||
LDFLAGS="$LDFLAGS -Wl,--enable-new-dtags"
|
||||
|
||||
LIB_AMD_PYTHON="libamdpython.so"
|
||||
|
||||
tokeep=(
|
||||
main${ROCM_INSTALL_PATH}/bin/rocgdb
|
||||
@@ -123,11 +123,41 @@ package_deb(){
|
||||
local VERSION
|
||||
get_version unknown
|
||||
VERSION="${VERSION}.${ROCM_LIBPATCH_VERSION}"
|
||||
|
||||
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/control" <<EOF
|
||||
#create postinstall and prerm
|
||||
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/preinst" <<EOF
|
||||
#!/bin/sh
|
||||
# Pre-installation script commands
|
||||
echo "Running pre-installation script..."
|
||||
mkdir -p ${ROCM_INSTALL_PATH}/lib
|
||||
PYTHON_LIB_INSTALLED=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}')
|
||||
ln -s \$PYTHON_LIB_INSTALLED ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
|
||||
echo "pre-installation done."
|
||||
EOF
|
||||
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/postrm" <<EOF
|
||||
#!/bin/sh
|
||||
# Post-uninstallation script commands
|
||||
echo "Running post-uninstallation script..."
|
||||
PYTHON_LINK_BY_OPENCL=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}' | awk -F'/' '{print \$NF}')
|
||||
rm -f ${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL
|
||||
rm -f ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
|
||||
if [ -L "${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON" ] || \
|
||||
[ -L "${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL" ] ; then
|
||||
echo " some rocm-gdb requisite libs could not be removed"
|
||||
else
|
||||
echo " all requisite libs removed successfully "
|
||||
fi
|
||||
echo "post-uninstallation done."
|
||||
EOF
|
||||
chmod +x $BUILD_DIR/package/main/DEBIAN/postrm
|
||||
chmod +x $BUILD_DIR/package/main/DEBIAN/preinst
|
||||
# Create control file, with variable substitution.
|
||||
# Lines with # at the start are removed, to allow for comments
|
||||
mkdir "$BUILD_DIR/debian"
|
||||
grep -v '^# ' > "$BUILD_DIR/debian/control" <<EOF
|
||||
# Required fields
|
||||
Version: ${VERSION}-${CPACK_DEBIAN_PACKAGE_RELEASE}
|
||||
Package: ${PROJ_NAME}
|
||||
Source: ${PROJ_NAME}-src
|
||||
Maintainer: ROCm Debugger Support <rocm-gdb.support@amd.com>
|
||||
Description: ROCgdb
|
||||
This is ROCgdb, the AMD ROCm source-level debugger for Linux,
|
||||
@@ -137,15 +167,37 @@ Section: utils
|
||||
Architecture: amd64
|
||||
Essential: no
|
||||
Priority: optional
|
||||
Depends: libexpat1, libtinfo5, libncurses5, rocm-dbgapi, libpython3.10 | libpython3.8, libbabeltrace-ctf1 (>= 1.2.1), libbabeltrace1 (>= 1.2.1), rocm-core
|
||||
Depends: \${shlibs:Depends}, rocm-dbgapi, rocm-core
|
||||
EOF
|
||||
|
||||
# Use dpkg-shlibdeps to list shlib dependencies, the result is placed
|
||||
# in $BUILD_DIR/debian/substvars.
|
||||
(
|
||||
cd "$BUILD_DIR"
|
||||
if [[ $ASAN_BUILD == "yes" ]]
|
||||
then
|
||||
LD_LIBRARY_PATH=${ROCM_INSTALL_PATH}/lib/asan:$LD_LIBRARY_PATH
|
||||
fi
|
||||
dpkg-shlibdeps --ignore-missing-info -e "$BUILD_DIR/package/main/${ROCM_INSTALL_PATH}/bin/rocgdb"
|
||||
)
|
||||
# Generate the final DEBIAN/control, and substitute the shlibs:Depends.
|
||||
# This is a bit unorthodox as we are only using bits and pieces of the
|
||||
# dpkg tools.
|
||||
(
|
||||
SHLIB_DEPS=$(grep "^shlibs:Depends" "$BUILD_DIR/debian/substvars" | \
|
||||
sed -e "s/shlibs:Depends=//")
|
||||
sed -E \
|
||||
-e "/^#/d" \
|
||||
-e "/^Source:/d" \
|
||||
-e "s/\\$\{shlibs:Depends\}/$SHLIB_DEPS/" \
|
||||
< debian/control > "$BUILD_DIR/package/main/DEBIAN/control"
|
||||
)
|
||||
mkdir -p "$OUT_DIR/deb/$PROJ_NAME"
|
||||
fakeroot dpkg-deb -Zgzip --build "$BUILD_DIR/package/main" "$OUT_DIR/deb/$PROJ_NAME"
|
||||
|
||||
# Package the tests so they can be run on a test slave
|
||||
mkdir -p "$BUILD_DIR/package/tests/DEBIAN"
|
||||
mkdir -p "$BUILD_DIR/package/tests/${ROCM_INSTALL_PATH}/test/gdb"
|
||||
|
||||
# Create control file, with variable substitution.
|
||||
# Lines with # at the start are removed, to allow for comments
|
||||
grep -v '^# ' > "$BUILD_DIR/package/tests/DEBIAN/control" <<EOF
|
||||
# Required fields
|
||||
Version: ${VERSION}-${CPACK_DEBIAN_PACKAGE_RELEASE}
|
||||
@@ -161,7 +213,6 @@ Priority: optional
|
||||
# rocm-core as policy says everything to depend on rocm-core
|
||||
Depends: ${PROJ_NAME} (=${VERSION}-${CPACK_DEBIAN_PACKAGE_RELEASE}), dejagnu, rocm-core, make
|
||||
EOF
|
||||
|
||||
copy_testsuite_files
|
||||
fakeroot dpkg-deb -Zgzip --build "$BUILD_DIR/package/tests" "$OUT_DIR/deb/$PROJ_NAME"
|
||||
}
|
||||
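The package's Depends line is now derived from the rocgdb binary itself: dpkg-shlibdeps writes a shlibs:Depends= entry into $BUILD_DIR/debian/substvars, and the sed pass above splices it into the final DEBIAN/control in place of ${shlibs:Depends}. A hypothetical example of the substitution (library versions invented for illustration):

    # debian/substvars, as produced by dpkg-shlibdeps:
    #   shlibs:Depends=libc6 (>= 2.34), libexpat1 (>= 2.4.7), libpython3.10 (>= 3.10.0)
    # control template:
    #   Depends: ${shlibs:Depends}, rocm-dbgapi, rocm-core
    # resulting DEBIAN/control:
    #   Depends: libc6 (>= 2.34), libexpat1 (>= 2.4.7), libpython3.10 (>= 3.10.0), rocm-dbgapi, rocm-core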
@@ -204,7 +255,9 @@ Summary: ROCm source-level debugger for Linux
|
||||
Version: ${VERSION//-/_}
|
||||
Release: ${CPACK_RPM_PACKAGE_RELEASE}%{?dist}
|
||||
License: GPL
|
||||
Prefix: ${ROCM_INSTALL_PATH}
|
||||
Requires: rocm-core
|
||||
Provides: $LIB_AMD_PYTHON()(64bit)
|
||||
|
||||
%description
|
||||
This is ROCgdb, the ROCm source-level debugger for Linux, based on
|
||||
@@ -225,6 +278,27 @@ https://github.com/RadeonOpenCompute/ROCm
|
||||
## into the local RPM_BUILD_ROOT and left the defaults take over. Need
|
||||
## to quote the dollar signs as we want rpm to expand them when it is
|
||||
## run, rather than the shell when we build the spec file.
|
||||
%pre
|
||||
# Pre-install script commands
echo "Running pre-install script..."
|
||||
mkdir -p ${ROCM_INSTALL_PATH}/lib
|
||||
PYTHON_LIB_INSTALLED=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}')
|
||||
ln -s \$PYTHON_LIB_INSTALLED ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
|
||||
|
||||
%postun
|
||||
# Post-uninstallation script commands
|
||||
echo "Running post-uninstallation script..."
|
||||
PYTHON_LINK_BY_OPENCL=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}' | awk -F'/' '{print \$NF}')
|
||||
rm -f ${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL
|
||||
rm -f ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
|
||||
if [ -L "${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON" ] || \
|
||||
[ -L "${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL" ] ; then
|
||||
echo " some rocm-gdb requisite libs could not be removed"
|
||||
else
|
||||
echo " all requisite libs removed successfully "
|
||||
fi
|
||||
echo "post-uninstallation done."
|
||||
|
||||
%install
|
||||
rm -rf \$RPM_BUILD_ROOT
|
||||
mkdir -p \$RPM_BUILD_ROOT
|
||||
@@ -279,6 +353,7 @@ Summary: Tests for gdb enhanced to debug AMD GPUs
|
||||
Version: ${VERSION//-/_}
|
||||
Release: ${RELEASE}
|
||||
License: GPL
|
||||
Prefix: ${ROCM_INSTALL_PATH}
|
||||
Requires: dejagnu, ${PROJ_NAME} = ${VERSION//-/_}-${RELEASE}, rocm-core, make
|
||||
|
||||
%description
|
||||
@@ -340,15 +415,36 @@ build() {
|
||||
--infodir="\${prefix}/share/info/rocgdb" \
|
||||
--with-separate-debug-dir="\${prefix}/lib/debug:/usr/lib/debug" \
|
||||
--with-gdb-datadir="\${prefix}/share/rocgdb" --enable-64-bit-bfd \
|
||||
--with-bugurl="$BUG_URL" --with-pkgversion="${ROCM_BUILD_ID:-ROCm}" \
|
||||
--enable-targets="x86_64-linux-gnu,amdgcn-amd-amdhsa" \
|
||||
--disable-ld --disable-gas --disable-gdbserver --disable-sim --enable-tui \
|
||||
--disable-gdbtk --disable-shared --disable-gprofng \
|
||||
--with-expat --with-system-zlib --without-guile --with-babeltrace --with-lzma \
|
||||
--with-python=$pythonver --with-rocm-dbgapi=$ROCM_INSTALL_PATH \
|
||||
--with-amd-dbgapi PKG_CONFIG_PATH="${ROCM_INSTALL_PATH}/share/pkgconfig" \
|
||||
--with-bugurl="$BUG_URL" --with-pkgversion="${ROCM_BUILD_ID:-ROCm}" \
|
||||
--enable-targets="x86_64-linux-gnu,amdgcn-amd-amdhsa" \
|
||||
--disable-gas \
|
||||
--disable-gdbserver \
|
||||
--disable-gdbtk \
|
||||
--disable-gprofng \
|
||||
--disable-ld \
|
||||
--disable-shared \
|
||||
--disable-sim \
|
||||
--enable-tui \
|
||||
--with-amd-dbgapi \
|
||||
--with-expat \
|
||||
--with-lzma \
|
||||
--with-python=$pythonver \
|
||||
--with-rocm-dbgapi=$ROCM_INSTALL_PATH \
|
||||
--with-system-zlib \
|
||||
--with-zstd \
|
||||
--without-babeltrace \
|
||||
--without-guile \
|
||||
--without-intel-pt \
|
||||
--without-libunwind-ia64 \
|
||||
--without-xxhash \
|
||||
PKG_CONFIG_PATH="${ROCM_INSTALL_PATH}/share/pkgconfig" \
|
||||
LDFLAGS="$LDFLAGS"
|
||||
LD_RUN_PATH='${ORIGIN}/../lib' make $MAKE_OPTS
|
||||
|
||||
REPLACE_LIB_NAME=$(ldd -d $BUILD_DIR/gdb/gdb |awk '/libpython/{print $1}')
|
||||
echo "Replacing $REPLACE_LIB_NAME with $LIB_AMD_PYTHON"
|
||||
patchelf --replace-needed $REPLACE_LIB_NAME $LIB_AMD_PYTHON $BUILD_DIR/gdb/gdb
|
||||
|
||||
mkdir -p $BUILD_DIR/package/main${ROCM_INSTALL_PATH}/{share/rocgdb,bin}
|
||||
|
||||
make $MAKE_OPTS -C gdb DESTDIR=$BUILD_DIR/package/main install install-pdf install-html
|
||||
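Together with the preinst script earlier in this file, the patchelf step above means rocgdb no longer depends on a specific libpython3.x soname: its NEEDED entry is rewritten to libamdpython.so, and preinst points that symlink at whatever libpython3 ldconfig reports on the host. A quick, hypothetical post-install sanity check:

    # rocgdb should resolve Python through the libamdpython.so shim
    ldd "$ROCM_INSTALL_PATH/bin/rocgdb" | grep libamdpython
    # the shim should point at the host's libpython3 (exact path varies by distro)
    readlink -f "$ROCM_INSTALL_PATH/lib/libamdpython.so"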
@@ -381,7 +477,7 @@ main(){
|
||||
|
||||
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
|
||||
eval set -- "$VALID_STR"
|
||||
|
||||
ASAN_BUILD="no"
|
||||
while true ;
|
||||
do
|
||||
case "$1" in
|
||||
@@ -393,9 +489,10 @@ do
|
||||
BUILD_TYPE="Release" ; shift ; MAKEARG="$MAKEARG REL=1" ;; # For compatability with other scripts
|
||||
(-a | --address_sanitizer)
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
set_address_sanitizer_on
|
||||
ASAN_BUILD="yes" ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
ack_and_skip_static ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
(-p | --package) #FIXME
|
||||
|
||||
@@ -7,7 +7,7 @@ printUsage() {
|
||||
echo "Usage: $(basename "${BASH_SOURCE}") [options ...]"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
|
||||
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
|
||||
echo " -c, --clean Clean output and delete all intermediate work"
|
||||
echo " -p, --package <type> Specify packaging format"
|
||||
echo " -r, --release Make a release build instead of a debug build"
|
||||
@@ -65,7 +65,7 @@ do
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on ; shift ;;
|
||||
(-s | --static)
|
||||
SHARED_LIBS="OFF" ; shift ;;
|
||||
ack_and_skip_static ;;
|
||||
(-o | --outdir)
|
||||
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
|
||||
(-p | --package)
|
||||
@@ -127,6 +127,10 @@ build_rocm_bandwidth_test() {
|
||||
echo "Packaging $TEST_NAME"
|
||||
cmake --build "$TEST_BLD_DIR" -- $MAKEARG -C $TEST_BLD_DIR package
|
||||
|
||||
mkdir -p "$TEST_BIN_DIR"
|
||||
echo "Copying $TEST_NAME to $TEST_BIN_DIR"
|
||||
progressCopy "$TEST_BLD_DIR/$TEST_NAME" "$TEST_BIN_DIR"
|
||||
|
||||
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$TEST_PKG_DEB" $TEST_BLD_DIR/*.deb
|
||||
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$TEST_PKG_RPM" $TEST_BLD_DIR/*.rpm
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ printUsage() {
|
||||
return 0
|
||||
}
|
||||
|
||||
PROJ_NAME="rsmi"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
TARGET="build"
|
||||
|
||||
@@ -30,8 +31,8 @@ PACKAGE_LIB=$(getLibPath)
|
||||
PACKAGE_INCLUDE="$(getIncludePath)"
|
||||
|
||||
RSMI_BUILD_DIR=$(getBuildPath rsmi)
|
||||
RSMI_PACKAGE_DEB_DIR="$(getPackageRoot)/deb/rsmi"
|
||||
RSMI_PACKAGE_RPM_DIR="$(getPackageRoot)/rpm/rsmi"
|
||||
RSMI_PACKAGE_DEB_DIR="$(getPackageRoot)/deb/$PROJ_NAME"
|
||||
RSMI_PACKAGE_RPM_DIR="$(getPackageRoot)/rpm/$PROJ_NAME"
|
||||
RSMI_BUILD_TYPE="debug"
|
||||
BUILD_TYPE="Debug"
|
||||
|
||||
|
||||
@@ -22,17 +22,16 @@ printUsage() {
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
PROJ_NAME="rocminfo"
|
||||
TARGET="build"
|
||||
ROCMINFO_DEST="$(getBinPath)"
|
||||
ROCMINFO_SRC_ROOT="$ROCMINFO_ROOT"
|
||||
ROCMINFO_BUILD_DIR="$(getBuildPath rocminfo)"
|
||||
ROCMINFO_BUILD_DIR="$(getBuildPath $PROJ_NAME)"
|
||||
|
||||
MAKEARG="$DASH_JAY"
|
||||
PACKAGE_ROOT="$(getPackageRoot)"
|
||||
PACKAGE_UTILS="$(getUtilsPath)"
|
||||
ROCMINFO_PACKAGE_DEB="$(getPackageRoot)/deb/rocminfo"
|
||||
ROCMINFO_PACKAGE_RPM="$(getPackageRoot)/rpm/rocminfo"
|
||||
ROCMINFO_PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
|
||||
ROCMINFO_PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
|
||||
BUILD_TYPE="debug"
|
||||
SHARED_LIBS="ON"
|
||||
CLEAN_OR_OUT=0;
|
||||
@@ -91,6 +90,7 @@ build_rocminfo() {
|
||||
|
||||
cmake \
|
||||
$(rocm_cmake_params) \
|
||||
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
|
||||
-DROCRTST_BLD_TYPE="$BUILD_TYPE" \
|
||||
$(rocm_common_cmake_params) \
|
||||
-DCPACK_PACKAGE_VERSION_MAJOR="1" \
|
||||
|
||||
@@ -10,6 +10,10 @@ ROCM_RVS_LIB_RPATH="\$ORIGIN/.."
|
||||
build_rocmvalidationsuite() {
|
||||
echo "Start build"
|
||||
|
||||
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
|
||||
ack_and_skip_static
|
||||
fi
|
||||
|
||||
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
|
||||
set_asan_env_vars
|
||||
set_address_sanitizer_on
|
||||
@@ -17,11 +21,13 @@ build_rocmvalidationsuite() {
|
||||
|
||||
cd "${COMPONENT_SRC}"
|
||||
mkdir -p "$BUILD_DIR"
|
||||
init_rocm_common_cmake_params
|
||||
|
||||
cmake \
|
||||
$(rocm_common_cmake_params) \
|
||||
"${rocm_math_common_cmake_params[@]}" \
|
||||
-DFETCH_ROCMPATH_FROM_ROCMCORE=ON \
|
||||
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_LIB_RPATH:$ROCM_RVS_LIB_RPATH" \
|
||||
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--build-id=sha1,--rpath,$ROCM_LIB_RPATH:$ROCM_RVS_LIB_RPATH" \
|
||||
-DRVS_BUILD_TESTS=FALSE \
|
||||
-B "$BUILD_DIR" \
|
||||
"$COMPONENT_SRC"
|
||||
|
||||
|
||||
@@ -16,12 +16,17 @@ build_rocprim() {
ASAN_CMAKE_PARAMS="false"
fi

SHARED_LIBS="ON"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
SHARED_LIBS="OFF"
fi

mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"

if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi

init_rocm_common_cmake_params

@@ -31,8 +36,8 @@ build_rocprim() {
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DBUILD_BENCHMARK=OFF \
-DBUILD_SHARED_LIBS=ON \
-DBUILD_TEST=ON \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DCMAKE_MODULE_PATH="${ROCM_PATH}/lib/cmake/hip;${ROCM_PATH}/hip/cmake" \
"$COMPONENT_SRC"
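Because GPU_ARCHS takes precedence over the expanded default target list, a caller can narrow the rocPRIM build to a few architectures. A hypothetical invocation (the script path, flag, and target choice are assumptions based on the surrounding scripts, not taken from them):

# Illustrative only: build rocPRIM for two targets instead of the full default list.
GPU_ARCHS="gfx90a:xnack-;gfx942" ./tools/rocm-build/build_rocprim.sh -r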
tools/rocm-build/build_rocprofiler-compute.sh (new executable file, 144 lines)
@@ -0,0 +1,144 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"

printUsage() {
echo
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
echo
echo "Options:"
echo " -h, --help Prints this help"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of ROCm Compute Profiler.
It needs to be used along with -r option"
echo
echo "Possible values for package <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo

return 0
}

API_NAME="rocprofiler-compute"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY -C $BUILD_DIR"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"

VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,static,address_sanitizer,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"

while true ;
do
case "$1" in
-h | --help)
printUsage ; exit 0;;
-a | --address_sanitizer)
set_asan_env_vars
set_address_sanitizer_on ; shift ;;
-c | --clean)
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
-p | --package)
MAKETARGET="$2" ; shift 2 ;;
-r | --release)
BUILD_TYPE="Release" ; shift ;;
-s | --static)
ack_and_skip_static ;;
-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
--) shift; break;;
*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac

done

RET_CONFLICT=1
check_conflicting_options "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
if [ $RET_CONFLICT -ge 30 ]; then
print_vars "$API_NAME" "$TARGET" "$BUILD_TYPE" "$SHARED_LIBS" "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
exit $RET_CONFLICT
fi

clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}

build() {
echo "Building $PROJ_NAME"
if [ "$DISTRO_ID" = centos-7 ]; then
echo "Skip make and uploading packages for ROCm Compute Profiler on Centos7 distro, due to python dependency"
exit 0
fi
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
pushd "$BUILD_DIR" || exit

echo "ROCm CMake Params: $(rocm_cmake_params)"
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"

#install python deps
#python3 -m pip install -t ${BUILD_DIR}/python-libs -r ${ROCPROFILER_COMPUTE_ROOT}/requirements.txt
print_lib_type $SHARED_LIBS
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DCHECK_PYTHON_DEPS=NO \
-DPYTHON_DEPS=${BUILD_DIR}/python-libs \
"$ROCPROFILER_COMPUTE_ROOT"
fi
make $MAKE_OPTS
make $MAKE_OPTS install
make $MAKE_OPTS package

copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}

print_output_directory() {
case ${PKGTYPE} in
("deb")
echo "${PACKAGE_DEB}";;
("rpm")
echo "${PACKAGE_RPM}";;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
}

verifyEnvSetup

case "$TARGET" in
(clean) clean ;;
(build) build ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

echo "Operation complete"
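The option loop above follows the pattern shared by these scripts: getopt normalizes the arguments, -c sets bit 1 of CLEAN_OR_OUT, -o sets bit 2, and check_conflicting_options (from compute_utils.sh) returns 30 or more for invalid combinations. A minimal standalone sketch of the pattern follows; check_conflicts is a hypothetical stand-in for the real helper, whose exact rules are not shown in this diff:

#!/bin/bash
# Minimal sketch of the shared option-parsing pattern; check_conflicts is an
# illustrative stand-in for check_conflicting_options in compute_utils.sh.
CLEAN_OR_OUT=0
TARGET="build"
PKGTYPE="deb"

opts=$(getopt -o hco: --long help,clean,outdir: -- "$@") || exit 1
eval set -- "$opts"
while true; do
    case "$1" in
        (-h | --help)   echo "Usage: $0 [-c] [-o pkg_type]"; exit 0 ;;
        (-c | --clean)  TARGET="clean";  ((CLEAN_OR_OUT |= 1)); shift ;;
        (-o | --outdir) TARGET="outdir"; PKGTYPE=$2; ((CLEAN_OR_OUT |= 2)); shift 2 ;;
        (--) shift; break ;;
        (*)  echo "Unexpected option: $1" >&2; exit 20 ;;
    esac
done

# Illustrative rule: requesting both clean (bit 1) and outdir (bit 2) conflicts.
check_conflicts() { [ "$1" -eq 3 ] && return 30; return 0; }
check_conflicts "$CLEAN_OR_OUT"
RET_CONFLICT=$?
if [ "$RET_CONFLICT" -ge 30 ]; then
    echo "Conflicting options: --clean and --outdir cannot be combined" >&2
    exit "$RET_CONFLICT"
fi
echo "TARGET=$TARGET PKGTYPE=$PKGTYPE"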
@@ -37,7 +37,6 @@ PACKAGE_INCLUDE="$(getIncludePath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
ROCM_WHEEL_DIR="${BUILD_DIR}/_wheel"
PACKAGE_PREFIX="$ROCM_INSTALL_PATH"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY"

@@ -74,8 +73,7 @@ while true; do
shift
;;
-s | --static)
SHARED_LIBS="OFF"
shift
ack_and_skip_static
;;
-w | --wheel)
WHEEL_PACKAGE=true

@@ -113,7 +111,6 @@ fi

clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$ROCM_WHEEL_DIR"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"

@@ -177,18 +174,6 @@ build_rocprofiler-sdk() {
fi
}

create_wheel_package() {
echo "Creating rocprofiler sdk wheel package"
mkdir -p "$ROCM_WHEEL_DIR"
cp -f "$SCRIPT_ROOT"/generate_setup_py.py "$ROCM_WHEEL_DIR"
cp -f "$SCRIPT_ROOT"/repackage_wheel.sh "$ROCM_WHEEL_DIR"
cd "$ROCM_WHEEL_DIR"
# Currently only supports python3.6
./repackage_wheel.sh "$BUILD_DIR"/*.rpm python3.6
# Copy the wheel created to RPM folder which will be uploaded to artifactory
copy_if WHL "WHL" "$PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
}

print_output_directory() {
case ${PKGTYPE} in
"deb")

@@ -214,9 +199,4 @@ case "$TARGET" in
*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
echo "Wheel Package build started !!!!"
create_wheel_package
fi

echo "Operation complete"
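Read together, the rocprofiler-sdk hunks remove the wheel path from this script: the _wheel staging directory, the create_wheel_package() helper that drove repackage_wheel.sh over the RPM, and the post-build -w branch are all deleted. Before this change, a combined release-plus-wheel run would have looked roughly like the hypothetical invocation below (the script name is assumed from the repository layout, and the output locations are taken from the removed code):

# Illustrative only: release build plus wheel repackaging in one run (pre-removal behavior).
./tools/rocm-build/build_rocprofiler-sdk.sh -r -w
# Per the removed helper: the *.whl lands under ${BUILD_DIR}/_wheel/dist and is
# copied into the rpm package directory for upload.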
tools/rocm-build/build_rocprofiler-systems.sh (new executable file, 236 lines)
@@ -0,0 +1,236 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"

printUsage() {
echo
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -w, --wheel Creates python wheel package of rocprof_sys.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo

return 0
}

API_NAME="rocprofiler-systems"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"

BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"

BUILD_TYPE="Debug"
MAKE_OPTS="-j 8"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0
MAKETARGET="deb"
PKGTYPE="deb"
ASAN=0

VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,address_sanitizer,static,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"

while true; do
#echo "parocessing $1"
case "$1" in
-h | --help)
printUsage
exit 0
;;
-c | --clean)
TARGET="clean"
((CLEAN_OR_OUT |= 1))
shift
;;
-r | --release)
BUILD_TYPE="RelWithDebInfo"
shift
;;
-a | --address_sanitizer)
ack_and_ignore_asan
# set_asan_env_vars
# set_address_sanitizer_on

ASAN=1
shift
;;
-s | --static)
ack_and_skip_static
;;
-o | --outdir)
TARGET="outdir"
PKGTYPE=$2
# OUT_DIR_SPECIFIED=1
((CLEAN_OR_OUT |= 2))
shift 2
;;
-p | --package)
MAKETARGET="$2"
shift 2
;;
-w | --wheel)
WHEEL_PACKAGE=true
shift
;;
--)
shift
break
;; # end delimiter
*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] " >&2
exit 20
;;
esac

done

RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
exit $RET_CONFLICT
fi

clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}

build_rocprofiler_systems() {
echo "Building $PROJ_NAME"

if [ $ASAN == 1 ]; then
echo "Skip make and uploading packages for rocprofiler-systems on ASAN build"
exit 0
fi
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
echo "Created build directory: $BUILD_DIR"
fi

cd $ROCPROFILER_SYSTEMS_ROOT || exit

echo "Current submodule status"
git submodule status
echo "Cached (old) submodule status"
git submodule status --cached
cat .git/config

echo "Updating submodules"
git submodule init

git submodule sync --recursive

git submodule update --init --recursive --force

echo "Updated submodule status"
git submodule status
cat .git/config

echo "Build directory: $BUILD_DIR"
pushd "$BUILD_DIR" || exit
print_lib_type $SHARED_LIBS

ELFUTIL_URL="https://compute-artifactory.amd.com/artifactory/rocm-generic-local/dev-tools/omnitrace/elfutils-0.188.tar.bz2"
BINUTIL_URL="https://compute-artifactory.amd.com/artifactory/rocm-generic-local/dev-tools/omnitrace/binutils-2.40.tar.gz"

echo "ROCm CMake Params: $(rocm_cmake_params)"
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"
echo "ELFUTIL_URL=$ELFUTIL_URL, BINUTIL_URL=$BINUTIL_URL"

if [ $ASAN == 1 ]; then
echo "Address Sanitizer path"

# Commenting out the below cmake command as it is not working as expected
# LD_LIBRARY_PATH=$ROCM_INSTALL_PATH/lib/asan:$LD_LIBRARY_PATH
# cmake \
# $(rocm_cmake_params) \
# $(rocm_common_cmake_params) \
# -DROCPROFSYS_BUILD_{LIBUNWIND,DYNINST}=ON \
# -DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
# -DAMDDeviceLibs_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/AMDDeviceLibs" \
# -Dhip_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hip" \
# -Dhip-lang_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hip-lang" \
# -Damd_comgr_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/amd_comgr" \
# -Dhsa-runtime64_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hsa-runtime64" \
# -Dhsakmt_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hsakmt" \
# -DROCM_PATH="${ROCM_INSTALL_PATH}/lib/asan" \
# -Drocprofiler_ROOT_DIR="${ROCM_INSTALL_PATH}/lib/asan" \
# -DCMAKE_HIP_COMPILER_ROCM_ROOT="${ROCM_INSTALL_PATH}" \
# -DCMAKE_PREFIX_PATH="${ROCM_INSTALL_PATH};${ROCM_INSTALL_PATH}/lib/asan" \
# -DCMAKE_LIBRARY_PATH="${ROCM_INSTALL_PATH}/lib/asan" \
# -DCPACK_DEBIAN_PACKAGE_SHLIBDEPS=OFF \
# "$ROCPROFILER_SYSTEMS_ROOT"

else
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DROCPROFSYS_BUILD_{LIBUNWIND,DYNINST}=ON \
-DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
-DElfUtils_DOWNLOAD_URL="$ELFUTIL_URL" \
-D{DYNINST,TIMEMORY}_BINUTILS_DOWNLOAD_URL="$BINUTIL_URL" \
"$ROCPROFILER_SYSTEMS_ROOT"
fi

popd || exit

echo "Make Options: $MAKE_OPTS"
cmake --build "$BUILD_DIR" --target all -- $MAKE_OPTS
cmake --build "$BUILD_DIR" --target install -- $MAKE_OPTS
cmake --build "$BUILD_DIR" --target package -- $MAKE_OPTS

copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}

print_output_directory() {
case ${PKGTYPE} in
"deb")
echo "${PACKAGE_DEB}"
;;
"rpm")
echo "${PACKAGE_RPM}"
;;
*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2
exit 1
;;
esac
exit
}

verifyEnvSetup

case "$TARGET" in
clean) clean ;;
build) build_rocprofiler_systems ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac

echo "Operation complete"
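The submodule refresh in build_rocprofiler_systems() (sync, then a forced recursive update) matters because a stale .git/config can otherwise pin submodules to old URLs or revisions. The same sequence works against any checkout with nested submodules; SRC_DIR below is an illustrative placeholder, not a variable from the scripts:

# Generic submodule refresh, equivalent to the sequence used in the script above.
SRC_DIR="/path/to/checkout"            # illustrative placeholder
git -C "$SRC_DIR" submodule sync --recursive
git -C "$SRC_DIR" submodule update --init --recursive --force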