Compare commits


21 Commits

Author SHA1 Message Date
ammallya
65b8498867 Merge pull request #4513 from ammallya/main
Name change for hip
2025-03-18 12:49:47 -07:00
ammallya
937dcb376b Change naming convention for hip 2025-03-18 12:49:09 -07:00
ammallya
ac944077ba Promote new 2025-03-18 12:48:31 -07:00
ammallya
1688d56edc Merge pull request #4512 from ammallya/main
Reverting rbt and clr
2025-03-18 11:27:49 -07:00
ammallya
042b01a71b Reverting hip_on_rocclr 2025-03-18 11:26:53 -07:00
ammallya
c26ff9528f Revert RBT 2025-03-18 11:25:47 -07:00
ammallya
9baa927218 Merge pull request #4511 from ammallya/main
Revert rocr.sh
2025-03-18 10:37:00 -07:00
ammallya
d7136806da Revert rocr.sh 2025-03-18 10:36:33 -07:00
ammallya
61ada2eae9 Merge pull request #4510 from ammallya/main
Reverting ROCm.mk
2025-03-18 09:51:02 -07:00
ammallya
9b34984b1b Reverting ROCm.mk 2025-03-18 09:50:41 -07:00
ammallya
20a264a715 Merge pull request #4509 from ammallya/main
Promoting Build Scripts
2025-03-18 09:37:21 -07:00
Ameya Keshava Mallya
3750e88066 Promoting Build Scripts 2025-03-18 16:36:14 +00:00
ammallya
beb5f5b48d Merge pull request #4502 from ammallya/main
Updating hipblaslt and hipsparselt build scripts
2025-03-14 14:21:06 -07:00
ammallya
65330eb26d Fixed small typo 2025-03-14 14:20:19 -07:00
ammallya
23b90e4e6b Update hipsparselt build script 2025-03-14 14:19:46 -07:00
ammallya
080c442789 Updating hipblaslt build script 2025-03-14 14:15:56 -07:00
ammallya
5d90797591 Merge pull request #4487 from ammallya/main
Updated to reduce ram usage
2025-03-12 15:11:37 -07:00
ammallya
0417c2196d Updated to reduce ram usage 2025-03-12 15:11:05 -07:00
dependabot[bot]
1f2a94bb3a Build(deps): Bump rocm-docs-core from 1.15.0 to 1.17.0 in /docs/sphinx (#4413)
Bumps [rocm-docs-core](https://github.com/ROCm/rocm-docs-core) from 1.15.0 to 1.17.0.
- [Release notes](https://github.com/ROCm/rocm-docs-core/releases)
- [Changelog](https://github.com/ROCm/rocm-docs-core/blob/develop/CHANGELOG.md)
- [Commits](https://github.com/ROCm/rocm-docs-core/compare/v1.15.0...v1.17.0)

---
updated-dependencies:
- dependency-name: rocm-docs-core
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-24 17:09:15 -07:00
ammallya
9c5c2c9ca1 Merge pull request #4363 from ammallya/main
Added main branch with fixes to run mainline
2025-02-10 15:11:07 -08:00
Ameya Keshava Mallya
190f609ea2 Added main branch with fixes to run mainline 2025-02-10 22:40:33 +00:00
116 changed files with 4216 additions and 4827 deletions

View File

@@ -101,7 +101,7 @@ jobs:
-DMIOPEN_BACKEND=HIP
-DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/miopen-deps
-DGPU_TARGETS=$(JOB_GPU_TARGET)
-DAMDGPU_TARGETS=$(JOB_GPU_TARGET)
-DMIOPEN_ENABLE_AI_KERNEL_TUNING=OFF
-DMIOPEN_ENABLE_AI_IMMED_MODE_FALLBACK=OFF
-DCMAKE_BUILD_TYPE=Release
@@ -129,8 +129,6 @@ jobs:
variables:
- group: common
- template: /.azuredevops/variables-global.yml
- name: ROCM_PATH
value: $(Agent.BuildDirectory)/rocm
pool: $(JOB_TEST_POOL)
workspace:
clean: all

View File

@@ -123,12 +123,16 @@ jobs:
targetType: 'inline'
workingDirectory: $(Build.SourcesDirectory)/rocrtst/suites/test_common
script: |
sudo rm -rf $(Agent.BuildDirectory)/external/llvm-project
mkdir -p $(Agent.BuildDirectory)/external/llvm-project/clang/lib
sudo ln -s $(Agent.BuildDirectory)/rocm/llvm/lib/clang/20/include $(Agent.BuildDirectory)/external/llvm-project/clang/lib/Headers
mkdir build && cd build
cmake .. \
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm \
-DTARGET_DEVICES=$(JOB_GPU_TARGET) \
-DROCM_DIR=$(Agent.BuildDirectory)/rocm \
-DLLVM_DIR=$(Agent.BuildDirectory)/rocm/llvm/bin \
-DOPENCL_DIR=$(Agent.BuildDirectory)/rocm/llvm/bin
-DOPENCL_INC_DIR=$(Agent.BuildDirectory)/rocm/llvm/lib/clang/21/include
make
make rocrtst_kernels

View File

@@ -51,7 +51,6 @@ parameters:
jobs:
- job: rccl
timeoutInMinutes: 90
variables:
- group: common
- template: /.azuredevops/variables-global.yml
@@ -79,6 +78,7 @@ jobs:
checkoutRef: ${{ parameters.checkoutRef }}
dependencyList: ${{ parameters.rocmDependencies }}
gpuTarget: $(JOB_GPU_TARGET)
- script: chmod +x $(Agent.BuildDirectory)/rocm/bin/hipify-perl
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
extraBuildFlags: >-
@@ -88,7 +88,7 @@ jobs:
-DCMAKE_BUILD_TYPE=Release
-DROCM_PATH=$(Agent.BuildDirectory)/rocm
-DBUILD_TESTS=ON
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/share/rocm/cmake;$(Agent.BuildDirectory)/rocm/libexec/hipify
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/rocm/share/rocm/cmake/
-DAMDGPU_TARGETS=$(JOB_GPU_TARGET)
-GNinja
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml

View File

@@ -84,7 +84,6 @@ jobs:
echo "##vso[task.setvariable variable=PYBIND11_PATH;]$(python3 -c 'import pybind11; print(pybind11.get_cmake_dir())')"
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
parameters:
installEnabled: false
extraBuildFlags: >-
-DROCM_PATH=$(Agent.BuildDirectory)/rocm
-DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(PYTHON_USER_SITE)/pybind11;$(PYTHON_DIST_PACKAGES)/pybind11;$(PYBIND11_PATH)
@@ -92,14 +91,6 @@ jobs:
-DGPU_TARGETS=$(JOB_GPU_TARGET)
-DCMAKE_INSTALL_PREFIX_PYTHON=$(Build.BinariesDirectory)
-GNinja
- task: Bash@3
displayName: 'rocPyDecode install'
inputs:
targetType: inline
script: |
sudo cmake --build . --target install
sudo chown -R $(whoami):$(id -gn) $(Build.BinariesDirectory)
workingDirectory: $(Build.SourcesDirectory)/build
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
parameters:
gpuTarget: $(JOB_GPU_TARGET)
@@ -114,8 +105,7 @@ jobs:
script: |
export ROCM_PATH=$(Agent.BuildDirectory)/rocm
export HIP_INCLUDE_DIRS=$(Agent.BuildDirectory)/rocm/include/hip
sudo python3 setup.py bdist_wheel
sudo chown -R $(whoami):$(id -gn) $(find . -name "*.whl")
python3 setup.py bdist_wheel
workingDirectory: $(Build.SourcesDirectory)
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml
parameters:

View File

@@ -56,7 +56,6 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
registerROCmPackages: true
- job: rocminfo_testing
dependsOn: rocminfo
@@ -103,6 +102,5 @@ jobs:
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml
parameters:
aptPackages: ${{ parameters.aptPackages }}
registerROCmPackages: true
environment: test
gpuTarget: $(JOB_GPU_TARGET)

View File

@@ -29,7 +29,6 @@ parameters:
- clr
- half
- llvm-project
- rocm-cmake
- rocminfo
- ROCR-Runtime
- name: rocmTestDependencies
@@ -80,7 +79,6 @@ jobs:
-DHALF_INCLUDE_DIRS=$(Agent.BuildDirectory)/rocm/include
-DCMAKE_BUILD_TYPE=Release
-DGPU_TARGETS=$(JOB_GPU_TARGET)
-DROCM_PLATFORM_VERSION=$(NEXT_RELEASE_VERSION)
-GNinja
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
parameters:

View File

@@ -0,0 +1,29 @@
variables:
- group: common
- template: /.azuredevops/variables-global.yml
parameters:
- name: checkoutRef
type: string
default: refs/tags/$(LATEST_RELEASE_TAG)
resources:
repositories:
- repository: pipelines_repo
type: github
endpoint: ROCm
name: ROCm/ROCm
- repository: release_repo
type: github
endpoint: ROCm
name: ROCm/TransferBench
ref: ${{ parameters.checkoutRef }}
trigger: none
pr: none
jobs:
- template: ${{ variables.CI_COMPONENT_PATH }}/TransferBench.yml
parameters:
checkoutRepo: release_repo
checkoutRef: ${{ parameters.checkoutRef }}

View File

@@ -20,37 +20,41 @@ steps:
ARTIFACT_NAME="composablekernel.${{ parameters.gpuTarget }}"
EXIT_CODE=0
# Try to find an Azure build for the specific CK commit called out in MIOpen's requirements.txt
# The commits that MIOpen reference are all merge commits from CK/develop to CK/amd-develop
# These commits are present on CK/amd-develop but not on CK/develop
# Ex-CI only builds CK/develop, so we need to find a commit present on both CK/develop and CK/amd-develop
CK_COMMIT=$(grep 'ROCm/composable_kernel' requirements.txt | sed -E 's/.*@([a-f0-9]{40}).*/\1/')
echo "Fetching CK build ID for commit $CK_COMMIT"
CK_CHECKS_URL="$GH_API/composable_kernel/commits/${CK_COMMIT}/check-runs"
CK_BUILD_ID=$(curl -s $CK_CHECKS_URL | \
CK_COMMIT_URL="$GH_API/composable_kernel/commits/${CK_COMMIT}"
PARENT_COMMIT=$(curl -s $CK_COMMIT_URL | jq '.parents[1].sha' | tr -d '"')
echo "Found parent commit: $PARENT_COMMIT"
PARENT_CHECKS_URL="$GH_API/composable_kernel/commits/${PARENT_COMMIT}/check-runs"
CK_BUILD_ID=$(curl -s $PARENT_CHECKS_URL | \
jq '.check_runs[] | select(.name == "composable_kernel" and .app.slug == "azure-pipelines") | .details_url' | \
tr -d '"' | grep -oP 'buildId=\K\d+')
# If none found, use latest successful CK build instead
if [[ -z "$CK_BUILD_ID" ]]; then
if [ -z "$CK_BUILD_ID" ]; then
echo "Did not find specific CK build ID"
LATEST_BUILD_URL="$AZ_API/build/builds?definitions=$(COMPOSABLE_KERNEL_PIPELINE_ID)&statusFilter=completed&resultFilter=succeeded&\$top=1&api-version=7.1"
CK_BUILD_ID=$(curl -s $LATEST_BUILD_URL | jq '.value[0].id')
echo "Found latest CK build ID: $CK_BUILD_ID"
EXIT_CODE=1
else
echo "Found specific CK build ID: $CK_BUILD_ID"
fi
AZURE_URL="$AZ_API/build/builds/$CK_BUILD_ID/artifacts?artifactName=$ARTIFACT_NAME&api-version=7.1"
ARTIFACT_URL=$(curl -s $AZURE_URL | jq '.resource.downloadUrl' | tr -d '"')
# If using the specific CK commit and it doesn't have any valid artifacts, use latest successful CK build instead
if { [[ -z "$ARTIFACT_URL" ]] || [[ "$ARTIFACT_URL" == "null" ]]; } && [[ $EXIT_CODE -eq 0 ]]; then
echo "Did not find valid specific CK build artifact"
LATEST_BUILD_URL="$AZ_API/build/builds?definitions=$(COMPOSABLE_KERNEL_PIPELINE_ID)&statusFilter=completed&resultFilter=succeeded&\$top=1&api-version=7.1"
if [ -z "$ARTIFACT_URL" ]; then
echo "Did not find specific CK build artifact"
LATEST_BUILD_URL="$AZ_API/build/builds?definitions=$(COMPOSABLE_KERNEL_PIPELINE_ID)&status=completed&result=succeeded&\$top=1&api-version=7.1"
CK_BUILD_ID=$(curl -s $LATEST_BUILD_URL | jq '.value[0].id')
echo "Found latest CK build ID: $CK_BUILD_ID"
AZURE_URL="$AZ_API/build/builds/$CK_BUILD_ID/artifacts?artifactName=$ARTIFACT_NAME&api-version=7.1"
ARTIFACT_URL=$(curl -s $AZURE_URL | jq '.resource.downloadUrl' | tr -d '"')
EXIT_CODE=2
elif [ $EXIT_CODE -eq 0 ]; then
echo "Found specific CK build ID: $CK_BUILD_ID"
fi
echo "Downloading CK artifact from $ARTIFACT_URL"
@@ -60,13 +64,9 @@ steps:
tar -zxvf $(System.ArtifactsDirectory)/$ARTIFACT_NAME/*.tar.gz -C $(Agent.BuildDirectory)/rocm
rm -r $(System.ArtifactsDirectory)/ck.zip $(System.ArtifactsDirectory)/$ARTIFACT_NAME
if [[ $EXIT_CODE -ne 0 ]]; then
if [ $EXIT_CODE -ne 0 ]; then
BUILD_COMMIT=$(curl -s $AZ_API/build/builds/$CK_BUILD_ID | jq '.sourceVersion' | tr -d '"')
if [[ $EXIT_CODE -eq 1 ]]; then
echo "WARNING: couldn't find a CK build for commit $CK_COMMIT"
elif [[ $EXIT_CODE -eq 2 ]]; then
echo "WARNING: couldn't find a valid CK artifact for commit $CK_COMMIT"
fi
echo "WARNING: couldn't find a CK build for commit $CK_COMMIT"
echo "Instead used latest CK build $CK_BUILD_ID for commit $BUILD_COMMIT"
fi
exit $EXIT_CODE

.gitignore vendored
View File

@@ -11,7 +11,6 @@ _toc.yml
docBin/
_doxygen/
_readthedocs/
__pycache__/
# avoid duplicating contributing.md due to conf.py
docs/CHANGELOG.md

View File

@@ -117,7 +117,6 @@ FX
Filesystem
FindDb
Flang
FluxBenchmark
Fortran
Fuyu
GALB
@@ -132,7 +131,6 @@ GDS
GEMM
GEMMs
GFortran
GFXIP
Gemma
GiB
GIM
@@ -156,7 +154,6 @@ HCA
HGX
HIPCC
HIPExtension
HIPification
HIPIFY
HIPification
HIPify
@@ -319,7 +316,6 @@ PipelineParallel
PnP
PowerEdge
PowerShell
Pretraining
Profiler's
PyPi
Pytest
@@ -341,7 +337,6 @@ RNNs
ROC
ROCProfiler
ROCT
ROCTx
ROCTracer
ROCclr
ROCdbgapi
@@ -482,7 +477,6 @@ ZenDNN
accuracies
activations
addr
ai
alloc
allocatable
allocator
@@ -548,7 +542,6 @@ cTDP
dataset
datasets
dataspace
datatemplate
datatype
datatypes
dbgapi
@@ -577,7 +570,6 @@ el
embeddings
enablement
encodings
endfor
endpgm
enqueue
env
@@ -698,7 +690,6 @@ pageable
pallas
parallelization
parallelizing
param
parameterization
passthrough
perfcounter
@@ -723,7 +714,6 @@ preprocessing
preprocessor
prequantized
prerequisites
pretraining
profiler
profilers
protobuf
@@ -776,7 +766,6 @@ rocm
rocminfo
rocprim
rocprof
rocprofv
rocprofiler
rocr
rocrand
@@ -816,7 +805,6 @@ supercomputing
symlink
symlinks
sys
tabindex
td
tensorfloat
th
@@ -862,7 +850,6 @@ vectorizes
virtualize
virtualized
vjxb
vllm
voxel
walkthrough
walkthroughs

View File

@@ -50,7 +50,7 @@ The following example shows how to use the repo tool to download the ROCm source
```bash
mkdir -p ~/ROCm/
cd ~/ROCm/
export ROCM_VERSION=6.3.3
export ROCM_VERSION=6.3.2
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync
```
@@ -77,8 +77,8 @@ The Build time will reduce significantly if we limit the GPU Architecture/s agai
mkdir -p ~/WORKSPACE/ # Or any folder name other than WORKSPACE
cd ~/WORKSPACE/
export ROCM_VERSION=6.3.3
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
export ROCM_VERSION=6.3.2
~/bin/repo init -u http://github.com/ROCm/ROCm.git -b develop -m tools/rocm-build/rocm-${ROCM_VERSION}.xml
~/bin/repo sync
# --------------------------------------

View File

@@ -10,7 +10,7 @@
<!-- markdownlint-disable reference-links-images -->
<!-- markdownlint-disable no-missing-space-atx -->
<!-- spellcheck-disable -->
# ROCm 6.3.3 release notes
# ROCm 6.3.2 release notes
The release notes provide a summary of notable changes since the previous ROCm release.
@@ -24,6 +24,8 @@ The release notes provide a summary of notable changes since the previous ROCm r
- [ROCm known issues](#rocm-known-issues)
- [ROCm resolved issues](#rocm-resolved-issues)
- [ROCm upcoming changes](#rocm-upcoming-changes)
```{note}
@@ -32,43 +34,35 @@ documentation to verify compatibility and system requirements.
```
## Release highlights
The following are notable new features and improvements in ROCm 6.3.3. For changes to individual components, see
The following are notable improvements in ROCm 6.3.2. For changes to individual components, see
[Detailed component changes](#detailed-component-changes).
### ROCm Offline Installer Creator updates
The ROCm Offline Installer Creator 6.3.3 adds a new Post-Install Options menu, which includes a new ``udev`` option for adding GPU resources access for all users. It also moves the user-specific GPU access option (for the ``video,render`` group) from the Driver Options menu to the Post-Install Options menu. See the [ROCm Offline Installer Creator](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/rocm-offline-installer.html#post-install-options-menu) documentation for more information.
### ROCm documentation updates
ROCm documentation continues to be updated to provide clearer and more comprehensive guidance for a wider variety of user needs and use cases.
* [Tutorials for AI developers](https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/) have been added. These tutorials are Jupyter notebook-based, easy-to-follow documents. They are ideal for AI developers who want to learn about specific topics, including inference, fine-tuning, and training.
* Documentation about ROCm compatibility with deep learning frameworks has been added. These topics outline ROCm-enabled features for each deep learning framework, key ROCm libraries that can influence the capabilities, validated Docker image tags, and features supported across the available ROCm and framework versions. For more information, see:
* The [LLM inference performance validation guide for AMD Instinct MI300X](https://rocm.docs.amd.com/en/latest/how-to/rocm-for-ai/inference/vllm-benchmark.html)
now includes additional models for performance benchmarking. The accompanying ROCm vLLM Docker has been upgraded to ROCm 6.3.1.
* The HIP documentation has been updated with new resources for developers. To learn more about concurrency, parallelism, and stream management on devices and multiple GPUs, see [Asynchronous concurrent execution](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/asynchronous.html)
* [PyTorch compatibility](https://rocm.docs.amd.com/en/latest/compatibility/ml-compatibility/pytorch-compatibility.html)
* The following HIP documentation topics have been updated:
- [Virtual memory management](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/memory_management/virtual_memory.html)
- [Programming for HIP runtime compiler (RTC)](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_rtc.html)
- [HIP porting guide](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_porting_guide.html)
- [Porting CUDA driver API](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_porting_driver_api.html)
- [CUDA to HIP API function comparison](https://rocm.docs.amd.com/projects/HIP/en/latest/reference/api_syntax.html)
* [TensorFlow compatibility](https://rocm.docs.amd.com/en/latest/compatibility/ml-compatibility/tensorflow-compatibility.html)
* [JAX compatibility](https://rocm.docs.amd.com/en/latest/compatibility/ml-compatibility/jax-compatibility.html)
* The [HIP C++ language extensions](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_cpp_language_extensions.html) and [Kernel language C++ support](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/kernel_language_cpp_support.html) topics have been reorganized to make them easier to find and review. The topics have also been enhanced with new content.
## Operating system and hardware support changes
Operating system and hardware support remain unchanged in this release.
ROCm 6.3.2 adds support for Azure Linux 3.0 (kernel: 6.6). Azure Linux is supported only on AMD Instinct accelerators. For more information, see [Azure Linux installation](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/quick-start.html).
See the [Compatibility
matrix](https://rocm.docs.amd.com/en/docs-6.3.3/compatibility/compatibility-matrix.html)
matrix](https://rocm.docs.amd.com/en/latest/compatibility/compatibility-matrix.html)
for more information about operating system and hardware compatibility.
## ROCm components
The following table lists the versions of ROCm components for ROCm 6.3.3, including any version
changes from 6.3.2 to 6.3.3. Click the component's updated version to go to a list of its changes.
The following table lists the versions of ROCm components for ROCm 6.3.2, including any version
changes from 6.3.1 to 6.3.2. Click the component's updated version to go to a list of its changes.
Click {fab}`github` to go to the component's source code on GitHub.
<div class="pst-scrollable-table-container">
@@ -90,47 +84,47 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="9">Libraries</th>
<th rowspan="9">Machine learning and computer vision</th>
<td><a href="https://rocm.docs.amd.com/projects/composable_kernel/en/docs-6.3.3/index.html">Composable Kernel</a></td>
<td><a href="https://rocm.docs.amd.com/projects/composable_kernel/en/docs-6.3.2/index.html">Composable Kernel</a></td>
<td>1.1.0</td>
<td><a href="https://github.com/ROCm/composable_kernel"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/AMDMIGraphX/en/docs-6.3.3/index.html">MIGraphX</a></td>
<td><a href="https://rocm.docs.amd.com/projects/AMDMIGraphX/en/docs-6.3.2/index.html">MIGraphX</a></td>
<td>2.11.0</td>
<td><a href="https://github.com/ROCm/AMDMIGraphX"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/MIOpen/en/docs-6.3.3/index.html">MIOpen</a></td>
<td><a href="https://rocm.docs.amd.com/projects/MIOpen/en/docs-6.3.2/index.html">MIOpen</a></td>
<td>3.3.0</td>
<td><a href="https://github.com/ROCm/MIOpen"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/MIVisionX/en/docs-6.3.3/index.html">MIVisionX</a></td>
<td><a href="https://rocm.docs.amd.com/projects/MIVisionX/en/docs-6.3.2/index.html">MIVisionX</a></td>
<td>3.1.0</td>
<td><a href="https://github.com/ROCm/MIVisionX"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocAL/en/docs-6.3.3/index.html">rocAL</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocAL/en/docs-6.3.2/index.html">rocAL</a></td>
<td>2.1.0</td>
<td><a href="https://github.com/ROCm/rocAL"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocDecode/en/docs-6.3.3/index.html">rocDecode</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocDecode/en/docs-6.3.2/index.html">rocDecode</a></td>
<td>0.8.0</td>
<td><a href="https://github.com/ROCm/rocDecode"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocJPEG/en/docs-6.3.3/index.html">rocJPEG</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocJPEG/en/docs-6.3.2/index.html">rocJPEG</a></td>
<td>0.6.0</td>
<td><a href="https://github.com/ROCm/rocJPEG"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocPyDecode/en/docs-6.3.3/index.html">rocPyDecode</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocPyDecode/en/docs-6.3.2/index.html">rocPyDecode</a></td>
<td>0.2.0</td>
<td><a href="https://github.com/ROCm/rocPyDecode"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rpp/en/docs-6.3.3/index.html">RPP</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rpp/en/docs-6.3.2/index.html">RPP</a></td>
<td>1.9.1</td>
<td><a href="https://github.com/ROCm/rpp"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
@@ -139,7 +133,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="1"></th>
<th rowspan="1">Communication</th>
<td><a href="https://rocm.docs.amd.com/projects/rccl/en/docs-6.3.3/index.html">RCCL</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rccl/en/docs-6.3.2/index.html">RCCL</a></td>
<td>2.21.5</td>
<td><a href="https://github.com/ROCm/rccl"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
@@ -148,82 +142,82 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="16"></th>
<th rowspan="16">Math</th>
<td><a href="https://rocm.docs.amd.com/projects/hipBLAS/en/docs-6.3.3/index.html">hipBLAS</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipBLAS/en/docs-6.3.2/index.html">hipBLAS</a></td>
<td>2.3.0</td>
<td><a href="https://github.com/ROCm/hipBLAS"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipBLASLt/en/docs-6.3.3/index.html">hipBLASLt</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipBLASLt/en/docs-6.3.2/index.html">hipBLASLt</a></td>
<td>0.10.0</td>
<td><a href="https://github.com/ROCm/hipBLASLt"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipFFT/en/docs-6.3.3/index.html">hipFFT</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipFFT/en/docs-6.3.2/index.html">hipFFT</a></td>
<td>1.0.17</td>
<td><a href="https://github.com/ROCm/hipFFT"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipfort/en/docs-6.3.3/index.html">hipfort</a></td>
<td>0.5.1</td>
<td><a href="https://rocm.docs.amd.com/projects/hipfort/en/docs-6.3.2/index.html">hipfort</a></td>
<td>0.5.0&nbsp;&Rightarrow;&nbsp;<a href="#hipfort-0-5-1">0.5.1</a></td>
<td><a href="https://github.com/ROCm/hipfort"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipRAND/en/docs-6.3.3/index.html">hipRAND</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipRAND/en/docs-6.3.2/index.html">hipRAND</a></td>
<td>2.11.1</td>
<td><a href="https://github.com/ROCm/hipRAND"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipSOLVER/en/docs-6.3.3/index.html">hipSOLVER</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipSOLVER/en/docs-6.3.2/index.html">hipSOLVER</a></td>
<td>2.3.0</td>
<td><a href="https://github.com/ROCm/hipSOLVER"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipSPARSE/en/docs-6.3.3/index.html">hipSPARSE</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipSPARSE/en/docs-6.3.2/index.html">hipSPARSE</a></td>
<td>3.1.2</td>
<td><a href="https://github.com/ROCm/hipSPARSE"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipSPARSELt/en/docs-6.3.3/index.html">hipSPARSELt</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipSPARSELt/en/docs-6.3.2/index.html">hipSPARSELt</a></td>
<td>0.2.2</td>
<td><a href="https://github.com/ROCm/hipSPARSELt"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocALUTION/en/docs-6.3.3/index.html">rocALUTION</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocALUTION/en/docs-6.3.2/index.html">rocALUTION</a></td>
<td>3.2.1</td>
<td><a href="https://github.com/ROCm/rocALUTION"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocBLAS/en/docs-6.3.3/index.html">rocBLAS</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocBLAS/en/docs-6.3.2/index.html">rocBLAS</a></td>
<td>4.3.0</td>
<td><a href="https://github.com/ROCm/rocBLAS"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocFFT/en/docs-6.3.3/index.html">rocFFT</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocFFT/en/docs-6.3.2/index.html">rocFFT</a></td>
<td>1.0.31</td>
<td><a href="https://github.com/ROCm/rocFFT"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocRAND/en/docs-6.3.3/index.html">rocRAND</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocRAND/en/docs-6.3.2/index.html">rocRAND</a></td>
<td>3.2.0</td>
<td><a href="https://github.com/ROCm/rocRAND"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocSOLVER/en/docs-6.3.3/index.html">rocSOLVER</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocSOLVER/en/docs-6.3.2/index.html">rocSOLVER</a></td>
<td>3.27.0</td>
<td><a href="https://github.com/ROCm/rocSOLVER"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocSPARSE/en/docs-6.3.3/index.html">rocSPARSE</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocSPARSE/en/docs-6.3.2/index.html">rocSPARSE</a></td>
<td>3.3.0</td>
<td><a href="https://github.com/ROCm/rocSPARSE"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocWMMA/en/docs-6.3.3/index.html">rocWMMA</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocWMMA/en/docs-6.3.2/index.html">rocWMMA</a></td>
<td>1.6.0</td>
<td><a href="https://github.com/ROCm/rocWMMA"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/Tensile/en/docs-6.3.3/src/index.html">Tensile</a></td>
<td><a href="https://rocm.docs.amd.com/projects/Tensile/en/docs-6.3.2/src/index.html">Tensile</a></td>
<td>4.42.0</td>
<td><a href="https://github.com/ROCm/Tensile"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
@@ -232,22 +226,22 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="4"></th>
<th rowspan="4">Primitives</th>
<td><a href="https://rocm.docs.amd.com/projects/hipCUB/en/docs-6.3.3/index.html">hipCUB</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipCUB/en/docs-6.3.2/index.html">hipCUB</a></td>
<td>3.3.0</td>
<td><a href="https://github.com/ROCm/hipCUB"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/hipTensor/en/docs-6.3.3/index.html">hipTensor</a></td>
<td><a href="https://rocm.docs.amd.com/projects/hipTensor/en/docs-6.3.2/index.html">hipTensor</a></td>
<td>1.4.0</td>
<td><a href="https://github.com/ROCm/hipTensor"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocPRIM/en/docs-6.3.3/index.html">rocPRIM</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocPRIM/en/docs-6.3.2/index.html">rocPRIM</a></td>
<td>3.3.0</td>
<td><a href="https://github.com/ROCm/rocPRIM"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocThrust/en/docs-6.3.3/index.html">rocThrust</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocThrust/en/docs-6.3.2/index.html">rocThrust</a></td>
<td>3.3.0</td>
<td><a href="https://github.com/ROCm/rocThrust"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
@@ -256,27 +250,27 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="7">Tools</th>
<th rowspan="7">System management</th>
<td><a href="https://rocm.docs.amd.com/projects/amdsmi/en/docs-6.3.3/index.html">AMD SMI</a></td>
<td><a href="https://rocm.docs.amd.com/projects/amdsmi/en/docs-6.3.2/index.html">AMD SMI</a></td>
<td>24.7.1</td>
<td><a href="https://github.com/ROCm/amdsmi"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rdc/en/docs-6.3.3/index.html">ROCm Data Center Tool</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rdc/en/docs-6.3.2/index.html">ROCm Data Center Tool</a></td>
<td>0.3.0</td>
<td><a href="https://github.com/ROCm/rdc"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocminfo/en/docs-6.3.3/index.html">rocminfo</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocminfo/en/docs-6.3.2/index.html">rocminfo</a></td>
<td>1.0.0</td>
<td><a href="https://github.com/ROCm/rocminfo"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocm_smi_lib/en/docs-6.3.3/index.html">ROCm SMI</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocm_smi_lib/en/docs-6.3.2/index.html">ROCm SMI</a></td>
<td>7.4.0</td>
<td><a href="https://github.com/ROCm/rocm_smi_lib"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/ROCmValidationSuite/en/docs-6.3.3/index.html">ROCmValidationSuite</a></td>
<td><a href="https://rocm.docs.amd.com/projects/ROCmValidationSuite/en/docs-6.3.2/index.html">ROCmValidationSuite</a></td>
<td>1.1.0</td>
<td><a href="https://github.com/ROCm/ROCmValidationSuite"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
@@ -285,38 +279,38 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="6"></th>
<th rowspan="6">Performance</th>
<td><a href="https://rocm.docs.amd.com/projects/rocm_bandwidth_test/en/docs-6.3.3/index.html">ROCm Bandwidth
<td><a href="https://rocm.docs.amd.com/projects/rocm_bandwidth_test/en/docs-6.3.2/index.html">ROCm Bandwidth
Test</a></td>
<td>1.4.0</td>
<td><a href="https://github.com/ROCm/rocm_bandwidth_test/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler-compute/en/docs-6.3.3/index.html">ROCm Compute Profiler</a></td>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler-compute/en/docs-6.3.2/index.html">ROCm Compute Profiler</a></td>
<td>3.0.0</td>
<td><a href="https://github.com/ROCm/rocprofiler-compute"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler-systems/en/docs-6.3.3/index.html">ROCm Systems Profiler</a></td>
<td>0.1.1&nbsp;&Rightarrow;&nbsp;<a href="#rocm-systems-profiler-0-1-2">0.1.2</td>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler-systems/en/docs-6.3.2/index.html">ROCm Systems Profiler</a></td>
<td>0.1.0&nbsp;&Rightarrow;&nbsp;<a href="#rocm-systems-profiler-0-1-1">0.1.1</td>
<td><a href="https://github.com/ROCm/rocprofiler-systems"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler/en/docs-6.3.3/index.html">ROCProfiler</a></td>
<td>2.0.0</td>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler/en/docs-6.3.2/index.html">ROCProfiler</a></td>
<td>2.0.0&nbsp;&Rightarrow;&nbsp;<a href="#rocprofiler-2-0-0">2.0.0</a></td>
<td><a href="https://github.com/ROCm/ROCProfiler/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler-sdk/en/docs-6.3.3/index.html">ROCprofiler-SDK</a></td>
<td>0.5.0</td>
<td><a href="https://rocm.docs.amd.com/projects/rocprofiler-sdk/en/docs-6.3.2/index.html">ROCprofiler-SDK</a></td>
<td>0.5.0&nbsp;&Rightarrow;&nbsp;<a href="#rocprofiler-sdk-0-5-0">0.5.0</a></td>
<td><a href="https://github.com/ROCm/rocprofiler-sdk/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr >
<td><a href="https://rocm.docs.amd.com/projects/roctracer/en/docs-6.3.3/index.html">ROCTracer</a></td>
<td><a href="https://rocm.docs.amd.com/projects/roctracer/en/docs-6.3.2/index.html">ROCTracer</a></td>
<td>4.1.0</td>
<td><a href="https://github.com/ROCm/ROCTracer/"><i
class="fab fa-github fa-lg"></i></a></td>
@@ -326,32 +320,32 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tr>
<th rowspan="5"></th>
<th rowspan="5">Development</th>
<td><a href="https://rocm.docs.amd.com/projects/HIPIFY/en/docs-6.3.3/index.html">HIPIFY</a></td>
<td><a href="https://rocm.docs.amd.com/projects/HIPIFY/en/docs-6.3.2/index.html">HIPIFY</a></td>
<td>18.0.0</td>
<td><a href="https://github.com/ROCm/HIPIFY/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/ROCdbgapi/en/docs-6.3.3/index.html">ROCdbgapi</a></td>
<td><a href="https://rocm.docs.amd.com/projects/ROCdbgapi/en/docs-6.3.2/index.html">ROCdbgapi</a></td>
<td>0.77.0</td>
<td><a href="https://github.com/ROCm/ROCdbgapi/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/ROCmCMakeBuildTools/en/docs-6.3.3/index.html">ROCm CMake</a></td>
<td><a href="https://rocm.docs.amd.com/projects/ROCmCMakeBuildTools/en/docs-6.3.2/index.html">ROCm CMake</a></td>
<td>0.14.0</td>
<td><a href="https://github.com/ROCm/rocm-cmake/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/ROCgdb/en/docs-6.3.3/index.html">ROCm Debugger (ROCgdb)</a>
<td><a href="https://rocm.docs.amd.com/projects/ROCgdb/en/docs-6.3.2/index.html">ROCm Debugger (ROCgdb)</a>
</td>
<td>15.2</td>
<td><a href="https://github.com/ROCm/ROCgdb/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/rocr_debug_agent/en/docs-6.3.3/index.html">ROCr Debug Agent</a>
<td><a href="https://rocm.docs.amd.com/projects/rocr_debug_agent/en/docs-6.3.2/index.html">ROCr Debug Agent</a>
</td>
<td>2.0.3</td>
<td><a href="https://github.com/ROCm/rocr_debug_agent/"><i
@@ -361,13 +355,13 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tbody class="rocm-components-compilers">
<tr>
<th rowspan="2" colspan="2">Compilers</th>
<td><a href="https://rocm.docs.amd.com/projects/HIPCC/en/docs-6.3.3/index.html">HIPCC</a></td>
<td><a href="https://rocm.docs.amd.com/projects/HIPCC/en/docs-6.3.2/index.html">HIPCC</a></td>
<td>1.1.1</td>
<td><a href="https://github.com/ROCm/llvm-project/"><i
class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.3/index.html">llvm-project</a></td>
<td><a href="https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.2/index.html">llvm-project</a></td>
<td>18.0.0</td>
<td><a href="https://github.com/ROCm/llvm-project/"><i
class="fab fa-github fa-lg"></i></a></td>
@@ -376,12 +370,12 @@ Click {fab}`github` to go to the component's source code on GitHub.
<tbody class="rocm-components-runtimes">
<tr>
<th rowspan="2" colspan="2">Runtimes</th>
<td><a href="https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.3/index.html">HIP</a></td>
<td>6.3.2</td>
<td><a href="https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.2/index.html">HIP</a></td>
<td>6.3.1&nbsp;&Rightarrow;&nbsp;<a href="#hip-6-3-2">6.3.2</a></td>
<td><a href="https://github.com/ROCm/HIP/"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
<tr>
<td><a href="https://rocm.docs.amd.com/projects/ROCR-Runtime/en/docs-6.3.3/index.html">ROCr Runtime</a></td>
<td><a href="https://rocm.docs.amd.com/projects/ROCR-Runtime/en/docs-6.3.2/index.html">ROCr Runtime</a></td>
<td>1.14.0</td>
<td><a href="https://github.com/ROCm/ROCR-Runtime/"><i class="fab fa-github fa-lg"></i></a></td>
</tr>
@@ -393,34 +387,112 @@ Click {fab}`github` to go to the component's source code on GitHub.
The following sections describe key changes to ROCm components.
### **ROCm Systems Profiler** (0.1.2)
### **HIP** (6.3.2)
#### Added
* Tracking of Heterogeneous System Architecture (HSA) handlers:
- Adds an atomic counter to track the outstanding HSA handlers.
- Waits on CPU for the callbacks if the number exceeds the defined value.
* Code to capture Architected Queueing Language (AQL) packets for HIP graph memory copy nodes between host and device. HIP enqueues AQL packets during graph launch.
* Control to use system pool implementation in runtime commands handling. By default, it is disabled.
* A new path to avoid `WaitAny` calls in `AsyncEventsLoop`. The new path is selected by default.
* Runtime control to decrement the counter only if the event is popped. There is a new way to restore dead-signal cleanup for the old path.
* A new logic in runtime to track the age of events from the kernel mode driver.
#### Optimized
* HSA callback performance. The HIP runtime creates and submits commands in the queue and interacts with HSA through a callback function. HIP waits for the CPU status from HSA to optimize the handling of events, profiling, commands, and HSA signals for higher performance.
* Runtime optimization which combines all logic of `WaitAny` in a single processing loop and avoids extra memory allocations or reference counting. The runtime won't spin on the CPU if all events are busy.
* Multi-threaded dispatches for performance improvement.
* Command submissions and processing between CPU and GPU by introducing a way to limit the software batch size.
* Switch to `std::shared_mutex` in the bookkeeping logic for streams accessed from multiple threads simultaneously, for performance improvement in specific customer applications.
* `std::shared_mutex` is used in memory object mapping, for performance improvement.
#### Resolved issues
* Fixed an error that prevented GPU hardware activity from being presented in certain workloads.
* Race condition in multi-threaded producer/consumer scenario with `hipMallocFromPoolAsync`.
* Segmentation fault with `hipStreamLegacy` while using the API `hipStreamWaitEvent`.
* Usage of `hipStreamLegacy` in HIP event record.
* A soft hang in graph execution process from HIP user object. The fix handles the release of graph execution object properly considering synchronization on the device/stream. The user application now behaves the same with `hipUserObject` on both the AMD ROCm and NVIDIA CUDA platforms.
### **hipfort** (0.5.1)
#### Added
* Support for building with LLVM Flang.
#### Resolved issues
* Fixed the exported `hipfort::hipsparse` CMake target.
### **ROCm Systems Profiler** (0.1.1)
#### Resolved issues
* Fixed an error when building from source on some SUSE and RHEL systems when using the `ROCPROFSYS_BUILD_DYNINST` option.
### **ROCProfiler** (2.0.0)
#### Changed
* Replaced `CU_UTILIZATION` metric with `SIMD_UTILIZATION` for better accuracy.
#### Resolved issues
* Fixed the `VALUBusy` and `SALUBusy` activity metrics for accuracy on MI300.
### **ROCprofiler-SDK** (0.5.0)
#### Added
* Support for system-wide collection of SQ counters across all HSA processes.
#### Changed
* `rocprofiler_sample_device_counting_service` API updated to return counter output immediately, when called in synchronous mode.
## ROCm known issues
ROCm known issues are noted on {fab}`github` [GitHub](https://github.com/ROCm/ROCm/labels/Verified%20Issue). For known
issues related to individual components, review the [Detailed component changes](#detailed-component-changes).
### Zero value is displayed in ROCTx aggregated statistics
## ROCm resolved issues
The ROCTx markers are standalone markers within the ROCProfiler-SDK library. Each marker reports only a single timestamp, which is recorded as the `start_timestamp` and `end_timestamp`. As a result, the value for aggregated statistics presented in `TotalDurationNs`, `maxNs`, and `minNs`, is zero. The zero value indicates that the actual execution time is not associated with the markers, which is an expected behavior. See [GitHub issue #4396](https://github.com/ROCm/ROCm/issues/4396).
The following are previously known issues resolved in this release. For resolved issues related to
individual components, review the [Detailed component changes](#detailed-component-changes).
### TransferBench packages not functional
The issue with TransferBench packages not being compiled properly has been fixed. For more information, see [GitHub issue #4081](https://github.com/ROCm/ROCm/issues/4081).
### ROCm Compute Profiler CTest failure in CI
When running the ROCm Compute Profiler (`rocprof-compute`) CTest in the Azure CI environment, the
`rocprof-compute` execution test failed. This issue was due to an outdated test file that was not renamed
(`omniperf` to `rocprof-compute`), and the `ROCM_PATH` environment variable not being set in
the Azure CI environment, resulting in the tool being unable to extract chip information as expected.
This issue has been fixed in the ROCm 6.3.2 release. See [GitHub issue #4085](https://github.com/ROCm/ROCm/issues/4085).
### MIVisionX memory access fault in Canny edge detection
An issue where Canny edge detection kernels accessed out-of-bounds memory locations while
computing gradient intensities on edge pixels has been fixed. This issue was isolated to
Canny-specific use cases on Instinct MI300 series accelerators. See [GitHub issue #4086](https://github.com/ROCm/ROCm/issues/4086).
### AMD VCN instability with rocDecode
A firmware crash on gfx942 devices when AMD Video Core Next (VCN) was used for rocDecode operations has been resolved.
## ROCm upcoming changes
The following changes to the ROCm software stack are anticipated for future releases.
### ROCTracer and ROCProfiler (rocprof and rocprofv2) deprecation
Development and support for ROCTracer and ROCProfiler (`rocprof` and `rocprofv2`) will phase out in favor of ROCprofiler-SDK (`rocprofv3`) in upcoming ROCm releases. Going forward, only critical defect fixes will be addressed for older versions of profiling tools and libraries. Upgrade to the latest version of ROCprofiler-SDK (`rocprofv3`) library to ensure continued support and access to new features.
### AMDGPU wavefront size compiler macro deprecation
The `__AMDGCN_WAVEFRONT_SIZE__` macro will be deprecated in an upcoming
release. It is recommended to remove any use of this macro. For more information, see [AMDGPU
support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.3/LLVM/clang/html/AMDGPUSupport.html).
support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.2/LLVM/clang/html/AMDGPUSupport.html).
### HIPCC Perl scripts deprecation

View File

@@ -1,76 +1,78 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest>
<remote name="rocm-org" fetch="https://github.com/ROCm/" />
<default revision="refs/tags/rocm-6.3.3"
<default revision="refs/heads/amd-mainline"
remote="rocm-org"
sync-c="true"
sync-j="4" />
<!--list of projects for ROCm-->
<project name="ROCK-Kernel-Driver" />
<project name="ROCR-Runtime" />
<project name="amdsmi" />
<project name="rdc" />
<project name="rocm_bandwidth_test" />
<project name="rocm_smi_lib" />
<project name="rocm-core" />
<project name="rocm-examples" />
<project name="rocminfo" />
<project name="rocprofiler" />
<project name="rocprofiler-register" />
<project name="rocprofiler-sdk" />
<project name="rocprofiler-compute" />
<project name="rocprofiler-systems" />
<project name="roctracer" />
<project name="ROCm" revision="refs/heads/main" />
<project name="ROCK-Kernel-Driver" revision="refs/heads/master" />
<project name="ROCR-Runtime" revision="refs/heads/amd-master"/>
<project name="amdsmi" />
<project name="rdc" />
<project name="rocm_bandwidth_test" revision="refs/heads/master" />
<project name="rocm_smi_lib" />
<project name="rocm-core" revision="refs/heads/amd-master" />
<project name="rocm-examples" revision="refs/heads/develop"/>
<project name="rocminfo" revision="refs/heads/amd-master" />
<project name="rocprofiler" />
<project name="rocprofiler-register" />
<project name="rocprofiler-sdk" />
<project name="rocprofiler-compute" />
<project name="rocprofiler-systems" />
<project name="roctracer" />
<!--HIP Projects-->
<project name="HIP" />
<project name="hip-tests" />
<project name="HIPIFY" />
<project name="clr" />
<project name="hipother" />
<project name="hip" />
<project name="hip-tests" />
<project name="HIPIFY" />
<project name="clr" />
<project name="hipother" />
<!-- The following projects are all associated with the AMDGPU LLVM compiler -->
<project name="half" />
<project name="half" revision="refs/heads/rocm" />
<project name="llvm-project" />
<project name="SPIRV-LLVM-Translator" path="llvm-project/llvm/projects/SPIRV-LLVM-Translator" />
<!-- gdb projects -->
<project name="ROCdbgapi" />
<project name="ROCgdb" />
<project name="ROCdbgapi" />
<project name="ROCgdb" revision="refs/heads/amd-mainline-rocgdb-15"/>
<project name="rocr_debug_agent" />
<!-- ROCm Libraries -->
<project groups="mathlibs" name="AMDMIGraphX" />
<project groups="mathlibs" name="MIOpen" />
<project groups="mathlibs" name="MIVisionX" />
<project groups="mathlibs" name="ROCmValidationSuite" />
<project groups="mathlibs" name="Tensile" />
<project groups="mathlibs" name="composable_kernel" />
<project groups="mathlibs" name="hipBLAS-common" />
<project groups="mathlibs" name="hipBLAS" />
<project groups="mathlibs" name="hipBLASLt" />
<project groups="mathlibs" name="hipCUB" />
<project groups="mathlibs" name="hipFFT" />
<project groups="mathlibs" name="hipRAND" />
<project groups="mathlibs" name="hipSOLVER" />
<project groups="mathlibs" name="hipSPARSE" />
<project groups="mathlibs" name="hipSPARSELt" />
<project groups="mathlibs" name="hipTensor" />
<project groups="mathlibs" name="hipfort" />
<project groups="mathlibs" name="rccl" />
<project groups="mathlibs" name="rocAL" />
<project groups="mathlibs" name="rocALUTION" />
<project groups="mathlibs" name="rocBLAS" />
<project groups="mathlibs" name="rocDecode" />
<project groups="mathlibs" name="rocJPEG" />
<project groups="mathlibs" name="rocPyDecode" />
<project groups="mathlibs" name="rocFFT" />
<project groups="mathlibs" name="rocPRIM" />
<project groups="mathlibs" name="rocRAND" />
<project groups="mathlibs" name="rocSOLVER" />
<project groups="mathlibs" name="rocSPARSE" />
<project groups="mathlibs" name="rocThrust" />
<project groups="mathlibs" name="rocWMMA" />
<project groups="mathlibs" name="rocm-cmake" />
<project groups="mathlibs" name="rpp" />
<project groups="mathlibs" name="TransferBench" />
<project groups="mathlibs" name="AMDMIGraphX" revision="refs/heads/master" />
<project groups="mathlibs" name="MIOpen" revision="refs/heads/amd-master" />
<project groups="mathlibs" name="MIVisionX" revision="refs/heads/master" />
<project groups="mathlibs" name="ROCmValidationSuite" revision="refs/heads/master" />
<project groups="mathlibs" name="Tensile" revision="refs/heads/master" />
<project groups="mathlibs" name="composable_kernel" revision="refs/heads/amd-master"/>
<project groups="mathlibs" name="hipBLAS-common" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipBLAS" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipBLASLt" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipCUB" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipFFT" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipRAND" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipSOLVER" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipSPARSE" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipSPARSELt" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipTensor" revision="refs/heads/mainline" />
<project groups="mathlibs" name="hipfort" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rccl" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocAL" revision="refs/heads/master" />
<project groups="mathlibs" name="rocALUTION" revision="refs/heads/mainline"/>
<project groups="mathlibs" name="rocBLAS" revision="refs/heads/mainline"/>
<project groups="mathlibs" name="rocDecode" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocJPEG" revision="refs/heads/mainline"/>
<project groups="mathlibs" name="rocPyDecode" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocFFT" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocPRIM" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocRAND" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocSOLVER" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocSPARSE" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocThrust" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocWMMA" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rocm-cmake" revision="refs/heads/mainline" />
<project groups="mathlibs" name="rpp" revision="refs/heads/master" />
<project groups="mathlibs" name="TransferBench" />
<!-- Projects for OpenMP-Extras -->
<project name="aomp" path="openmp-extras/aomp" />
<project name="aomp-extras" path="openmp-extras/aomp-extras" />
<project name="flang" path="openmp-extras/flang" />
<project name="aomp" path="openmp-extras/aomp" />
<project name="aomp-extras" path="openmp-extras/aomp-extras" />
<project name="flang" path="openmp-extras/flang" />
</manifest>

View File

@@ -1,120 +1,120 @@
ROCm Version,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,
,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
,,,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
,,,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,,,
,Debian 12 [#single-node-past-60]_,Debian 12 [#single-node-past-60]_,Debian 12 [#single-node-past-60]_,,,,,,,,,,
,Azure Linux 3.0 [#mi300x-past-60]_,Azure Linux 3.0 [#mi300x-past-60]_,,,,,,,,,,,
,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
,gfx942,gfx942,gfx942,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_
,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
,,,,,,,,,,,,,
FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`PyTorch <../compatibility/ml-compatibility/pytorch-compatibility>`,"2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
:doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.4.31,0.4.31,0.4.31,0.4.31,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
,,,,,,,,,,,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
,,,,,,,,,,,,,
THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,,,,
Thrust,2.3.2,2.3.2,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
CUB,2.3.2,2.3.2,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
,,,,,,,,,,,,,
KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,,,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
,,,,,,,,,,,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.11.0,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
:doc:`MIOpen <miopen:index>`,3.3.0,3.3.0,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
:doc:`rocAL <rocal:index>`,2.1.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,0.8.0,0.8.0,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
:doc:`rocJPEG <rocjpeg:index>`,0.6.0,0.6.0,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.2.0,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A
:doc:`RPP <rpp:index>`,1.9.1,1.9.1,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
,,,,,,,,,,,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`RCCL <rccl:index>`,2.21.5,2.21.5,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
,,,,,,,,,,,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
:doc:`hipfort <hipfort:index>`,0.5.1,0.5.1,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.1,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.2,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
:doc:`rocBLAS <rocblas:index>`,4.3.0,4.3.0,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.31,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
:doc:`rocRAND <rocrand:index>`,3.2.0,3.2.0,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
:doc:`Tensile <tensile:src/index>`,4.42.0,4.42.0,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
,,,,,,,,,,,,,
PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`hipTensor <hiptensor:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
:doc:`rocPRIM <rocprim:index>`,3.3.0,3.3.0,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
,,,,,,,,,,,,,
SUPPORT LIBS,,,,,,,,,,,,,
`hipother <https://github.com/ROCm/hipother>`_,6.3.42134,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
,,,,,,,,,,,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.7.1,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.4.0,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
,,,,,,,,,,,,,
PERFORMANCE TOOLS,,,,,,,,,,,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.2,0.1.1,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60303,2.0.60302,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A
:doc:`ROCTracer <roctracer:index>`,4.1.60303,4.1.60302,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
,,,,,,,,,,,,,
DEVELOPMENT TOOLS,,,,,,,,,,,,,
:doc:`HIPIFY <hipify:index>`,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
,,,,,,,,,,,,,
COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,,,,
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
`Flang <https://github.com/ROCm/flang>`_,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`llvm-project <llvm-project:index>`,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
,,,,,,,,,,,,,
RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,,,,
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42134,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
:doc:`HIP <hip:index>`,6.3.42134,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0
ROCm Version,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,
,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
,,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
,RHEL 8.10,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
,,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.10 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,Oracle Linux 8.9 [#mi300x-past-60]_,,,
,Debian 12 [#single-node-past-60]_,Debian 12 [#single-node-past-60]_,,,,,,,,,,
,Azure Linux 3.0 [#mi300x-past-60]_,,,,,,,,,,,
,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
,gfx942,gfx942,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_
,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
,,,,,,,,,,,,
FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`PyTorch <../compatibility/ml-compatibility/pytorch-compatibility>`,"2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
:doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.4.31,0.4.31,0.4.31,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
,,,,,,,,,,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
,,,,,,,,,,,,
THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,,,
Thrust,2.3.2,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
CUB,2.3.2,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
,,,,,,,,,,,,
KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,,,
Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
,,,,,,,,,,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
:doc:`MIOpen <miopen:index>`,3.3.0,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
:doc:`rocAL <rocal:index>`,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,0.8.0,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
:doc:`rocJPEG <rocjpeg:index>`,0.6.0,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A
:doc:`RPP <rpp:index>`,1.9.1,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
,,,,,,,,,,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`RCCL <rccl:index>`,2.21.5,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
,,,,,,,,,,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
:doc:`hipfort <hipfort:index>`,0.5.1,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
:doc:`rocBLAS <rocblas:index>`,4.3.0,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
:doc:`rocRAND <rocrand:index>`,3.2.0,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
:doc:`Tensile <tensile:src/index>`,4.42.0,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
,,,,,,,,,,,,
PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`hipTensor <hiptensor:index>`,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
:doc:`rocPRIM <rocprim:index>`,3.3.0,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
,,,,,,,,,,,,
SUPPORT LIBS,,,,,,,,,,,,
`hipother <https://github.com/ROCm/hipother>`_,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
,,,,,,,,,,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
,,,,,,,,,,,,
PERFORMANCE TOOLS,,,,,,,,,,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.1,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60302,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A
:doc:`ROCTracer <roctracer:index>`,4.1.60302,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
,,,,,,,,,,,,
DEVELOPMENT TOOLS,,,,,,,,,,,,
:doc:`HIPIFY <hipify:index>`,18.0.0.25012,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
,,,,,,,,,,,,
COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,,,
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
`Flang <https://github.com/ROCm/flang>`_,18.0.0.25012,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`llvm-project <llvm-project:index>`,18.0.0.25012,18.0.0.24491,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.25012,18.0.0.24491,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
,,,,,,,,,,,,
RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,,,
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
:doc:`HIP <hip:index>`,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0

View File

@@ -23,7 +23,7 @@ compatibility and system requirements.
.. container:: format-big-table
.. csv-table::
:header: "ROCm Version", "6.3.3", "6.3.2", "6.2.0"
:header: "ROCm Version", "6.3.2", "6.3.1", "6.2.0"
:stub-columns: 1
:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04
@@ -33,7 +33,7 @@ compatibility and system requirements.
,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5"
,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.9 [#mi300x]_
,Debian 12 [#single-node]_,Debian 12 [#single-node]_,
,Azure Linux 3.0 [#mi300x]_,Azure Linux 3.0 [#mi300x]_,
,Azure Linux 3.0 [#mi300x]_,,
,.. _architecture-support-compatibility-matrix:,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2
@@ -83,7 +83,7 @@ compatibility and system requirements.
:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.2.0
:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.8.0
:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.14
:doc:`hipfort <hipfort:index>`,0.5.1,0.5.1,0.4.0
:doc:`hipfort <hipfort:index>`,0.5.1,0.5.0,0.4.0
:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.1,2.11.0
:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.2.0
:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.1
@@ -104,8 +104,8 @@ compatibility and system requirements.
:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.0.1
,,,
SUPPORT LIBS,,,
`hipother <https://github.com/ROCm/hipother>`_,6.3.42134,6.3.42134,6.2.41133
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.3,6.3.2,6.2.0
`hipother <https://github.com/ROCm/hipother>`_,6.3.42134,6.3.42133,6.2.41133
`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.2,6.3.1,6.2.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr]_,N/A [#ROCT-rocr]_,20240607.1.4246
,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix:,,
@@ -118,35 +118,34 @@ compatibility and system requirements.
PERFORMANCE TOOLS,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,2.0.1
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.2,0.1.1,1.11.2
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60303,2.0.60302,2.0.60200
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.1,0.1.0,1.11.2
:doc:`ROCProfiler <rocprofiler:index>`,2.0.60302,2.0.60301,2.0.60200
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.4.0
:doc:`ROCTracer <roctracer:index>`,4.1.60303,4.1.60302,4.1.60200
:doc:`ROCTracer <roctracer:index>`,4.1.60302,4.1.60301,4.1.60200
,,,
DEVELOPMENT TOOLS,,,
:doc:`HIPIFY <hipify:index>`,18.0.0.25012,18.0.0.25012,18.0.0.24232
:doc:`HIPIFY <hipify:index>`,18.0.0.25012,18.0.0.24491,18.0.0.24232
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.13.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.76.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,14.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3
,,,
COMPILERS,.. _compilers-support-compatibility-matrix:,,
COMPILERS,.. _compilers-support-compatibility-matrix:,..
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1
`Flang <https://github.com/ROCm/flang>`_,18.0.0.25012,18.0.0.25012,18.0.0.24232
:doc:`llvm-project <llvm-project:index>`,18.0.0.25012,18.0.0.25012,18.0.0.24232
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.25012,18.0.0.25012,18.0.0.24232
`Flang <https://github.com/ROCm/flang>`_,18.0.0.25012,18.0.0.24491,18.0.0.24232
:doc:`llvm-project <llvm-project:index>`,18.0.0.25012,18.0.0.24491,18.0.0.24232
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.25012,18.0.0.24491,18.0.0.24232
,,,
RUNTIMES,.. _runtime-support-compatibility-matrix:,,
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42134,6.3.42134,6.2.41133
:doc:`HIP <hip:index>`,6.3.42134,6.3.42134,6.2.41133
RUNTIMES,.. _runtime-support-compatibility-matrix:,..
:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42134,6.3.42133,6.2.41133
:doc:`HIP <hip:index>`,6.3.42134,6.3.42133,6.2.41133
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.13.0
.. rubric:: Footnotes
.. [#mi300x] Oracle Linux and Azure Linux are supported only on AMD Instinct MI300X.

View File

@@ -4,8 +4,6 @@
:description: JAX compatibility
:keywords: GPU, JAX compatibility
.. version-set:: rocm_version latest
*******************************************************************************
JAX compatibility
*******************************************************************************
@@ -121,8 +119,7 @@ Critical ROCm libraries for JAX
The functionality of JAX with ROCm is determined by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers. The versions described
are available in ROCm :version:`rocm_version`.
performance, and feature set available to developers.
.. list-table::
:header-rows: 1
@@ -132,7 +129,7 @@ are available in ROCm :version:`rocm_version`.
- Purpose
- Used in
* - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
- :version-ref:`hipBLAS rocm_version`
- 2.3.0
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
matrix and vector operations.
- Matrix multiplication in ``jax.numpy.matmul``, ``jax.lax.dot`` and
@@ -141,7 +138,7 @@ are available in ROCm :version:`rocm_version`.
``jax.numpy.einsum`` with matrix-multiplication patterns algebra
operations.
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
- :version-ref:`hipBLASLt rocm_version`
- 0.10.0
- hipBLASLt is an extension of hipBLAS, providing additional
features like epilogues fused into the matrix multiplication kernel or
use of integer tensor cores.
@@ -150,7 +147,7 @@ are available in ROCm :version:`rocm_version`.
operations, mixed-precision support, and hardware-specific
optimizations.
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
- :version-ref:`hipCUB rocm_version`
- 3.3.0
- Provides a C++ template library for parallel algorithms for reduction,
scan, sort and select.
- Reduction functions (``jax.numpy.sum``, ``jax.numpy.mean``,
@@ -158,23 +155,23 @@ are available in ROCm :version:`rocm_version`.
(``jax.numpy.cumsum``, ``jax.numpy.cumprod``) and sorting
(``jax.numpy.sort``, ``jax.numpy.argsort``).
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
- :version-ref:`hipFFT rocm_version`
- 1.0.17
- Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
- Used in functions like ``jax.numpy.fft``.
* - `hipRAND <https://github.com/ROCm/hipRAND>`_
- :version-ref:`hipRAND rocm_version`
- 2.11.0
- Provides fast random number generation for GPUs.
- The ``jax.random.uniform``, ``jax.random.normal``,
``jax.random.randint`` and ``jax.random.split``.
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
- :version-ref:`hipSOLVER rocm_version`
- 2.3.0
- Provides GPU-accelerated solvers for linear systems, eigenvalues, and
singular value decompositions (SVD).
- Solving linear systems (``jax.numpy.linalg.solve``), matrix
factorizations, SVD (``jax.numpy.linalg.svd``) and eigenvalue problems
(``jax.numpy.linalg.eig``).
* - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
- :version-ref:`hipSPARSE rocm_version`
- 3.1.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse matrix multiplication (``jax.numpy.matmul``), sparse
@@ -182,28 +179,28 @@ are available in ROCm :version:`rocm_version`.
(``jax.experimental.sparse.dot``), sparse linear system solvers and
sparse data handling.
* - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
- :version-ref:`hipSPARSELt rocm_version`
- 0.2.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse matrix multiplication (``jax.numpy.matmul``), sparse
matrix-vector and matrix-matrix products
(``jax.experimental.sparse.dot``) and sparse linear system solvers.
* - `MIOpen <https://github.com/ROCm/MIOpen>`_
- :version-ref:`MIOpen rocm_version`
- 3.3.0
- Optimized for deep learning primitives such as convolutions, pooling,
normalization, and activation functions.
- Speeds up convolutional neural networks (CNNs), recurrent neural
networks (RNNs), and other layers. Used in operations like
``jax.nn.conv``, ``jax.nn.relu``, and ``jax.nn.batch_norm``.
* - `RCCL <https://github.com/ROCm/rccl>`_
- :version-ref:`RCCL rocm_version`
- 2.21.5
- Optimized for multi-GPU communication for operations like all-reduce,
broadcast, and scatter.
- Distribute computations across multiple GPU with ``pmap`` and
``jax.distributed``. XLA automatically uses rccl when executing
operations across multiple GPUs on AMD hardware.
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
- :version-ref:`rocThrust rocm_version`
- 3.3.0
- Provides a C++ template library for parallel algorithms like sorting,
reduction, and scanning.
- Reduction operations like ``jax.numpy.sum``, ``jax.pmap`` for

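The JAX compatibility diff above keeps a table mapping each ROCm library to the JAX operations it backs. As an illustration of those mappings only, the following minimal sketch (assuming a ROCm-enabled ``jax``/``jaxlib`` install; array sizes are arbitrary) exercises several of the listed code paths:

.. code-block:: python

   # Sketch only: exercises JAX operations that the table above attributes
   # to ROCm libraries (hipRAND for RNG, hipBLAS/hipBLASLt for matmul,
   # hipFFT for FFTs, hipSOLVER for linear solves).
   import jax
   import jax.numpy as jnp

   key = jax.random.PRNGKey(0)
   a = jax.random.normal(key, (256, 256))          # random number generation
   b = jnp.matmul(a, a.T)                          # dense GEMM
   spectrum = jnp.fft.fft(a[0])                    # 1-D FFT
   x = jnp.linalg.solve(b + 256.0 * jnp.eye(256),  # well-conditioned linear solve
                        jnp.ones(256))
   print(jax.devices(), x.shape, spectrum.dtype)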
View File

@@ -4,8 +4,6 @@
:description: PyTorch compatibility
:keywords: GPU, PyTorch compatibility
.. version-set:: rocm_version latest
********************************************************************************
PyTorch compatibility
********************************************************************************
@@ -58,7 +56,7 @@ Docker image compatibility
AMD validates and publishes ready-made `PyTorch images <https://hub.docker.com/r/rocm/pytorch>`_
with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are validated for `ROCm 6.3.3 <https://repo.radeon.com/rocm/apt/6.3.3/>`_.
associated inventories are validated for `ROCm 6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_.
Click the |docker-icon| icon to view the image on Docker Hub.
.. list-table:: PyTorch Docker image components
@@ -79,26 +77,11 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-6c798857b2c9526b44ba535710b93a1737546acea79b53a93c646195c272f1d5"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-98ddf20333bd01ff749b8092b1190ee369a75d3b8c71c2fac80ffdcb1a98d529?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 24.04
- `3.12.9 <https://www.python.org/downloads/release/python-3129/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-a09b21248133876fc8912a5ff4e6ee2c8d62b14120313e426b3dadda5702713d"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `3.12 <https://www.python.org/downloads/release/python-3128/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
@@ -109,11 +92,11 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-963187534467f0f9da77996762fc1d112a6faa5372277c348a505533e7876ec8"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-402c9b4f1a6b5a81c634a1932b56cbe01abb699cfcc7463d226276997c6cf8ea?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.9.21 <https://www.python.org/downloads/release/python-3921/>`_
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
@@ -124,11 +107,26 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-952f2621bd2bf3078bef19061e05b209105a82a7908e7e6cdf85014938a4d93a"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-e0608b55d408c3bfe5c19fdd57a4ced3e0eb3a495b74c309980b60b156c526dd?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.9.18 <https://www.python.org/downloads/release/python-3918/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-652cf25263d05b1de548222970aeb76e60b12de101de66751264709c0d0ff9d8?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.3.0 <https://github.com/ROCm/pytorch/tree/release/2.3>`_
- 22.04
- `3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.3.0 <https://github.com/ROCm/apex/tree/release/1.3.0>`_
- `0.18.0 <https://github.com/pytorch/vision/tree/v0.18.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
@@ -139,7 +137,7 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-a2fe20e170feb9e05da3e5728bb98e40d08567e137be8e6ba797962ed2852608"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-051976f26beab8f9aa65d999e3ad546c027b39240a0cc3ee81b114a9024f2912?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
- 22.04
@@ -154,7 +152,7 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-7f231937c897cca5f89e360be33c70a2017d60f62d1fbe81292be48c15fe345b"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-88c839a364d109d3748c100385bfa100d28090d25118cc723fd0406390ab2f7e?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
- 20.04
@@ -169,14 +167,14 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-616a47758004f91951e2da6c1fe291f903de65a7b2318d4b18359b48fe3032f4"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-994424ed07a63113f79dd9aa72159124c00f5fbfe18127151e6658f7d0b6f821?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
- 22.04
- `3.9.21 <https://www.python.org/downloads/release/python-3921/>`_
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
- `2.19.0 <https://github.com/tensorflow/tensorboard/tree/2.19>`_
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
@@ -184,7 +182,7 @@ Click the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-a2cfb365aea58b84595e241ffdb0d5ef3e6566e98c10b5499f4aa29983a74ea2"><i class="fab fa-docker fa-lg"></i></a>
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-7b8139fe40a9aeb4bca3aecd15c22c1fa96e867d93479fa3a24fdeeeeafa1219?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
- 20.04
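The Docker image inventory above pairs each PyTorch build with a ROCm release. Inside any of those containers, a minimal sketch like the following (assuming a ROCm build of PyTorch and at least one visible GPU; on ROCm, HIP devices are exposed through the ``cuda`` device type) can confirm the backend before running framework workloads:

.. code-block:: python

   # Sketch only: verify the ROCm backend shipped in the container and run
   # a small GPU matmul. torch.version.hip is populated on ROCm builds and
   # is None on CPU/CUDA builds.
   import torch

   print("PyTorch:", torch.__version__, "HIP:", torch.version.hip)
   print("GPU available:", torch.cuda.is_available())

   if torch.cuda.is_available():
       x = torch.randn(1024, 1024, device="cuda")  # HIP device via "cuda"
       y = x @ x.T                                  # GEMM on the GPU
       print(y.device, y.shape)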
@@ -202,8 +200,7 @@ Critical ROCm libraries for PyTorch
The functionality of PyTorch with ROCm is determined by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers. The versions described
are available in ROCm :version:`rocm_version`.
performance, and feature set available to developers.
.. list-table::
:header-rows: 1
@@ -213,28 +210,28 @@ are available in ROCm :version:`rocm_version`.
- Purpose
- Used in
* - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
- :version-ref:`"Composable Kernel" rocm_version`
- 1.1.0
- Enables faster execution of core operations like matrix multiplication
(GEMM), convolutions and transformations.
- Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
and ``torch.nn.MultiheadAttention``.
* - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
- :version-ref:`hipBLAS rocm_version`
- 2.3.0
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
matrix and vector operations.
- Supports operations like matrix multiplication, matrix-vector products,
and tensor contractions. Utilized in both dense and batched linear
algebra operations.
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
- :version-ref:`hipBLASLt rocm_version`
- 0.10.0
- hipBLASLt is an extension of the hipBLAS library, providing additional
features like epilogues fused into the matrix multiplication kernel or
use of integer tensor cores.
- It accelerates operations like ``torch.matmul``, ``torch.mm``, and the
matrix multiplications used in convolutional and linear layers.
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
- :version-ref:`hipCUB rocm_version`
- 3.3.0
- Provides a C++ template library for parallel algorithms for reduction,
scan, sort and select.
- Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
@@ -242,93 +239,93 @@ are available in ROCm :version:`rocm_version`.
irregular shapes often involve scanning, sorting, and filtering, which
hipCUB handles efficiently.
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
- :version-ref:`hipFFT rocm_version`
- 1.0.17
- Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
- Used in functions like the ``torch.fft`` module.
* - `hipRAND <https://github.com/ROCm/hipRAND>`_
- :version-ref:`hipRAND rocm_version`
- 2.11.0
- Provides fast random number generation for GPUs.
- The ``torch.rand``, ``torch.randn`` and stochastic layers like
``torch.nn.Dropout``.
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
- :version-ref:`hipSOLVER rocm_version`
- 2.3.0
- Provides GPU-accelerated solvers for linear systems, eigenvalues, and
singular value decompositions (SVD).
- Supports functions like ``torch.linalg.solve``,
``torch.linalg.eig``, and ``torch.linalg.svd``.
* - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
- :version-ref:`hipSPARSE rocm_version`
- 3.1.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse tensor operations ``torch.sparse``.
* - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
- :version-ref:`hipSPARSELt rocm_version`
- 0.2.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse tensor operations ``torch.sparse``.
* - `hipTensor <https://github.com/ROCm/hipTensor>`_
- :version-ref:`hipTensor rocm_version`
- 1.4.0
- Optimizes for high-performance tensor operations, such as contractions.
- Accelerates tensor algebra, especially in deep learning and scientific
computing.
* - `MIOpen <https://github.com/ROCm/MIOpen>`_
- :version-ref:`MIOpen rocm_version`
- 3.3.0
- Optimizes deep learning primitives such as convolutions, pooling,
normalization, and activation functions.
- Speeds up convolutional neural networks (CNNs), recurrent neural
networks (RNNs), and other layers. Used in operations like
``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
* - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
- :version-ref:`MIGraphX rocm_version`
- 2.11.0
- Adds graph-level optimizations, ONNX model and mixed-precision support,
and enables Ahead-of-Time (AOT) compilation.
- Speeds up inference models and executes ONNX models for
compatibility with other frameworks.
* - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
- :version-ref:`MIVisionX rocm_version`
- 3.1.0
- Optimizes acceleration for computer vision and AI workloads like
preprocessing, augmentation, and inferencing.
- Faster data preprocessing and augmentation pipelines for datasets like
ImageNet or COCO and easy to integrate into PyTorch's ``torch.utils.data``
and ``torchvision`` workflows.
* - `rocAL <https://github.com/ROCm/rocAL>`_
- :version-ref:`rocAL rocm_version`
- 2.1.0
- Accelerates the data pipeline by offloading intensive preprocessing and
augmentation tasks. rocAL is part of MIVisionX.
- Easy to integrate into PyTorch's ``torch.utils.data`` and
``torchvision`` data load workloads.
* - `RCCL <https://github.com/ROCm/rccl>`_
- :version-ref:`RCCL rocm_version`
- 2.21.5
- Optimizes for multi-GPU communication for operations like AllReduce and
Broadcast.
- Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
Handles communication in multi-GPU setups.
* - `rocDecode <https://github.com/ROCm/rocDecode>`_
- :version-ref:`rocDecode rocm_version`
- 0.8.0
- Provides hardware-accelerated data decoding capabilities, particularly
for image, video, and other dataset formats.
- Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
and ``torch.distributed``.
* - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
- :version-ref:`rocJPEG rocm_version`
- 0.6.0
- Provides hardware-accelerated JPEG image decoding and encoding.
- GPU accelerated ``torchvision.io.decode_jpeg`` and
``torchvision.io.encode_jpeg`` and can be integrated in
``torch.utils.data`` and ``torchvision``.
* - `RPP <https://github.com/ROCm/RPP>`_
- :version-ref:`RPP rocm_version`
- 1.9.1
- Speeds up data augmentation, transformation, and other preprocessing steps.
- Easy to integrate into PyTorch's ``torch.utils.data`` and
``torchvision`` data load workloads.
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
- :version-ref:`rocThrust rocm_version`
- 3.3.0
- Provides a C++ template library for parallel algorithms like sorting,
reduction, and scanning.
- Utilized in backend operations for tensor computations requiring
parallel processing.
* - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
- :version-ref:`rocWMMA rocm_version`
- 1.6.0
- Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix
multiplication (GEMM) and accumulation operations with mixed precision
support.

View File

@@ -4,8 +4,6 @@
:description: TensorFlow compatibility
:keywords: GPU, TensorFlow compatibility
.. version-set:: rocm_version latest
*******************************************************************************
TensorFlow compatibility
*******************************************************************************
@@ -56,7 +54,7 @@ Docker image compatibility
AMD validates and publishes ready-made `TensorFlow images
<https://hub.docker.com/r/rocm/tensorflow>`_ with ROCm backends on
Docker Hub. The following Docker image tags and associated inventories are
validated for `ROCm 6.3.3 <https://repo.radeon.com/rocm/apt/6.3.3/>`_. Click
validated for `ROCm 6.3.1 <https://repo.radeon.com/rocm/apt/6.3.1/>`_. Click
the |docker-icon| icon to view the image on Docker Hub.
.. list-table:: TensorFlow Docker image components
@@ -70,47 +68,47 @@ the |docker-icon| icon to view the image on Docker Hub.
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.12-tf2.17-dev/images/sha256-fd2653f436880366cc874aa24264ca9dabd892d76ccb63fb807debba459bcaaf"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.12-tf2.17.0-dev/images/sha256-804121ee4985718277ba7dcec53c57bdade130a1ef42f544b6c48090ad379c17"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.17.0-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.17.0-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.12.4 <https://www.python.org/downloads/release/python-3124/>`_
- `Python 3.12 <https://www.python.org/downloads/release/python-3124/>`_
- `TensorBoard 2.17.1 <https://github.com/tensorflow/tensorboard/tree/2.17.1>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.10-tf2.17-dev/images/sha256-8a5eb7443798935dd269575e2abae847b702e1dfb06766ab84f081a6314d8b95"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.10-tf2.17.0-dev/images/sha256-776837ffa945913f6c466bfe477810a11453d21d5b6afb200be1c36e48fbc08e"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.17.0-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.17.0-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `TensorBoard 2.17.1 <https://github.com/tensorflow/tensorboard/tree/2.17.1>`_
- `Python 3.10 <https://www.python.org/downloads/release/python-31012/>`_
- `TensorBoard 2.17.0 <https://github.com/tensorflow/tensorboard/tree/2.17.0>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.12-tf2.16-dev/images/sha256-8fc939b10cdd6d2b11407474880d4c8ab2b52ab6e2d1743c921fc2adbfd0422f"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.12-tf2.16.2-dev/images/sha256-c793e1483e30809c3c28fc5d7805bedc033c73da224f839fff370717cb100944"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.16.2-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.16.2-cp312-cp312-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.12.4 <https://www.python.org/downloads/release/python-3124/>`_
- `Python 3.12 <https://www.python.org/downloads/release/python-3124/>`_
- `TensorBoard 2.16.2 <https://github.com/tensorflow/tensorboard/tree/2.16.2>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.10-tf2.16-dev/images/sha256-a4cc6ab23d59fdf5459ceac1f0a603e6c16ae7f885d30e42c0c2b3ac60c2ad10"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.10-tf2.16.0-dev/images/sha256-263e78414ae85d7bcd52a025a94131d0a279872a45ed632b9165336dfdcd4443"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.16.2-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.16.2-cp310-cp310-manylinux_2_28_x86_64.whl>`__
- dev
- `Python 3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `Python 3.10 <https://www.python.org/downloads/release/python-31012/>`_
- `TensorBoard 2.16.2 <https://github.com/tensorflow/tensorboard/tree/2.16.2>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.3-py3.10-tf2.15-dev/images/sha256-60887c488421184adcb60b9ed4f72a8bd7bdb64d238e50943ca7cbde38e4aa48"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
<a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.10-tf2.15.0-dev/images/sha256-479046a8477ca701a9494a813ab17e8ab4f6baa54641e65dc8d07629f1e6a880"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>
- `tensorflow-rocm 2.15.1 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.3/tensorflow_rocm-2.15.1-cp310-cp310-manylinux_2_28_x86_64.whl>`_
- `tensorflow-rocm 2.15.1 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.15.1-cp310-cp310-manylinux_2_28_x86_64.whl>`_
- dev
- `Python 3.10.16 <https://www.python.org/downloads/release/python-31016/>`_
- `Python 3.10 <https://www.python.org/downloads/release/python-31012/>`_
- `TensorBoard 2.15.2 <https://github.com/tensorflow/tensorboard/tree/2.15.2>`_
Critical ROCm libraries for TensorFlow
@@ -119,8 +117,7 @@ Critical ROCm libraries for TensorFlow
TensorFlow depends on multiple components and the supported features of those
components can affect the TensorFlow ROCm supported feature set. The versions
in the following table refer to the first TensorFlow version where the ROCm
library was introduced as a dependency. The versions described
are available in ROCm :version:`rocm_version`.
library was introduced as a dependency.
.. list-table::
:widths: 25, 10, 35, 30
@@ -131,43 +128,43 @@ are available in ROCm :version:`rocm_version`.
- Purpose
- Used in
* - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
- :version-ref:`hipBLAS rocm_version`
- 2.3.0
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
matrix and vector operations.
- Accelerates operations like ``tf.matmul``, ``tf.linalg.matmul``, and
other matrix multiplications commonly used in neural network layers.
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
- :version-ref:`hipBLASLt rocm_version`
- 0.10.0
- Extends hipBLAS with additional optimizations like fused kernels and
integer tensor cores.
- Optimizes matrix multiplications and linear algebra operations used in
layers like dense, convolutional, and RNNs in TensorFlow.
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
- :version-ref:`hipCUB rocm_version`
- 3.3.0
- Provides a C++ template library for parallel algorithms for reduction,
scan, sort and select.
- Supports operations like ``tf.reduce_sum``, ``tf.cumsum``, ``tf.sort``
and other tensor operations in TensorFlow, especially those involving
scanning, sorting, and filtering.
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
- :version-ref:`hipFFT rocm_version`
- 1.0.17
- Accelerates Fast Fourier Transforms (FFT) for signal processing tasks.
- Used for operations like signal processing, image filtering, and
certain types of neural networks requiring FFT-based transformations.
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
- :version-ref:`hipSOLVER rocm_version`
- 2.3.0
- Provides GPU-accelerated direct linear solvers for dense and sparse
systems.
- Optimizes linear algebra functions such as solving systems of linear
equations, often used in optimization and training tasks.
* - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
- :version-ref:`hipSPARSE rocm_version`
- 3.1.2
- Optimizes sparse matrix operations for efficient computations on sparse
data.
- Accelerates sparse matrix operations in models with sparse weight
matrices or activations, commonly used in neural networks.
* - `MIOpen <https://github.com/ROCm/MIOpen>`_
- :version-ref:`MIOpen rocm_version`
- 3.3.0
- Provides optimized deep learning primitives such as convolutions,
pooling,
normalization, and activation functions.
@@ -175,13 +172,13 @@ are available in ROCm :version:`rocm_version`.
in TensorFlow for layers like ``tf.nn.conv2d``, ``tf.nn.relu``, and
``tf.nn.lstm_cell``.
* - `RCCL <https://github.com/ROCm/rccl>`_
- :version-ref:`RCCL rocm_version`
- 2.21.5
- Optimizes for multi-GPU communication for operations like AllReduce and
Broadcast.
- Distributed data parallel training (``tf.distribute.MirroredStrategy``).
Handles communication in multi-GPU setups.
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
- :version-ref:`rocThrust rocm_version`
- 3.3.0
- Provides a C++ template library for parallel algorithms like sorting,
reduction, and scanning.
- Reduction operations like ``tf.reduce_sum``, ``tf.cumsum`` for computing

View File

@@ -1,916 +0,0 @@
.. meta::
:description: PyTorch compatibility
:keywords: GPU, PyTorch compatibility
********************************************************************************
PyTorch compatibility
********************************************************************************
`PyTorch <https://pytorch.org/>`_ is an open-source tensor library designed for
deep learning. PyTorch on ROCm provides mixed-precision and large-scale training
using `MIOpen <https://github.com/ROCm/MIOpen>`_ and
`RCCL <https://github.com/ROCm/rccl>`_ libraries.
ROCm support for PyTorch is upstreamed into the official PyTorch repository. Due to independent
compatibility considerations, this results in two distinct release cycles for PyTorch on ROCm:
- ROCm PyTorch release:
- Provides the latest version of ROCm but doesn't immediately support the latest stable PyTorch
version.
- Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
pre-installed.
- ROCm PyTorch repository: `<https://github.com/rocm/pytorch>`__
- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.
- Official PyTorch release:
- Provides the latest stable version of PyTorch but doesn't immediately support the latest ROCm version.
- Official PyTorch repository: `<https://github.com/pytorch/pytorch>`__
- See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.
The upstream PyTorch includes an automatic HIPification solution that automatically generates HIP
source code from the CUDA backend. This approach allows PyTorch to support ROCm without requiring
manual code modifications.
ROCm's development is aligned with the stable release of PyTorch while upstream PyTorch testing uses
the stable release of ROCm to maintain consistency.
.. _pytorch-docker-compat:
Docker image compatibility
================================================================================
AMD validates and publishes ready-made `PyTorch <https://hub.docker.com/r/rocm/pytorch>`_
images with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are validated for `ROCm 6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_.
.. list-table:: PyTorch Docker image components
:header-rows: 1
:class: docker-image-compatibility
* - Docker
- PyTorch
- Ubuntu
- Python
- Apex
- torchvision
- TensorBoard
- MAGMA
- UCX
- OMPI
- OFED
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-98ddf20333bd01ff749b8092b1190ee369a75d3b8c71c2fac80ffdcb1a98d529?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 24.04
- `3.12 <https://www.python.org/downloads/release/python-3128/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-402c9b4f1a6b5a81c634a1932b56cbe01abb699cfcc7463d226276997c6cf8ea?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-e0608b55d408c3bfe5c19fdd57a4ced3e0eb3a495b74c309980b60b156c526dd?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
- 22.04
- `3.9 <https://www.python.org/downloads/release/python-3918/>`_
- `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
- `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-652cf25263d05b1de548222970aeb76e60b12de101de66751264709c0d0ff9d8?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.3.0 <https://github.com/ROCm/pytorch/tree/release/2.3>`_
- 22.04
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.3.0 <https://github.com/ROCm/apex/tree/release/1.3.0>`_
- `0.18.0 <https://github.com/pytorch/vision/tree/v0.18.0>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-051976f26beab8f9aa65d999e3ad546c027b39240a0cc3ee81b114a9024f2912?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
- 22.04
- `3.10 <https://www.python.org/downloads/release/python-31016/>`_
- `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
- `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-88c839a364d109d3748c100385bfa100d28090d25118cc723fd0406390ab2f7e?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
- 20.04
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
- `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
- `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
- `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-994424ed07a63113f79dd9aa72159124c00f5fbfe18127151e6658f7d0b6f821?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
- 22.04
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
- `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
* - .. raw:: html
<a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-7b8139fe40a9aeb4bca3aecd15c22c1fa96e867d93479fa3a24fdeeeeafa1219?context=explore"><i class="fab fa-docker fa-lg"></i></a>
- `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
- 20.04
- `3.9 <https://www.python.org/downloads/release/python-3921/>`_
- `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
- `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
- `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
- `master <https://bitbucket.org/icl/magma/src/master/>`_
- `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
- `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
- `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
Critical ROCm libraries for PyTorch
================================================================================
The functionality of PyTorch with ROCm is shaped by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers.
.. list-table::
:header-rows: 1
* - ROCm library
- Version
- Purpose
- Used in
* - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
- 1.1.0
- Enables faster execution of core operations like matrix multiplication
(GEMM), convolutions and transformations.
- Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
and ``torch.nn.MultiheadAttention``.
* - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
- 2.3.0
- Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
matrix and vector operations.
- Supports operations like matrix multiplication, matrix-vector products,
and tensor contractions. Utilized in both dense and batched linear
algebra operations.
* - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
- 0.10.0
- hipBLASLt is an extension of the hipBLAS library, providing additional
features like epilogues fused into the matrix multiplication kernel or
use of integer tensor cores.
- It accelerates operations like ``torch.matmul``, ``torch.mm``, and the
matrix multiplications used in convolutional and linear layers.
* - `hipCUB <https://github.com/ROCm/hipCUB>`_
- 3.3.0
- Provides a C++ template library for parallel algorithms for reduction,
scan, sort and select.
- Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
and ``torch.topk``. Operations on sparse tensors or tensors with
irregular shapes often involve scanning, sorting, and filtering, which
hipCUB handles efficiently.
* - `hipFFT <https://github.com/ROCm/hipFFT>`_
- 1.0.17
- Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
- Used in functions like the ``torch.fft`` module.
* - `hipRAND <https://github.com/ROCm/hipRAND>`_
- 2.11.0
- Provides fast random number generation for GPUs.
- The ``torch.rand``, ``torch.randn`` and stochastic layers like
``torch.nn.Dropout``.
* - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
- 2.3.0
- Provides GPU-accelerated solvers for linear systems, eigenvalues, and
singular value decompositions (SVD).
- Supports functions like ``torch.linalg.solve``,
``torch.linalg.eig``, and ``torch.linalg.svd``.
* - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
- 3.1.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse tensor operations ``torch.sparse``.
* - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
- 0.2.2
- Accelerates operations on sparse matrices, such as sparse matrix-vector
or matrix-matrix products.
- Sparse tensor operations ``torch.sparse``.
* - `hipTensor <https://github.com/ROCm/hipTensor>`_
- 1.4.0
- Optimizes for high-performance tensor operations, such as contractions.
- Accelerates tensor algebra, especially in deep learning and scientific
computing.
* - `MIOpen <https://github.com/ROCm/MIOpen>`_
- 3.3.0
- Optimizes deep learning primitives such as convolutions, pooling,
normalization, and activation functions.
- Speeds up convolutional neural networks (CNNs), recurrent neural
networks (RNNs), and other layers. Used in operations like
``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
* - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
- 2.11.0
- Adds graph-level optimizations, ONNX model and mixed-precision support,
and enables Ahead-of-Time (AOT) compilation.
- Speeds up inference models and executes ONNX models for
compatibility with other frameworks.
* - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
- 3.1.0
- Optimizes acceleration for computer vision and AI workloads like
preprocessing, augmentation, and inferencing.
- Faster data preprocessing and augmentation pipelines for datasets like
ImageNet or COCO and easy to integrate into PyTorch's ``torch.utils.data``
and ``torchvision`` workflows.
* - `rocAL <https://github.com/ROCm/rocAL>`_
- 2.1.0
- Accelerates the data pipeline by offloading intensive preprocessing and
augmentation tasks. rocAL is part of MIVisionX.
- Easy to integrate into PyTorch's ``torch.utils.data`` and
``torchvision`` data load workloads.
* - `RCCL <https://github.com/ROCm/rccl>`_
- 2.21.5
- Optimizes for multi-GPU communication for operations like AllReduce and
Broadcast.
- Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
Handles communication in multi-GPU setups.
* - `rocDecode <https://github.com/ROCm/rocDecode>`_
- 0.8.0
- Provides hardware-accelerated data decoding capabilities, particularly
for image, video, and other dataset formats.
- Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
and ``torch.distributed``.
* - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
- 0.6.0
- Provides hardware-accelerated JPEG image decoding and encoding.
- GPU accelerated ``torchvision.io.decode_jpeg`` and
``torchvision.io.encode_jpeg`` and can be integrated in
``torch.utils.data`` and ``torchvision``.
* - `RPP <https://github.com/ROCm/RPP>`_
- 1.9.1
- Speeds up data augmentation, transformation, and other preprocessing steps.
- Easy to integrate into PyTorch's ``torch.utils.data`` and
``torchvision`` data load workloads.
* - `rocThrust <https://github.com/ROCm/rocThrust>`_
- 3.3.0
- Provides a C++ template library for parallel algorithms like sorting,
reduction, and scanning.
- Utilized in backend operations for tensor computations requiring
parallel processing.
* - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
- 1.6.0
- Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix
multiplication (GEMM) and accumulation operations with mixed precision
support.
- Linear layers (``torch.nn.Linear``), convolutional layers
(``torch.nn.Conv2d``), attention layers, general tensor operations that
involve matrix products, such as ``torch.matmul``, ``torch.bmm``, and
more.
Supported and unsupported features
================================================================================
The following section maps GPU-accelerated PyTorch features to their supported
ROCm and PyTorch versions.
torch
--------------------------------------------------------------------------------
`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
PyTorch, providing data structures for multi-dimensional tensors and
implementing mathematical operations on them. It also includes utilities for
efficient serialization of tensors and arbitrary data types, along with various
other tools.
Tensor data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The data type of a tensor is specified using the ``dtype`` attribute or argument, and PyTorch supports a wide range of data types for different use cases.
The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_'s single data types:
.. list-table::
:header-rows: 1
* - Data type
- Description
- Since PyTorch
- Since ROCm
* - ``torch.float8_e4m3fn``
- 8-bit floating point, e4m3
- 2.3
- 5.5
* - ``torch.float8_e5m2``
- 8-bit floating point, e5m2
- 2.3
- 5.5
* - ``torch.float16`` or ``torch.half``
- 16-bit floating point
- 0.1.6
- 2.0
* - ``torch.bfloat16``
- 16-bit floating point
- 1.6
- 2.6
* - ``torch.float32`` or ``torch.float``
- 32-bit floating point
- 0.1.12_2
- 2.0
* - ``torch.float64`` or ``torch.double``
- 64-bit floating point
- 0.1.12_2
- 2.0
* - ``torch.complex32`` or ``torch.chalf``
- PyTorch provides native support for 32-bit complex numbers
- 1.6
- 2.0
* - ``torch.complex64`` or ``torch.cfloat``
- PyTorch provides native support for 64-bit complex numbers
- 1.6
- 2.0
* - ``torch.complex128`` or ``torch.cdouble``
- PyTorch provides native support for 128-bit complex numbers
- 1.6
- 2.0
* - ``torch.uint8``
- 8-bit integer (unsigned)
- 0.1.12_2
- 2.0
* - ``torch.uint16``
- 16-bit integer (unsigned)
- 2.3
- Not natively supported
* - ``torch.uint32``
- 32-bit integer (unsigned)
- 2.3
- Not natively supported
* - ``torch.uint64``
- 64-bit integer (unsigned)
- 2.3
- Not natively supported
* - ``torch.int8``
- 8-bit integer (signed)
- 1.12
- 5.0
* - ``torch.int16`` or ``torch.short``
- 16-bit integer (signed)
- 0.1.12_2
- 2.0
* - ``torch.int32`` or ``torch.int``
- 32-bit integer (signed)
- 0.1.12_2
- 2.0
* - ``torch.int64`` or ``torch.long``
- 64-bit integer (signed)
- 0.1.12_2
- 2.0
* - ``torch.bool``
- Boolean
- 1.2
- 2.0
* - ``torch.quint8``
- Quantized 8-bit integer (unsigned)
- 1.8
- 5.0
* - ``torch.qint8``
- Quantized 8-bit integer (signed)
- 1.8
- 5.0
* - ``torch.qint32``
- Quantized 32-bit integer (signed)
- 1.8
- 5.0
* - ``torch.quint4x2``
- Quantized 4-bit integer (unsigned)
- 1.8
- 5.0
.. note::
Unsigned types aside from ``uint8`` currently have only limited support in
eager mode (they primarily exist to assist usage with ``torch.compile``).
The :doc:`ROCm precision support page <rocm:reference/precision-support>`
lists the native hardware support for the different data types.
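
As a minimal illustrative sketch (not part of the table above), reduced-precision
tensors are allocated by passing the ``dtype`` and ``device`` arguments directly;
the shapes below are placeholders.

.. code-block:: python

   import torch

   # Minimal sketch: allocating tensors in different dtypes on the GPU.
   # Assumes a ROCm (or CUDA) build where torch.cuda.is_available() is True.
   device = "cuda" if torch.cuda.is_available() else "cpu"

   a = torch.randn(4, 4, dtype=torch.float16, device=device)   # half precision
   b = torch.randn(4, 4, dtype=torch.bfloat16, device=device)  # bfloat16
   c = torch.arange(16, dtype=torch.int32, device=device).reshape(4, 4)

   print(a.dtype, b.dtype, c.dtype)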
torch.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``torch.cuda`` in PyTorch is a module that provides utilities and functions for
managing and utilizing AMD and NVIDIA GPUs. It enables GPU-accelerated
computations, memory management, and efficient execution of tensor operations,
leveraging ROCm and CUDA as the underlying frameworks.
.. list-table::
:header-rows: 1
* - Data type
- Description
- Since PyTorch
- Since ROCm
* - Device management
- Utilities for managing and interacting with GPUs.
- 0.4.0
- 3.8
* - Tensor operations on GPU
- Perform tensor operations such as addition and matrix multiplications on
the GPU.
- 0.4.0
- 3.8
* - Streams and events
- Streams allow overlapping computation and communication for optimized
performance, events enable synchronization.
- 1.6.0
- 3.8
* - Memory management
- Functions to manage and inspect memory usage like
``torch.cuda.memory_allocated()``, ``torch.cuda.max_memory_allocated()``,
``torch.cuda.memory_reserved()`` and ``torch.cuda.empty_cache()``.
- 0.3.0
- 1.9.2
* - Running process lists of memory management
- Return a human-readable printout of the running processes and their GPU
memory use for a given device with functions like
``torch.cuda.memory_stats()`` and ``torch.cuda.memory_summary()``.
- 1.8.0
- 4.0
* - Communication collectives
- A set of APIs that enable efficient communication between multiple GPUs,
allowing for distributed computing and data parallelism.
- 1.9.0
- 5.0
* - ``torch.cuda.CUDAGraph``
- Graphs capture sequences of GPU operations to minimize kernel launch
overhead and improve performance.
- 1.10.0
- 5.3
* - TunableOp
- A mechanism that allows certain operations to be more flexible and
optimized for performance. It enables automatic tuning of kernel
configurations and other settings to achieve the best possible
performance based on the specific hardware (GPU) and workload.
- 2.0
- 5.4
* - NVIDIA Tools Extension (NVTX)
- Integration with NVTX for profiling and debugging GPU performance using
NVIDIA's Nsight tools.
- 1.8.0
- ❌
* - Lazy loading NVRTC
- Delays JIT compilation with NVRTC until the code is explicitly needed.
- 1.13.0
- ❌
* - Jiterator (beta)
- Jiterator enables just-in-time compilation of element-wise CUDA/HIP
kernels defined as Python strings through ``torch.cuda.jiterator``.
- 1.13.0
- 5.2
.. Need to validate and extend.
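
The following is a minimal sketch of the device, stream, and memory utilities
listed above; it assumes at least one visible GPU and uses placeholder tensor
sizes.

.. code-block:: python

   import torch

   if torch.cuda.is_available():
       # Device management
       print(torch.cuda.device_count(), torch.cuda.get_device_name(0))

       # Tensor operations and a side stream
       x = torch.randn(1024, 1024, device="cuda")
       side = torch.cuda.Stream()
       with torch.cuda.stream(side):
           y = x @ x                          # queued on the side stream
       torch.cuda.synchronize()               # wait for all queued work

       # Memory management
       print(torch.cuda.memory_allocated())   # bytes currently allocated
       print(torch.cuda.memory_reserved())    # bytes held by the caching allocator
       torch.cuda.empty_cache()               # release unused cached blocks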
torch.backends.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``torch.backends.cuda`` is a PyTorch module that provides configuration options
and flags to control the behavior of CUDA or ROCm operations. It is part of the
PyTorch backend configuration system, which allows users to fine-tune how
PyTorch interacts with the CUDA or ROCm environment.
.. list-table::
:header-rows: 1
* - Data type
- Description
- Since PyTorch
- Since ROCm
* - ``cufft_plan_cache``
- Manages caching of GPU FFT plans to optimize repeated FFT computations.
- 1.7.0
- 5.0
* - ``matmul.allow_tf32``
- Enables or disables the use of TensorFloat-32 (TF32) precision for
faster matrix multiplications on GPUs with Tensor Cores.
- 1.10.0
- ❌
* - ``matmul.allow_fp16_reduced_precision_reduction``
- Reduced precision reductions (e.g., with fp16 accumulation type) are
allowed with fp16 GEMMs.
- 2.0
- ❌
* - ``matmul.allow_bf16_reduced_precision_reduction``
- Reduced precision reductions are allowed with bf16 GEMMs.
- 2.0
- ❌
* - ``enable_cudnn_sdp``
- Globally enables cuDNN SDPA's kernels within SDPA.
- 2.0
- ❌
* - ``enable_flash_sdp``
- Globally enables or disables FlashAttention for SDPA.
- 2.1
- ❌
* - ``enable_mem_efficient_sdp``
- Globally enables or disables Memory-Efficient Attention for SDPA.
- 2.1
- ❌
* - ``enable_math_sdp``
- Globally enables or disables the PyTorch C++ implementation within SDPA.
- 2.1
- ❌
.. Need to validate and extend.
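
A brief sketch of querying and toggling some of these flags follows; flags
marked as unsupported on ROCm in the table are only read, not relied upon.

.. code-block:: python

   import torch

   # Query settings (safe on any build).
   print(torch.backends.cuda.matmul.allow_tf32)

   if torch.cuda.is_available():
       # FFT plan cache for the current device.
       print(torch.backends.cuda.cufft_plan_cache.max_size)

   # Select which scaled-dot-product-attention kernels SDPA may use.
   torch.backends.cuda.enable_math_sdp(True)
   torch.backends.cuda.enable_flash_sdp(False)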
torch.backends.cudnn
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Supported ``torch`` options:
.. list-table::
:header-rows: 1
* - Data type
- Description
- Since PyTorch
- Since ROCm
* - ``allow_tf32``
- TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
Ampere or newer GPUs.
- 1.12.0
- ❌
* - ``deterministic``
- A bool that, if True, causes cuDNN to only use deterministic
convolution algorithms.
- 1.12.0
- 6.0
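
For example, deterministic convolution algorithm selection can be requested as
follows (a minimal sketch; on ROCm builds this applies to MIOpen).

.. code-block:: python

   import torch

   torch.backends.cudnn.deterministic = True   # only deterministic conv algorithms
   torch.backends.cudnn.benchmark = False      # disable autotuning for reproducibility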
Automatic mixed precision: torch.amp
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``torch.amp`` is the PyTorch module that automates the use of both 16-bit
(half-precision, float16) and 32-bit (single-precision, float32) floating-point
types in model training and inference.
.. list-table::
:header-rows: 1
* - Data type
- Description
- Since PyTorch
- Since ROCm
* - Autocasting
- Instances of autocast serve as context managers or decorators that allow
regions of your script to run in mixed precision.
- 1.9
- 2.5
* - Gradient scaling
- To prevent underflow, “gradient scaling” multiplies the network's
loss(es) by a scale factor and invokes a backward pass on the scaled
loss(es). Gradients flowing backward through the network are then
scaled by the same factor. In other words, gradient values have a
larger magnitude, so they don't flush to zero.
- 1.9
- 2.5
* - CUDA op-specific behavior
- These ops always go through autocasting whether they are invoked as part
of a ``torch.nn.Module``, as a function, or as a ``torch.Tensor`` method. If
functions are exposed in multiple namespaces, they go through
autocasting regardless of the namespace.
- 1.9
- 2.5
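
A minimal training-step sketch combining autocasting and gradient scaling is
shown below; the model, optimizer, and data are placeholders, not taken from
this guide.

.. code-block:: python

   import torch

   model = torch.nn.Linear(128, 10).to("cuda")
   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
   scaler = torch.cuda.amp.GradScaler()

   inputs = torch.randn(32, 128, device="cuda")
   targets = torch.randint(0, 10, (32,), device="cuda")

   with torch.autocast(device_type="cuda", dtype=torch.float16):
       loss = torch.nn.functional.cross_entropy(model(inputs), targets)

   scaler.scale(loss).backward()   # backward pass on the scaled loss
   scaler.step(optimizer)          # unscales gradients, then optimizer.step()
   scaler.update()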
Distributed library features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The PyTorch distributed library includes a collective of parallelism modules, a
communications layer, and infrastructure for launching and debugging large
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.
The Distributed Library feature in PyTorch provides tools and APIs for building
and running distributed machine learning workflows. It allows training models
across multiple processes, GPUs, or nodes in a cluster, enabling efficient use
of computational resources and scalability for large-scale tasks.
.. list-table::
:header-rows: 1
* - Features
- Description
- Since PyTorch
- Since ROCm
* - TensorPipe
- TensorPipe is a point-to-point communication library integrated into
PyTorch for distributed training. It is designed to handle tensor data
transfers efficiently between different processes or devices, including
those on separate machines.
- 1.8
- 5.4
* - Gloo
- Gloo is designed for multi-machine and multi-GPU setups, enabling
efficient communication and synchronization between processes. Gloo is
one of the default backends for PyTorch's Distributed Data Parallel
(DDP) and RPC frameworks, alongside other backends like NCCL and MPI.
- 1.0
- 2.0
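
The sketch below shows a minimal DistributedDataParallel setup over the NCCL
backend (mapped to RCCL on ROCm); it assumes it is launched with ``torchrun``,
which provides the ``LOCAL_RANK`` environment variable.

.. code-block:: python

   import os
   import torch
   import torch.distributed as dist
   from torch.nn.parallel import DistributedDataParallel as DDP

   def main():
       dist.init_process_group(backend="nccl")          # RCCL on ROCm builds
       local_rank = int(os.environ["LOCAL_RANK"])
       torch.cuda.set_device(local_rank)

       model = torch.nn.Linear(128, 10).cuda(local_rank)
       ddp_model = DDP(model, device_ids=[local_rank])  # gradients sync via AllReduce

       out = ddp_model(torch.randn(32, 128, device=local_rank))
       out.sum().backward()
       dist.destroy_process_group()

   if __name__ == "__main__":
       main()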
torch.compiler
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. list-table::
:header-rows: 1
* - Features
- Description
- Since PyTorch
- Since ROCm
* - ``torch.compiler`` (AOT Autograd)
- Autograd captures not only the user-level code, but also backpropagation,
which results in capturing the backwards pass “ahead-of-time”. This
enables acceleration of both forwards and backwards pass using
``TorchInductor``.
- 2.0
- 5.3
* - ``torch.compiler`` (TorchInductor)
- The default ``torch.compile`` deep learning compiler that generates fast
code for multiple accelerators and backends. You need to use a backend
compiler to make speedups through ``torch.compile`` possible. For AMD,
NVIDIA, and Intel GPUs, it leverages OpenAI Triton as the key building block.
- 2.0
- 5.3
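
As a minimal sketch, a function can be compiled with the default TorchInductor
backend as follows; the function and shapes are placeholders.

.. code-block:: python

   import torch

   @torch.compile
   def gelu_mlp(x, w1, w2):
       return torch.nn.functional.gelu(x @ w1) @ w2

   x = torch.randn(64, 256, device="cuda")
   w1 = torch.randn(256, 1024, device="cuda")
   w2 = torch.randn(1024, 256, device="cuda")

   y = gelu_mlp(x, w1, w2)   # first call compiles, later calls reuse the kernel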
torchaudio
--------------------------------------------------------------------------------
The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
utilities for processing audio data in PyTorch, such as audio loading,
transformations, and feature extraction.
To ensure GPU acceleration with ``torchaudio.transforms``, you need to move the
audio data (waveform tensor) explicitly to the GPU using ``.to('cuda')``.
The following ``torchaudio`` features are GPU-accelerated.
.. list-table::
:header-rows: 1
* - Features
- Description
- Since torchaudio version
- Since ROCm
* - ``torchaudio.transforms.Spectrogram``
- Generate spectrogram of an input waveform using STFT.
- 0.6.0
- 4.5
* - ``torchaudio.transforms.MelSpectrogram``
- Generate the mel-scale spectrogram of raw audio signals.
- 0.9.0
- 4.5
* - ``torchaudio.transforms.MFCC``
- Extraction of MFCC features.
- 0.9.0
- 4.5
* - ``torchaudio.transforms.Resample``
- Resample a signal from one frequency to another.
- 0.9.0
- 4.5
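
The sketch below moves both the waveform and the transform to the GPU so the
mel-spectrogram is computed there; the audio file path is a placeholder.

.. code-block:: python

   import torchaudio

   waveform, sample_rate = torchaudio.load("example.wav")   # placeholder file
   waveform = waveform.to("cuda")

   mel = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate).to("cuda")
   mel_spec = mel(waveform)                                  # computed on the GPU
   print(mel_spec.shape, mel_spec.device)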
torchvision
--------------------------------------------------------------------------------
The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
provides datasets, model architectures, and common image transformations for
computer vision.
The following ``torchvision`` features are GPU-accelerated.
.. list-table::
:header-rows: 1
* - Features
- Description
- Since torchvision version
- Since ROCm
* - ``torchvision.transforms.functional``
- Provides GPU-compatible transformations for image preprocessing like
resize, normalize, rotate and crop.
- 0.2.0
- 4.0
* - ``torchvision.ops``
- GPU-accelerated operations for object detection and segmentation tasks.
``torchvision.ops.roi_align``, ``torchvision.ops.nms`` and
``box_convert``.
- 0.6.0
- 3.3
* - ``torchvision.models`` with ``.to('cuda')``
- ``torchvision`` provides several pre-trained models (ResNet, Faster
R-CNN, Mask R-CNN, ...) that can run on CUDA for faster inference and
training.
- 0.1.6
- 2.x
* - ``torchvision.io``
- Video decoding and frame extraction using GPU acceleration with NVIDIA's
NVDEC and nvJPEG (rocJPEG) on CUDA-enabled GPUs.
- 0.4.0
- 6.3
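
A minimal sketch of GPU inference with a pre-trained model and a GPU-side
``torchvision.ops.nms`` call follows; the input tensors are placeholders.

.. code-block:: python

   import torch
   import torchvision

   model = torchvision.models.resnet50(weights="IMAGENET1K_V2").to("cuda").eval()
   with torch.no_grad():
       logits = model(torch.randn(1, 3, 224, 224, device="cuda"))

   boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]], device="cuda")
   scores = torch.tensor([0.9, 0.8], device="cuda")
   keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5)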
torchtext
--------------------------------------------------------------------------------
The `torchtext <https://pytorch.org/text/stable/index.html>`_ library provides
utilities for processing and working with text data in PyTorch, including
tokenization, vocabulary management, and text embeddings. torchtext supports
preprocessing pipelines and integration with PyTorch models, simplifying the
implementation of natural language processing (NLP) tasks.
To leverage GPU acceleration in torchtext, you need to move tensors
explicitly to the GPU using ``.to('cuda')``.
* torchtext does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
* Only official release exists.
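
A minimal sketch: since torchtext has no GPU kernels of its own, acceleration
comes from moving the tensors it produces (for example, token index tensors)
onto the GPU before they reach the model. The ids below are placeholders.

.. code-block:: python

   import torch

   token_ids = torch.tensor([[2, 45, 917, 3]], dtype=torch.long)       # placeholder ids
   embedding = torch.nn.Embedding(num_embeddings=1000, embedding_dim=64).to("cuda")
   vectors = embedding(token_ids.to("cuda"))                           # runs on the GPU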
torchtune
--------------------------------------------------------------------------------
The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library is
used for authoring, fine-tuning, and experimenting with LLMs.
* Usage: It works out-of-the-box, enabling developers to fine-tune ROCm PyTorch solutions.
* Only official release exists.
torchserve
--------------------------------------------------------------------------------
`torchserve <https://pytorch.org/torchserve/>`_ is a tool for serving and
scaling PyTorch models in production.
* torchserve does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
* Only official release exists.
torchrec
--------------------------------------------------------------------------------
`torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
common sparsity and parallelism primitives needed for large-scale recommender
systems.
* torchrec does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.
* Only official release exists.
Unsupported PyTorch features
----------------------------
The following are GPU-accelerated PyTorch features not currently supported by ROCm.
.. list-table::
:widths: 30, 60, 10
:header-rows: 1
* - Data type
- Description
- Since PyTorch
* - APEX batch norm
- Use APEX batch norm instead of PyTorch batch norm.
- 1.6.0
* - ``torch.backends.cuda`` / ``matmul.allow_tf32``
- A bool that controls whether TensorFloat-32 tensor cores may be used in
matrix multiplications.
- 1.7
* - ``torch.cuda`` / NVIDIA Tools Extension (NVTX)
- Integration with NVTX for profiling and debugging GPU performance using
NVIDIA's Nsight tools.
- 1.7.0
* - ``torch.cuda`` / Lazy loading NVRTC
- Delays JIT compilation with NVRTC until the code is explicitly needed.
- 1.8.0
* - ``torch-tensorrt``
- Integrates the TensorRT library for optimizing and deploying PyTorch models.
ROCm does not have an equivalent library for TensorRT.
- 1.9.0
* - ``torch.backends`` / ``cudnn.allow_tf32``
- TensorFloat-32 tensor cores may be used in cuDNN convolutions.
- 1.10.0
* - ``torch.backends.cuda`` / ``matmul.allow_fp16_reduced_precision_reduction``
- Reduced precision reductions with fp16 accumulation type are
allowed with fp16 GEMMs.
- 2.0
* - ``torch.backends.cuda`` / ``matmul.allow_bf16_reduced_precision_reduction``
- Reduced precision reductions are allowed with bf16 GEMMs.
- 2.0
* - ``torch.nn.functional`` / ``scaled_dot_product_attention``
- Flash attention backend for SDPA to accelerate attention computation in
transformer-based models.
- 2.0
* - ``torch.backends.cuda`` / ``enable_cudnn_sdp``
- Globally enables cuDNN SDPA's kernels within SDPA.
- 2.0
* - ``torch.backends.cuda`` / ``enable_flash_sdp``
- Globally enables or disables FlashAttention for SDPA.
- 2.1
* - ``torch.backends.cuda`` / ``enable_mem_efficient_sdp``
- Globally enables or disables Memory-Efficient Attention for SDPA.
- 2.1
* - ``torch.backends.cuda`` / ``enable_math_sdp``
- Globally enables or disables the PyTorch C++ implementation within SDPA.
- 2.1
* - Dynamic parallelism
- PyTorch itself does not directly expose dynamic parallelism as a core
feature. Dynamic parallelism allows GPU threads to launch additional
threads, which can be accessed through custom operations built with the
``torch.utils.cpp_extension`` module.
- Not a core feature
* - Unified memory support in PyTorch
- Unified Memory is not directly exposed in PyTorch's core API, but it can
be utilized effectively through custom CUDA extensions or advanced
workflows.
- Not a core feature
Use cases and recommendations
================================================================================
* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/train-a-model>` provides
guidance on how to leverage the ROCm platform for training AI models. It covers the steps, tools, and best practices
for optimizing training workflows on AMD GPUs using PyTorch features.
* :doc:`Single-GPU fine-tuning and inference </how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference>`
describes and demonstrates how to use the ROCm platform for the fine-tuning and inference of
machine learning models, particularly large language models (LLMs), on systems with a single AMD
Instinct MI300X accelerator. This page provides a detailed guide for setting up, optimizing, and
executing fine-tuning and inference workflows in such environments.
* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference>`
describes and demonstrates the fine-tuning and inference of machine learning models on systems
with multi MI300X accelerators.
* The :doc:`Instinct MI300X workload optimization guide </how-to/tuning-guides/mi300x/workload>` provides detailed
guidance on optimizing workloads for the AMD Instinct MI300X accelerator using ROCm. This guide is aimed at helping
users achieve optimal performance for deep learning and other high-performance computing tasks on the MI300X
accelerator.
* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
describes how PyTorch integrates with ROCm for AI workloads. It outlines the use of PyTorch on the ROCm platform and
focuses on how to efficiently leverage AMD GPU hardware for training and inference tasks in AI applications.
For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.

View File

@@ -32,7 +32,7 @@ architecture.
* [AMD Instinct™ MI250 microarchitecture](./gpu-arch/mi250.md)
* [AMD Instinct MI200/CDNA2 ISA](https://www.amd.com/system/files/TechDocs/instinct-mi200-cdna2-instruction-set-architecture.pdf)
* [White paper](https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna2-white-paper.pdf)
* [White paper](https://www.amd.com/system/files/documents/amd-cdna2-white-paper.pdf)
* [Performance counters](./gpu-arch/mi300-mi200-performance-counters.rst)
:::
@@ -45,7 +45,7 @@ architecture.
* [AMD Instinct™ MI100 microarchitecture](./gpu-arch/mi100.md)
* [AMD Instinct MI100/CDNA1 ISA](https://www.amd.com/system/files/TechDocs/instinct-mi100-cdna1-shader-instruction-set-architecture%C2%A0.pdf)
* [White paper](https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna-white-paper.pdf)
* [White paper](https://www.amd.com/system/files/documents/amd-cdna-whitepaper.pdf)
:::
@@ -55,6 +55,7 @@ architecture.
* [AMD RDNA3 ISA](https://www.amd.com/system/files/TechDocs/rdna3-shader-instruction-set-architecture-feb-2023_0.pdf)
* [AMD RDNA2 ISA](https://www.amd.com/system/files/TechDocs/rdna2-shader-instruction-set-architecture.pdf)
* [AMD RDNA ISA](https://www.amd.com/system/files/TechDocs/rdna-shader-instruction-set-architecture.pdf)
* [AMD RDNA Architecture White Paper](https://www.amd.com/system/files/documents/rdna-whitepaper.pdf)
:::

View File

@@ -6,8 +6,6 @@
import os
import shutil
import sys
from pathlib import Path
shutil.copy2("../RELEASE.md", "./about/release-notes.md")
@@ -32,15 +30,15 @@ if os.environ.get("READTHEDOCS", "") == "True":
project = "ROCm Documentation"
author = "Advanced Micro Devices, Inc."
copyright = "Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved."
version = "6.3.3"
release = "6.3.3"
version = "6.3.2"
release = "6.3.2"
setting_all_article_info = True
all_article_info_os = ["linux", "windows"]
all_article_info_author = ""
# pages with specific settings
article_pages = [
{"file": "about/release-notes", "os": ["linux"], "date": "2025-02-19"},
{"file": "about/release-notes", "os": ["linux"], "date": "2025-01-28"},
{"file": "compatibility/compatibility-matrix", "os": ["linux"]},
{"file": "compatibility/ml-compatibility/pytorch-compatibility", "os": ["linux"]},
{"file": "compatibility/ml-compatibility/tensorflow-compatibility", "os": ["linux"]},
@@ -51,9 +49,6 @@ article_pages = [
{"file": "how-to/rocm-for-ai/training/index", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/training/train-a-model", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/training/prerequisite-system-validation", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/training/benchmark-docker/megatron-lm", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/training/benchmark-docker/pytorch-training", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/training/scale-model-training", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/fine-tuning/index", "os": ["linux"]},
@@ -68,7 +63,7 @@ article_pages = [
{"file": "how-to/rocm-for-ai/inference/llm-inference-frameworks", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/inference/vllm-benchmark", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/inference/deploy-your-model", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/inference-optimization/index", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/inference-optimization/model-quantization", "os": ["linux"]},
{"file": "how-to/rocm-for-ai/inference-optimization/model-acceleration-libraries", "os": ["linux"]},
@@ -91,16 +86,11 @@ article_pages = [
external_toc_path = "./sphinx/_toc.yml"
# Add the _extensions directory to Python's search path
sys.path.append(str(Path(__file__).parent / 'extension'))
extensions = ["rocm_docs", "sphinx_reredirects", "sphinx_sitemap", "sphinxcontrib.datatemplates", "version-ref"]
compatibility_matrix_file = str(Path(__file__).parent / 'compatibility/compatibility-matrix-historical-6.0.csv')
extensions = ["rocm_docs", "sphinx_reredirects", "sphinx_sitemap"]
external_projects_current_project = "rocm"
# Uncomment if facing rate limit exceed issue with local build
# Uncomment if facing rate limit exceed issue with local build
# external_projects_remote_repository = ""
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "https://rocm-stg.amd.com/")
@@ -111,9 +101,8 @@ if os.environ.get("READTHEDOCS", "") == "True":
html_theme = "rocm_docs_theme"
html_theme_options = {"flavor": "rocm-docs-home"}
html_static_path = ["sphinx/static/css", "extension/how-to/rocm-for-ai/inference"]
html_css_files = ["rocm_custom.css", "rocm_rn.css", "vllm-benchmark.css"]
html_js_files = ["vllm-benchmark.js"]
html_static_path = ["sphinx/static/css"]
html_css_files = ["rocm_custom.css", "rocm_rn.css"]
html_title = "ROCm Documentation"

View File

@@ -1,153 +0,0 @@
vllm_benchmark:
unified_docker:
latest:
pull_tag: rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6/images/sha256-9a12ef62bbbeb5a4c30a01f702c8e025061f575aa129f291a49fbd02d6b4d6c9
rocm_version: 6.3.1
vllm_version: 0.6.6
pytorch_version: 2.7.0 (2.7.0a0+git3a58512)
model_groups:
- group: Llama
tag: llama
models:
- model: Llama 3.1 8B
mad_tag: pyt_vllm_llama-3.1-8b
model_repo: meta-llama/Llama-3.1-8B-Instruct
url: https://huggingface.co/meta-llama/Llama-3.1-8B
precision: float16
- model: Llama 3.1 70B
mad_tag: pyt_vllm_llama-3.1-70b
model_repo: meta-llama/Llama-3.1-70B-Instruct
url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct
precision: float16
- model: Llama 3.1 405B
mad_tag: pyt_vllm_llama-3.1-405b
model_repo: meta-llama/Llama-3.1-405B-Instruct
url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
precision: float16
- model: Llama 3.2 11B Vision
mad_tag: pyt_vllm_llama-3.2-11b-vision-instruct
model_repo: meta-llama/Llama-3.2-11B-Vision-Instruct
url: https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct
precision: float16
- model: Llama 2 7B
mad_tag: pyt_vllm_llama-2-7b
model_repo: meta-llama/Llama-2-7b-chat-hf
url: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
precision: float16
- model: Llama 2 70B
mad_tag: pyt_vllm_llama-2-70b
model_repo: meta-llama/Llama-2-70b-chat-hf
url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
precision: float16
- model: Llama 3.1 70B FP8
mad_tag: pyt_vllm_llama-3.1-70b_fp8
model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV
url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV
precision: float8
- model: Llama 3.1 405B FP8
mad_tag: pyt_vllm_llama-3.1-405b_fp8
model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV
url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV
precision: float8
- group: Mistral
tag: mistral
models:
- model: Mixtral MoE 8x7B
mad_tag: pyt_vllm_mixtral-8x7b
model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1
url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
precision: float16
- model: Mixtral MoE 8x22B
mad_tag: pyt_vllm_mixtral-8x22b
model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1
url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
precision: float16
- model: Mistral 7B
mad_tag: pyt_vllm_mistral-7b
model_repo: mistralai/Mistral-7B-Instruct-v0.3
url: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3
precision: float16
- model: Mixtral MoE 8x7B FP8
mad_tag: pyt_vllm_mixtral-8x7b_fp8
model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
precision: float8
- model: Mixtral MoE 8x22B FP8
mad_tag: pyt_vllm_mixtral-8x22b_fp8
model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
precision: float8
- model: Mistral 7B FP8
mad_tag: pyt_vllm_mistral-7b_fp8
model_repo: amd/Mistral-7B-v0.1-FP8-KV
url: https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV
precision: float8
- group: Qwen
tag: qwen
models:
- model: Qwen2 7B
mad_tag: pyt_vllm_qwen2-7b
model_repo: Qwen/Qwen2-7B-Instruct
url: https://huggingface.co/Qwen/Qwen2-7B-Instruct
precision: float16
- model: Qwen2 72B
mad_tag: pyt_vllm_qwen2-72b
model_repo: Qwen/Qwen2-72B-Instruct
url: https://huggingface.co/Qwen/Qwen2-72B-Instruct
precision: float16
- group: JAIS
tag: jais
models:
- model: JAIS 13B
mad_tag: pyt_vllm_jais-13b
model_repo: core42/jais-13b-chat
url: https://huggingface.co/core42/jais-13b-chat
precision: float16
- model: JAIS 30B
mad_tag: pyt_vllm_jais-30b
model_repo: core42/jais-30b-chat-v3
url: https://huggingface.co/core42/jais-30b-chat-v3
precision: float16
- group: DBRX
tag: dbrx
models:
- model: DBRX Instruct
mad_tag: pyt_vllm_dbrx-instruct
model_repo: databricks/dbrx-instruct
url: https://huggingface.co/databricks/dbrx-instruct
precision: float16
- model: DBRX Instruct FP8
mad_tag: pyt_vllm_dbrx_fp8
model_repo: amd/dbrx-instruct-FP8-KV
url: https://huggingface.co/amd/dbrx-instruct-FP8-KV
precision: float8
- group: Gemma
tag: gemma
models:
- model: Gemma 2 27B
mad_tag: pyt_vllm_gemma-2-27b
model_repo: google/gemma-2-27b
url: https://huggingface.co/google/gemma-2-27b
precision: float16
- group: Cohere
tag: cohere
models:
- model: C4AI Command R+ 08-2024
mad_tag: pyt_vllm_c4ai-command-r-plus-08-2024
model_repo: CohereForAI/c4ai-command-r-plus-08-2024
url: https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024
precision: float16
- model: C4AI Command R+ 08-2024 FP8
mad_tag: pyt_vllm_command-r-plus_fp8
model_repo: amd/c4ai-command-r-plus-FP8-KV
url: https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV
precision: float8
- group: DeepSeek
tag: deepseek
models:
- model: DeepSeek MoE 16B
mad_tag: pyt_vllm_deepseek-moe-16b-chat
model_repo: deepseek-ai/deepseek-moe-16b-chat
url: https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat
precision: float16
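The YAML above is consumed by the ``datatemplate:yaml`` directive in the vLLM benchmarking page. For a quick offline check of the data, here is a minimal sketch (not part of the docs build; it assumes PyYAML is installed and the file is saved locally as ``vllm-benchmark-models.yaml``) that lists each MAD tag by group:

.. code-block:: python

import yaml  # requires PyYAML; not part of the docs toolchain

with open("vllm-benchmark-models.yaml", encoding="utf-8") as f:
    data = yaml.safe_load(f)

for group in data["vllm_benchmark"]["model_groups"]:
    print(group["group"])
    for model in group["models"]:
        # mad_tag is the value passed to tools/run_models.py via --tags
        print(f"  {model['model']:<28} {model['mad_tag']} ({model['precision']})")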

View File

@@ -1,212 +0,0 @@
function ready(proc) {
// Check if page is loaded. If so, init.
if (document.readyState !== "loading") {
proc();
} else {
// Otherwise, wait for DOMContentLoaded event.
document.addEventListener("DOMContentLoaded", proc);
}
}
ready(() => {
const ModelPicker = {
// Selector strings for DOM elements
SELECTORS: {
CONTAINER: "#vllm-benchmark-ud-params-picker",
MODEL_GROUP_BTN: 'div[data-param-k="model-group"][data-param-v]',
MODEL_PARAM_BTN: 'div[data-param-k="model"][data-param-v]',
MODEL_DOC: "div.model-doc",
},
CSS_CLASSES: {
HIDDEN: "hidden",
},
ATTRIBUTES: {
PARAM_KEY: "data-param-k", // URL search parameter key (i.e., "model")
PARAM_VALUE: "data-param-v", // URL search param value (e.g., "pyt_vllm_llama-3.1-8b", "pyt_vllm_llama-3.1-70b") -- these are MAD model tags
PARAM_GROUP: "data-param-group", // Model group (e.g., "llama", "mistral")
PARAM_STATE: "data-param-state", // Selection state
},
// Cache DOM elements
elements: {
container: null,
modelGroups: null,
modelParams: null,
modelDocs: null,
},
data: {
availableModels: new Set(),
modelsByGroup: new Map(),
modelToGroupMap: new Map(),
formattedModelClassMap: new Map(), //TODO
},
init() {
this.elements.container = document.querySelector(
this.SELECTORS.CONTAINER,
);
if (!this.elements.container) return;
this.cacheDOMElements();
if (!this.validateElements()) return;
this.buildModelData();
this.bindEvents();
this.initializeState();
},
cacheDOMElements() {
const { CONTAINER, MODEL_GROUP_BTN, MODEL_PARAM_BTN, MODEL_DOC } =
this.SELECTORS;
this.elements = {
container: document.querySelector(CONTAINER),
modelGroups: document.querySelectorAll(MODEL_GROUP_BTN),
modelParams: document.querySelectorAll(MODEL_PARAM_BTN),
modelDocs: document.querySelectorAll(MODEL_DOC),
};
},
validateElements() {
const { modelGroups, modelParams } = this.elements;
if (!modelGroups.length || !modelParams.length) {
console.warn("Model picker is missing required elements");
return false;
}
return true;
},
buildModelData() {
const { PARAM_VALUE, PARAM_GROUP } = this.ATTRIBUTES;
this.elements.modelParams.forEach((model) => {
const modelTag = model.getAttribute(PARAM_VALUE);
const groupTag = model.getAttribute(PARAM_GROUP);
if (!modelTag || !groupTag) return;
this.data.availableModels.add(modelTag);
this.data.modelToGroupMap.set(modelTag, groupTag);
// FIXME: this is because Sphinx auto-formats class names to use dashes
this.data.formattedModelClassMap.set(
modelTag,
modelTag.replace(/[^a-zA-Z0-9]/g, "-"),
);
if (!this.data.modelsByGroup.has(groupTag)) {
this.data.modelsByGroup.set(groupTag, []);
}
this.data.modelsByGroup.get(groupTag).push(modelTag);
});
},
// Event listeners for user interactions
bindEvents() {
const handleInteraction = (event) => {
const target = event.target.closest(`[${this.ATTRIBUTES.PARAM_KEY}]`);
if (!target) return;
const paramType = target.getAttribute(this.ATTRIBUTES.PARAM_KEY);
const paramValue = target.getAttribute(this.ATTRIBUTES.PARAM_VALUE);
if (paramType === "model") {
const groupTag = target.getAttribute(this.ATTRIBUTES.PARAM_GROUP);
if (groupTag) this.updateUI(paramValue, groupTag);
} else if (paramType === "model-group") {
const firstModelInGroup = this.data.modelsByGroup.get(paramValue)
?.[0];
if (firstModelInGroup) this.updateUI(firstModelInGroup, paramValue);
}
};
this.elements.container.addEventListener("click", handleInteraction);
this.elements.container.addEventListener("keydown", (event) => {
if (event.key === "Enter" || event.key === " ") {
event.preventDefault();
handleInteraction(event);
}
});
},
// Update the page based on the selected model
updateUI(modelTag, groupTag) {
const validModel = this.setModelSearchParam(modelTag);
// Update model group buttons
this.elements.modelGroups.forEach((group) => {
const isSelected =
group.getAttribute(this.ATTRIBUTES.PARAM_VALUE) === groupTag;
group.setAttribute(
this.ATTRIBUTES.PARAM_STATE,
isSelected ? "selected" : "",
);
group.setAttribute("aria-selected", isSelected.toString());
});
// Update model buttons
this.elements.modelParams.forEach((model) => {
const isInSelectedGroup =
model.getAttribute(this.ATTRIBUTES.PARAM_GROUP) === groupTag;
const isSelectedModel =
model.getAttribute(this.ATTRIBUTES.PARAM_VALUE) === validModel;
model.classList.toggle(this.CSS_CLASSES.HIDDEN, !isInSelectedGroup);
model.setAttribute(
this.ATTRIBUTES.PARAM_STATE,
isSelectedModel ? "selected" : "",
);
model.setAttribute("aria-selected", isSelectedModel.toString());
});
// Update visibility of doc sections
const formattedClass = this.data.formattedModelClassMap.get(validModel);
if (formattedClass) {
this.elements.modelDocs.forEach((doc) => {
doc.classList.toggle(
this.CSS_CLASSES.HIDDEN,
!doc.classList.contains(formattedClass),
);
});
}
},
// Get the current model from the URL search parameters.
getModelSearchParam() {
return new URLSearchParams(location.search).get("model");
},
// Set the model in the URL search parameters, or fallback to the first available one.
setModelSearchParam(modelTag) {
const defaultModel = [...this.data.availableModels][0];
const model = this.data.availableModels.has(modelTag)
? modelTag
: defaultModel;
const searchParams = new URLSearchParams(location.search);
searchParams.set("model", model);
history.replaceState(
{},
"",
`${location.pathname}?${searchParams.toString()}`,
);
return model;
},
// Initialize the UI state based on the current URL search parameter or default values.
initializeState() {
const currentModel = this.getModelSearchParam();
const validModel = this.setModelSearchParam(currentModel);
const initialGroup = this.data.modelToGroupMap.get(validModel) ??
[...this.data.modelsByGroup.keys()][0];
if (initialGroup) {
this.updateUI(validModel, initialGroup);
}
},
};
ModelPicker.init();
});

View File

@@ -1,266 +0,0 @@
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.util import logging
import csv
from io import StringIO
import re
import shlex
logger = logging.getLogger(__name__)
class VersionReference(nodes.Inline, nodes.TextElement):
"""Represents an inline version reference."""
pass
class VersionSetDirective(Directive):
"""Directive for setting version references within a page scope."""
required_arguments = 2 # name and value
optional_arguments = 0
def run(self):
env = self.state.document.settings.env
if not hasattr(env, 'doc_version_refs'):
env.doc_version_refs = {}
current_doc = env.docname
if current_doc not in env.doc_version_refs:
env.doc_version_refs[current_doc] = {}
name, value = self.arguments
if name.lower() == 'latest':
logger.warning('Cannot override the "latest" keyword with version-set')
return []
# Handle 'latest' value by getting the actual version
if value.lower() == 'latest':
data = getattr(env, 'compatibility_matrix', None)
if data:
latest_version = get_latest_rocm_version(data)
if latest_version:
value = latest_version
env.doc_version_refs[current_doc][name] = value
return []
def clean_library_name(name):
"""Extract library name from RST formatting."""
# Handle :doc: format
doc_match = re.search(r':doc:`([^<]+)(?:\s+<[^>]+>)?`', name)
if doc_match:
return doc_match.group(1).strip()
# Handle other link formats
link_match = re.search(r'`([^<]+)(?:\s+<[^>]+>)?`_?', name)
if link_match:
return link_match.group(1).strip()
return name.strip()
def get_latest_rocm_version(data):
"""Get the latest ROCm version from the matrix headers."""
if not data or len(data) == 0:
return None
# Get all column names except 'ROCm Version'
columns = [col for col in data[0].keys() if col != 'ROCm Version']
# Return the first column name (assumed to be the latest version)
return columns[0] if columns else None
def version_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Role function to print version value.
Usage: :version:`version_name`
"""
try:
version_name = text.strip()
env = inliner.document.settings.env
if hasattr(env, 'doc_version_refs'):
current_doc = env.docname
if current_doc in env.doc_version_refs:
doc_refs = env.doc_version_refs[current_doc]
if version_name in doc_refs:
version = doc_refs[version_name]
node = nodes.Text(version)
return [node], []
msg = inliner.reporter.warning(
f'No version defined for name {version_name}',
line=lineno
)
return [], [msg]
except Exception as e:
msg = inliner.reporter.error(
f'Error looking up version: {str(e)}',
line=lineno
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
def version_ref_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Role function for version references.
Usage: :version-ref:`library_name release`
:version-ref:`"library name" release`
:version-ref:`library_name latest`
:version-ref:`rocm latest`
"""
try:
# Parse the text - handle both quoted and unquoted formats
if '"' in text:
parts = shlex.split(text)
else:
parts = text.split()
if len(parts) != 2:
msg = inliner.reporter.error(
'Version reference must be in format "library_name release" or "\\"library name\\" release"',
line=lineno
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
library_name, release = parts
env = inliner.document.settings.env
# Check if release is a version reference in current document
if hasattr(env, 'doc_version_refs'):
current_doc = env.docname
if current_doc in env.doc_version_refs:
doc_refs = env.doc_version_refs[current_doc]
if release in doc_refs:
release = doc_refs[release]
# Handle special case for "rocm latest"
if library_name.lower() == 'rocm' and release.lower() == 'latest':
data = getattr(env, 'compatibility_matrix', None)
if not data:
raise ValueError("Compatibility matrix not found in environment")
latest_version = get_latest_rocm_version(data)
if latest_version:
node = VersionReference()
node += nodes.Text(latest_version)
return [node], []
else:
msg = inliner.reporter.warning(
'No ROCm versions found in compatibility matrix',
line=lineno
)
return [], [msg]
version = lookup_version(inliner, library_name, release)
if version:
node = VersionReference()
node += nodes.Text(version)
return [node], []
else:
msg = inliner.reporter.warning(
f'No version found for library {library_name} in release {release}',
line=lineno
)
return [], [msg]
except Exception as e:
msg = inliner.reporter.error(
f'Error looking up version: {str(e)}',
line=lineno
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
def lookup_version(inliner, library_name, release):
"""Look up the version in the compatibility matrix."""
env = inliner.document.settings.env
data = getattr(env, 'compatibility_matrix', None)
if not data:
raise ValueError("Compatibility matrix not found in environment")
# Handle the 'latest' keyword
if release.lower() == 'latest':
latest_version = get_latest_rocm_version(data)
if not latest_version:
return None
release = latest_version
# For ROCm, check if the version exists in column headers
if library_name.lower() == 'rocm':
columns = [col for col in data[0].keys() if col != 'ROCm Version']
if release in columns:
return release
return None
# Find the library version
for row in data:
row_lib_name = clean_library_name(row['ROCm Version'])
if row_lib_name == library_name:
# Get the version, removing any whitespace
version = row.get(release, '').strip()
if version:
return version
# If not found, try a case-insensitive search
for row in data:
row_lib_name = clean_library_name(row['ROCm Version'])
if row_lib_name.lower() == library_name.lower():
version = row.get(release, '').strip()
if version:
return version
return None
def visit_version_reference(self, node):
self.body.append('<span class="version-reference">')
def depart_version_reference(self, node):
self.body.append('</span>')
def load_compatibility_matrix(app):
"""Load the compatibility matrix content from CSV."""
if not app.config.compatibility_matrix_file:
logger.warning('No compatibility matrix file configured')
return
try:
with open(app.config.compatibility_matrix_file, 'r', encoding='utf-8') as f:
reader = csv.DictReader(f)
app.env.compatibility_matrix = list(reader)
logger.info('Successfully loaded compatibility matrix')
# Debug: print first few rows with their library names
for row in list(app.env.compatibility_matrix)[:5]:
if 'ROCm Version' in row:
lib_name = clean_library_name(row['ROCm Version'])
logger.debug(f"Loaded library: {lib_name}")
except Exception as e:
logger.error(f'Error loading compatibility matrix: {str(e)}')
def purge_version_refs(app, env, docname):
"""Remove version references for a document when it is purged"""
if hasattr(env, 'doc_version_refs'):
if docname in env.doc_version_refs:
del env.doc_version_refs[docname]
def setup(app):
app.add_node(VersionReference,
html=(visit_version_reference, depart_version_reference))
app.add_role('version-ref', version_ref_role)
app.add_role('version', version_role)
app.add_directive('version-set', VersionSetDirective)
# Add a config value for the compatibility matrix file path
app.add_config_value('compatibility_matrix_file', None, 'env')
# Connect to the builder-inited event to load the matrix
app.connect('builder-inited', load_compatibility_matrix)
# Connect to env-purge-doc event to clean up document-specific version refs
app.connect('env-purge-doc', purge_version_refs)
return {
'parallel_read_safe': True,
'parallel_write_safe': True,
}
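To wire this extension into a docs build, a project's ``conf.py`` registers the module and points ``compatibility_matrix_file`` at the CSV that ``load_compatibility_matrix()`` reads. A minimal sketch (the module name and paths here are placeholders, not the repository's actual layout):

.. code-block:: python

# conf.py (excerpt) -- illustrative only; module name and paths are assumptions
import os
import sys

sys.path.insert(0, os.path.abspath("./extension"))  # wherever the module lives

extensions = [
    "rocm_version_ref",  # hypothetical module name for the extension above
]

# CSV loaded at builder-inited time by load_compatibility_matrix()
compatibility_matrix_file = os.path.abspath("./data/compatibility-matrix.csv")

Pages can then use the ``version-set`` directive and the ``:version:`` and ``:version-ref:`` roles as described in the docstrings above.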

View File

@@ -9,266 +9,422 @@ LLM inference performance validation on AMD Instinct MI300X
.. _vllm-benchmark-unified-docker:
.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml
The `ROCm vLLM Docker <https://hub.docker.com/r/rocm/vllm/tags>`_ image offers
a prebuilt, optimized environment for validating large language model (LLM)
inference performance on the AMD Instinct™ MI300X accelerator. This ROCm vLLM
Docker image integrates vLLM and PyTorch tailored specifically for the MI300X
accelerator and includes the following components:
{% set unified_docker = data.vllm_benchmark.unified_docker.latest %}
{% set model_groups = data.vllm_benchmark.model_groups %}
* `ROCm 6.3.1 <https://github.com/ROCm/ROCm>`_
The `ROCm vLLM Docker <{{ unified_docker.docker_hub_url }}>`_ image offers
a prebuilt, optimized environment for validating large language model (LLM)
inference performance on the AMD Instinct™ MI300X accelerator. This ROCm vLLM
Docker image integrates vLLM and PyTorch tailored specifically for the MI300X
accelerator and includes the following components:
* `vLLM 0.6.6 <https://docs.vllm.ai/en/latest>`_
* `ROCm {{ unified_docker.rocm_version }} <https://github.com/ROCm/ROCm>`_
* `PyTorch 2.7.0 (2.7.0a0+git3a58512) <https://github.com/pytorch/pytorch>`_
* `vLLM {{ unified_docker.vllm_version }} <https://docs.vllm.ai/en/latest>`_
With this Docker image, you can quickly validate the expected inference
performance numbers for the MI300X accelerator. This topic also provides tips on
optimizing performance with popular AI models. For more information, see the lists of
:ref:`available models for MAD-integrated benchmarking <vllm-benchmark-mad-models>`
and :ref:`standalone benchmarking <vllm-benchmark-standalone-options>`.
* `PyTorch {{ unified_docker.pytorch_version }} <https://github.com/pytorch/pytorch>`_
.. _vllm-benchmark-vllm:
With this Docker image, you can quickly validate the expected inference
performance numbers for the MI300X accelerator. This topic also provides tips on
optimizing performance with popular AI models.
.. note::
.. _vllm-benchmark-available-models:
vLLM is a toolkit and library for LLM inference and serving. AMD implements
high-performance custom kernels and modules in vLLM to enhance performance.
See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for
more information.
Available models
================
Getting started
===============
.. raw:: html
Use the following procedures to reproduce the benchmark results on an
MI300X accelerator with the prebuilt vLLM Docker image.
<div id="vllm-benchmark-ud-params-picker" class="container-fluid">
<div class="row">
<div class="col-2 me-2 model-param-head">Model</div>
<div class="row col-10">
{% for model_group in model_groups %}
<div class="col-3 model-param" data-param-k="model-group" data-param-v="{{ model_group.tag }}" tabindex="0">{{ model_group.group }}</div>
{% endfor %}
</div>
</div>
.. _vllm-benchmark-get-started:
<div class="row mt-1">
<div class="col-2 me-2 model-param-head">Model variant</div>
<div class="row col-10">
{% for model_group in model_groups %}
{% set models = model_group.models %}
{% for model in models %}
{% if models|length % 3 == 0 %}
<div class="col-4 model-param" data-param-k="model" data-param-v="{{ model.mad_tag }}" data-param-group="{{ model_group.tag }}" tabindex="0">{{ model.model }}</div>
{% else %}
<div class="col-6 model-param" data-param-k="model" data-param-v="{{ model.mad_tag }}" data-param-group="{{ model_group.tag }}" tabindex="0">{{ model.model }}</div>
{% endif %}
{% endfor %}
{% endfor %}
</div>
</div>
</div>
1. Disable NUMA auto-balancing.
.. _vllm-benchmark-vllm:
To optimize performance, disable automatic NUMA balancing. Otherwise, the GPU
might hang until the periodic balancing is finalized. For more information,
see :ref:`AMD Instinct MI300X system optimization <mi300x-disable-numa>`.
{% for model_group in model_groups %}
{% for model in model_group.models %}
.. code-block:: shell
.. container:: model-doc {{model.mad_tag}}
# disable automatic NUMA balancing
sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
# check if NUMA balancing is disabled (returns 0 if disabled)
cat /proc/sys/kernel/numa_balancing
0
.. note::
2. Download the :ref:`ROCm vLLM Docker image <vllm-benchmark-unified-docker>`.
See the `{{ model.model }} model card on Hugging Face <{{ model.url }}>`_ to learn more about your selected model.
Some models require access authorization prior to use via an external license agreement through a third party.
Use the following command to pull the Docker image from Docker Hub.
{% endfor %}
{% endfor %}
.. code-block:: shell
docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
.. note::
Once the setup is complete, choose between two options to reproduce the
benchmark results:
vLLM is a toolkit and library for LLM inference and serving. AMD implements
high-performance custom kernels and modules in vLLM to enhance performance.
See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for
more information.
- :ref:`MAD-integrated benchmarking <vllm-benchmark-mad>`
Getting started
===============
- :ref:`Standalone benchmarking <vllm-benchmark-standalone>`
Use the following procedures to reproduce the benchmark results on an
MI300X accelerator with the prebuilt vLLM Docker image.
.. _vllm-benchmark-mad:
.. _vllm-benchmark-get-started:
MAD-integrated benchmarking
===========================
1. Disable NUMA auto-balancing.
Clone the ROCm Model Automation and Dashboarding (`<https://github.com/ROCm/MAD>`__) repository to a local
directory and install the required packages on the host machine.
To optimize performance, disable automatic NUMA balancing. Otherwise, the GPU
might hang until the periodic balancing is finalized. For more information,
see :ref:`AMD Instinct MI300X system optimization <mi300x-disable-numa>`.
.. code-block:: shell
.. code-block:: shell
git clone https://github.com/ROCm/MAD
cd MAD
pip install -r requirements.txt
# disable automatic NUMA balancing
sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
# check if NUMA balancing is disabled (returns 0 if disabled)
cat /proc/sys/kernel/numa_balancing
0
Use this command to run a performance benchmark test of the Llama 3.1 8B model
on one GPU with ``float16`` data type in the host machine.
2. Download the `ROCm vLLM Docker image <{{ unified_docker.docker_hub_url }}>`_.
.. code-block:: shell
Use the following command to pull the Docker image from Docker Hub.
export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models"
python3 tools/run_models.py --tags pyt_vllm_llama-3.1-8b --keep-model-dir --live-output --timeout 28800
.. code-block:: shell
ROCm MAD launches a Docker container with the name
``container_ci-pyt_vllm_llama-3.1-8b``. The latency and throughput reports of the
model are collected in the following path: ``~/MAD/reports_float16/``.
docker pull {{ unified_docker.pull_tag }}
Although the following models are preconfigured to collect latency and
throughput performance data, you can also change the benchmarking parameters.
Refer to the :ref:`Standalone benchmarking <vllm-benchmark-standalone>` section.
Benchmarking
============
.. _vllm-benchmark-mad-models:
Once the setup is complete, choose between two options to reproduce the
benchmark results:
Available models
----------------
.. _vllm-benchmark-mad:
.. list-table::
:header-rows: 1
:widths: 2, 3
{% for model_group in model_groups %}
{% for model in model_group.models %}
* - Model name
- Tag
.. container:: model-doc {{model.mad_tag}}
* - `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B>`_
- ``pyt_vllm_llama-3.1-8b``
.. tab-set::
* - `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
- ``pyt_vllm_llama-3.1-70b``
.. tab-item:: MAD-integrated benchmarking
* - `Llama 3.1 405B <https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct>`_
- ``pyt_vllm_llama-3.1-405b``
Clone the ROCm Model Automation and Dashboarding (`<https://github.com/ROCm/MAD>`__) repository to a local
directory and install the required packages on the host machine.
* - `Llama 3.2 11B Vision <https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct>`_
- ``pyt_vllm_llama-3.2-11b-vision-instruct``
.. code-block:: shell
* - `Llama 2 7B <https://huggingface.co/meta-llama/Llama-2-7b-chat-hf>`_
- ``pyt_vllm_llama-2-7b``
git clone https://github.com/ROCm/MAD
cd MAD
pip install -r requirements.txt
* - `Llama 2 70B <https://huggingface.co/meta-llama/Llama-2-70b-chat-hf>`_
- ``pyt_vllm_llama-2-70b``
Use this command to run the performance benchmark test on the `{{model.model}} <{{ model.url }}>`_ model
using one GPU with the ``{{model.precision}}`` data type on the host machine.
* - `Mixtral MoE 8x7B <https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1>`_
- ``pyt_vllm_mixtral-8x7b``
.. code-block:: shell
* - `Mixtral MoE 8x22B <https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1>`_
- ``pyt_vllm_mixtral-8x22b``
export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models"
python3 tools/run_models.py --tags {{model.mad_tag}} --keep-model-dir --live-output --timeout 28800
* - `Mistral 7B <https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3>`_
- ``pyt_vllm_mistral-7b``
MAD launches a Docker container with the name
``container_ci-{{model.mad_tag}}``. The latency and throughput reports of the
model are collected in the following path: ``~/MAD/reports_{{model.precision}}/``.
* - `Qwen2 7B <https://huggingface.co/Qwen/Qwen2-7B-Instruct>`_
- ``pyt_vllm_qwen2-7b``
Although the :ref:`available models <vllm-benchmark-available-models>` are preconfigured
to collect latency and throughput performance data, you can also change the benchmarking
parameters. See the standalone benchmarking tab for more information.
* - `Qwen2 72B <https://huggingface.co/Qwen/Qwen2-72B-Instruct>`_
- ``pyt_vllm_qwen2-72b``
.. tab-item:: Standalone benchmarking
* - `JAIS 13B <https://huggingface.co/core42/jais-13b-chat>`_
- ``pyt_vllm_jais-13b``
Run the vLLM benchmark tool independently by starting the
`Docker container <https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6/images/sha256-9a12ef62bbbeb5a4c30a01f702c8e025061f575aa129f291a49fbd02d6b4d6c9>`_
as shown in the following snippet.
* - `JAIS 30B <https://huggingface.co/core42/jais-30b-chat-v3>`_
- ``pyt_vllm_jais-30b``
.. code-block::
* - `DBRX Instruct <https://huggingface.co/databricks/dbrx-instruct>`_
- ``pyt_vllm_dbrx-instruct``
docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
docker run -it --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 16G --security-opt seccomp=unconfined --security-opt apparmor=unconfined --cap-add=SYS_PTRACE -v $(pwd):/workspace --env HUGGINGFACE_HUB_CACHE=/workspace --name vllm_v0.6.6 rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
* - `Gemma 2 27B <https://huggingface.co/google/gemma-2-27b>`_
- ``pyt_vllm_gemma-2-27b``
In the Docker container, clone the ROCm MAD repository and navigate to the
benchmark scripts directory at ``~/MAD/scripts/vllm``.
* - `C4AI Command R+ 08-2024 <https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024>`_
- ``pyt_vllm_c4ai-command-r-plus-08-2024``
.. code-block::
* - `DeepSeek MoE 16B <https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat>`_
- ``pyt_vllm_deepseek-moe-16b-chat``
git clone https://github.com/ROCm/MAD
cd MAD/scripts/vllm
* - `Llama 3.1 70B FP8 <https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV>`_
- ``pyt_vllm_llama-3.1-70b_fp8``
To start the benchmark, use the following command with the appropriate options.
* - `Llama 3.1 405B FP8 <https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV>`_
- ``pyt_vllm_llama-3.1-405b_fp8``
.. code-block::
* - `Mixtral MoE 8x7B FP8 <https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV>`_
- ``pyt_vllm_mixtral-8x7b_fp8``
./vllm_benchmark_report.sh -s $test_option -m {{model.model_repo}} -g $num_gpu -d {{model.precision}}
* - `Mixtral MoE 8x22B FP8 <https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV>`_
- ``pyt_vllm_mixtral-8x22b_fp8``
.. list-table::
:header-rows: 1
:align: center
* - `Mistral 7B FP8 <https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV>`_
- ``pyt_vllm_mistral-7b_fp8``
* - Name
- Options
- Description
* - `DBRX Instruct FP8 <https://huggingface.co/amd/dbrx-instruct-FP8-KV>`_
- ``pyt_vllm_dbrx_fp8``
* - ``$test_option``
- latency
- Measure decoding token latency
* - `C4AI Command R+ 08-2024 FP8 <https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV>`_
- ``pyt_vllm_command-r-plus_fp8``
* -
- throughput
- Measure token generation throughput
.. _vllm-benchmark-standalone:
* -
- all
- Measure both throughput and latency
Standalone benchmarking
=======================
* - ``$num_gpu``
- 1 or 8
- Number of GPUs
You can run the vLLM benchmark tool independently by starting the
:ref:`Docker container <vllm-benchmark-get-started>` as shown in the following
snippet.
* - ``$datatype``
- ``float16`` or ``float8``
- Data type
.. code-block::
.. note::
docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
docker run -it --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 16G --security-opt seccomp=unconfined --security-opt apparmor=unconfined --cap-add=SYS_PTRACE -v $(pwd):/workspace --env HUGGINGFACE_HUB_CACHE=/workspace --name vllm_v0.6.6 rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
The input sequence length, output sequence length, and tensor parallel (TP) are
already configured. You don't need to specify them with this script.
In the Docker container, clone the ROCm MAD repository and navigate to the
benchmark scripts directory at ``~/MAD/scripts/vllm``.
.. note::
.. code-block::
If you encounter the following error, pass your access-authorized Hugging
Face token to the gated models.
git clone https://github.com/ROCm/MAD
cd MAD/scripts/vllm
.. code-block::
Command
-------
OSError: You are trying to access a gated repo.
To start the benchmark, use the following command with the appropriate options.
See :ref:`Options <vllm-benchmark-standalone-options>` for the list of
options and their descriptions.
# pass your HF_TOKEN
export HF_TOKEN=$your_personal_hf_token
.. code-block:: shell
Here are some examples of running the benchmark with various options.
./vllm_benchmark_report.sh -s $test_option -m $model_repo -g $num_gpu -d $datatype
* Latency benchmark
See the :ref:`examples <vllm-benchmark-run-benchmark>` for more information.
Use this command to benchmark the latency of the {{model.model}} model on eight GPUs with the ``{{model.precision}}`` data type.
.. note::
.. code-block::
The input sequence length, output sequence length, and tensor parallel (TP) are
already configured. You don't need to specify them with this script.
./vllm_benchmark_report.sh -s latency -m {{model.model_repo}} -g 8 -d {{model.precision}}
.. note::
Find the latency report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_latency_report.csv``.
If you encounter the following error, pass your access-authorized Hugging
Face token to the gated models.
* Throughput benchmark
.. code-block:: shell
Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with the ``{{model.precision}}`` data type.
OSError: You are trying to access a gated repo.
.. code-block:: shell
# pass your HF_TOKEN
export HF_TOKEN=$your_personal_hf_token
./vllm_benchmark_report.sh -s throughput -m {{model.model_repo}} -g 8 -d {{model.precision}}
.. _vllm-benchmark-standalone-options:
Find the throughput report at ``./reports_{{model.precision}}_vllm_rocm{{unified_docker.rocm_version}}/summary/{{model.model_repo.split('/', 1)[1] if '/' in model.model_repo else model.model_repo}}_throughput_report.csv``.
Options and available models
----------------------------
.. raw:: html
.. list-table::
:header-rows: 1
:align: center
<style>
mjx-container[jax="CHTML"][display="true"] {
text-align: left;
margin: 0;
}
</style>
* - Name
- Options
- Description
.. note::
* - ``$test_option``
- latency
- Measure decoding token latency
Throughput is calculated as:
* -
- throughput
- Measure token generation throughput
- .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time
* -
- all
- Measure both throughput and latency
- .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time
{% endfor %}
{% endfor %}
* - ``$model_repo``
- ``meta-llama/Llama-3.1-8B-Instruct``
- `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B>`_
* - (``float16``)
- ``meta-llama/Llama-3.1-70B-Instruct``
- `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
* -
- ``meta-llama/Llama-3.1-405B-Instruct``
- `Llama 3.1 405B <https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct>`_
* -
- ``meta-llama/Llama-3.2-11B-Vision-Instruct``
- `Llama 3.2 11B Vision <https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct>`_
* -
- ``meta-llama/Llama-2-7b-chat-hf``
- `Llama 2 7B <https://huggingface.co/meta-llama/Llama-2-7b-chat-hf>`_
* -
- ``meta-llama/Llama-2-70b-chat-hf``
- `Llama 2 70B <https://huggingface.co/meta-llama/Llama-2-70b-chat-hf>`_
* -
- ``mistralai/Mixtral-8x7B-Instruct-v0.1``
- `Mixtral MoE 8x7B <https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1>`_
* -
- ``mistralai/Mixtral-8x22B-Instruct-v0.1``
- `Mixtral MoE 8x22B <https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1>`_
* -
- ``mistralai/Mistral-7B-Instruct-v0.3``
- `Mistral 7B <https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3>`_
* -
- ``Qwen/Qwen2-7B-Instruct``
- `Qwen2 7B <https://huggingface.co/Qwen/Qwen2-7B-Instruct>`_
* -
- ``Qwen/Qwen2-72B-Instruct``
- `Qwen2 72B <https://huggingface.co/Qwen/Qwen2-72B-Instruct>`_
* -
- ``core42/jais-13b-chat``
- `JAIS 13B <https://huggingface.co/core42/jais-13b-chat>`_
* -
- ``core42/jais-30b-chat-v3``
- `JAIS 30B <https://huggingface.co/core42/jais-30b-chat-v3>`_
* -
- ``databricks/dbrx-instruct``
- `DBRX Instruct <https://huggingface.co/databricks/dbrx-instruct>`_
* -
- ``google/gemma-2-27b``
- `Gemma 2 27B <https://huggingface.co/google/gemma-2-27b>`_
* -
- ``CohereForAI/c4ai-command-r-plus-08-2024``
- `C4AI Command R+ 08-2024 <https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024>`_
* -
- ``deepseek-ai/deepseek-moe-16b-chat``
- `DeepSeek MoE 16B <https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat>`_
* - ``$model_repo``
- ``amd/Llama-3.1-70B-Instruct-FP8-KV``
- `Llama 3.1 70B FP8 <https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV>`_
* - (``float8``)
- ``amd/Llama-3.1-405B-Instruct-FP8-KV``
- `Llama 3.1 405B FP8 <https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV>`_
* -
- ``amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV``
- `Mixtral MoE 8x7B FP8 <https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV>`_
* -
- ``amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV``
- `Mixtral MoE 8x22B FP8 <https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV>`_
* -
- ``amd/Mistral-7B-v0.1-FP8-KV``
- `Mistral 7B FP8 <https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV>`_
* -
- ``amd/dbrx-instruct-FP8-KV``
- `DBRX Instruct FP8 <https://huggingface.co/amd/dbrx-instruct-FP8-KV>`_
* -
- ``amd/c4ai-command-r-plus-FP8-KV``
- `C4AI Command R+ 08-2024 FP8 <https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV>`_
* - ``$num_gpu``
- 1 or 8
- Number of GPUs
* - ``$datatype``
- ``float16`` or ``float8``
- Data type
.. _vllm-benchmark-run-benchmark:
Running the benchmark on the MI300X accelerator
-----------------------------------------------
Here are some examples of running the benchmark with various options.
See :ref:`Options <vllm-benchmark-standalone-options>` for the list of
options and their descriptions.
Example 1: latency benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use this command to benchmark the latency of the Llama 3.1 70B model on eight GPUs with the ``float16`` and ``float8`` data types.
.. code-block::
./vllm_benchmark_report.sh -s latency -m meta-llama/Llama-3.1-70B-Instruct -g 8 -d float16
./vllm_benchmark_report.sh -s latency -m amd/Llama-3.1-70B-Instruct-FP8-KV -g 8 -d float8
Find the latency reports at:
- ``./reports_float16/summary/Llama-3.1-70B-Instruct_latency_report.csv``
- ``./reports_float8/summary/Llama-3.1-70B-Instruct-FP8-KV_latency_report.csv``
Example 2: throughput benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use this command to benchmark the throughput of the Llama 3.1 70B model on eight GPUs with the ``float16`` and ``float8`` data types.
.. code-block:: shell
./vllm_benchmark_report.sh -s throughput -m meta-llama/Llama-3.1-70B-Instruct -g 8 -d float16
./vllm_benchmark_report.sh -s throughput -m amd/Llama-3.1-70B-Instruct-FP8-KV -g 8 -d float8
Find the throughput reports at:
- ``./reports_float16/summary/Llama-3.1-70B-Instruct_throughput_report.csv``
- ``./reports_float8/summary/Llama-3.1-70B-Instruct-FP8-KV_throughput_report.csv``
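The ``*_report.csv`` summaries are plain CSV files, so they can be inspected or post-processed with standard tooling. For example, a small sketch (it assumes only that the report has a header row; the column names depend on ``vllm_benchmark_report.sh``):

.. code-block:: python

import csv

report = "./reports_float16/summary/Llama-3.1-70B-Instruct_throughput_report.csv"

with open(report, newline="") as f:
    for row in csv.DictReader(f):
        # Column names depend on the benchmark script; print whatever is there.
        print(row)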
.. raw:: html
<style>
mjx-container[jax="CHTML"][display="true"] {
text-align: left;
margin: 0;
}
</style>
.. note::
Throughput is calculated as:
- .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time
- .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time
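Written out as code, the two metrics are a direct transcription of the formulas above:

.. code-block:: python

def throughput_tot(requests, input_lengths, output_lengths, elapsed_time):
    # Prompt plus generated tokens per second across all requests.
    return requests * (input_lengths + output_lengths) / elapsed_time

def throughput_gen(requests, output_lengths, elapsed_time):
    # Generated tokens per second across all requests.
    return requests * output_lengths / elapsed_time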
Further reading
===============
@@ -290,3 +446,33 @@ Further reading
- To learn how to fine-tune LLMs, see
:doc:`Fine-tuning LLMs <../fine-tuning/index>`.
Previous versions
=================
This table lists previous versions of the ROCm vLLM Docker image for inference
performance validation. For detailed information about available models for
benchmarking, see the version-specific documentation.
.. list-table::
:header-rows: 1
:stub-columns: 1
* - ROCm version
- vLLM version
- PyTorch version
- Resources
* - 6.2.1
- 0.6.4
- 2.5.0
-
* `Documentation <https://rocm.docs.amd.com/en/docs-6.3.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_
* `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4/images/sha256-ccbb74cc9e7adecb8f7bdab9555f7ac6fc73adb580836c2a35ca96ff471890d8>`_
* - 6.2.0
- 0.4.3
- 2.4.0
-
* `Documentation <https://rocm.docs.amd.com/en/docs-6.2.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_
* `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.2_mi300_ubuntu22.04_py3.9_vllm_7c5fd50/images/sha256-9e4dd4788a794c3d346d7d0ba452ae5e92d39b8dfac438b2af8efdc7f15d22c0>`_

View File

@@ -1,547 +0,0 @@
:orphan:
.. meta::
:description: How to train a model using Megatron-LM for ROCm.
:keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch
******************************************
Training a model with Megatron-LM for ROCm
******************************************
The Megatron-LM framework for ROCm is a specialized fork of the robust Megatron-LM,
designed to enable efficient training of large-scale language models on AMD
GPUs. By leveraging AMD Instinct™ MI300X series accelerators, Megatron-LM delivers
enhanced scalability, performance, and resource utilization for AI workloads.
It is purpose-built to support models like Llama 2, Llama 3, Llama 3.1, and
DeepSeek, enabling developers to train next-generation AI models more
efficiently. See the GitHub repository at `<https://github.com/ROCm/Megatron-LM>`__.
AMD provides a ready-to-use Docker image for MI300X accelerators containing
essential components, including PyTorch, ROCm libraries, and Megatron-LM
utilities. It contains the following software components to accelerate training
workloads:
+--------------------------+--------------------------------+
| Software component | Version |
+==========================+================================+
| ROCm | 6.3.0 |
+--------------------------+--------------------------------+
| PyTorch | 2.7.0a0+git637433 |
+--------------------------+--------------------------------+
| Python | 3.10 |
+--------------------------+--------------------------------+
| Transformer Engine | 1.11 |
+--------------------------+--------------------------------+
| Flash Attention | 3.0.0 |
+--------------------------+--------------------------------+
| hipBLASLt | git258a2162 |
+--------------------------+--------------------------------+
| Triton | 3.1 |
+--------------------------+--------------------------------+
Supported features and models
=============================
Megatron-LM provides the following key features to train large language models efficiently:
- Transformer Engine (TE)
- APEX
- GEMM tuning
- Torch.compile
- 3D parallelism: TP + SP + CP
- Distributed optimizer
- Flash Attention (FA) 3
- Fused kernels
- Pre-training
.. _amd-megatron-lm-model-support:
The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator.
* Llama 2 7B
* Llama 2 70B
* Llama 3 8B
* Llama 3 70B
* Llama 3.1 8B
* Llama 3.1 70B
* DeepSeek-V2-Lite
.. note::
Some models, such as Llama 3, require an external license agreement through
a third party (for example, Meta).
System validation
=================
If you have already validated your system settings, skip this step. Otherwise,
complete the :ref:`system validation and optimization steps <train-a-model-system-validation>`
to set up your system before starting training.
Disable NUMA auto-balancing
---------------------------
Generally, application performance can benefit from disabling NUMA auto-balancing. However,
it might be detrimental to performance with certain types of workloads.
Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
the output is ``1``, run the following command to disable NUMA auto-balancing.
.. code-block:: shell
sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
See :ref:`mi300x-disable-numa` for more information.
.. _mi300x-amd-megatron-lm-training:
Environment setup
=================
The pre-built ROCm Megatron-LM environment allows users to quickly validate system performance, conduct
training benchmarks, and achieve superior performance for models like Llama 3.1, Llama 2, and DeepSeek V2.
Use the following instructions to set up the environment, configure the script to train models, and
reproduce the benchmark results on the MI300X accelerators with the AMD Megatron-LM Docker
image.
.. _amd-megatron-lm-requirements:
Download the Docker image
-------------------------
1. Use the following command to pull the Docker image from Docker Hub.
.. code-block:: shell
docker pull rocm/megatron-lm:v25.3
2. Launch the Docker container.
.. code-block:: shell
docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME:$HOME -v $HOME/.ssh:/root/.ssh --shm-size 64G --name megatron_training_env rocm/megatron-lm:v25.3
3. Use these commands if you exit the ``megatron_training_env`` container and need to return to it.
.. code-block:: shell
docker start megatron_training_env
docker exec -it megatron_training_env bash
The Docker container includes a pre-installed, verified version of Megatron-LM from the `release branch <https://github.com/ROCm/Megatron-LM/tree/megatron_release_v25.3>`_.
.. _amd-megatron-lm-environment-setup:
Configuration scripts
---------------------
.. tab-set::
.. tab-item:: Llama
:sync: llama
If you're working with Llama 2 7B or Llama 2 70B, use the ``train_llama2.sh`` configuration
script in the ``examples/llama`` directory of
`<https://github.com/ROCm/Megatron-LM/tree/megatron_release_v25.3/examples/llama>`__.
Likewise, if you're working with Llama 3 or Llama 3.1, then use ``train_llama3.sh`` and update
the configuration script accordingly.
.. tab-item:: DeepSeek V2
:sync: deepseek
Use the ``train_deepseek_v2.sh`` configuration script in the ``examples/deepseek_v2``
directory of
`<https://github.com/ROCm/Megatron-LM/tree/megatron_release_v25.3/examples/deepseek_v2>`__
and update the configuration script accordingly.
Network interface
^^^^^^^^^^^^^^^^^
.. tab-set::
.. tab-item:: Llama
:sync: llama
To avoid connectivity issues in multi-node deployments, ensure the correct network interface
is set in your training scripts.
1. Run the following command (outside the container) to find the active network interface on your system.
.. code-block:: shell
ip a
2. Update the ``NCCL_SOCKET_IFNAME`` and ``GLOO_SOCKET_IFNAME`` variables with your system's network interface. For
example:
.. code-block:: shell
export NCCL_SOCKET_IFNAME=ens50f0np0
export GLOO_SOCKET_IFNAME=ens50f0np0
Dataset options
^^^^^^^^^^^^^^^
.. tab-set::
.. tab-item:: Llama
:sync: llama
You can use either mock data or real data for training.
* Mock data can be useful for testing and validation. Use the ``MOCK_DATA`` variable to toggle between mock and real data. The default
value is ``1`` for enabled.
.. code-block:: bash
MOCK_DATA=1
* If you're using a real dataset, update the ``DATA_PATH`` variable to point to the location of your dataset.
.. code-block:: bash
MOCK_DATA=0
DATA_PATH=${DATA_PATH:-"/data/bookcorpus_text_sentence"} # Change to where your dataset is stored
Ensure that the files are accessible inside the Docker container.
.. tab-item:: DeepSeek V2
:sync: deepseek
If you don't already have the dataset, download the DeepSeek dataset using the following
commands:
.. code-block:: shell
mkdir deepseek-datasets
cd deepseek-datasets
wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/SlimPajama.json
wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/alpaca_zh-train.json
wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/alpaca_zh-valid.json
wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/mmap_deepseekv2_datasets_text_document.bin
wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/deepseek-datasets/mmap_deepseekv2_datasets_text_document.idx
You can use either mock data or real data for training.
* Mock data can be useful for testing and validation. Use the ``MOCK_DATA`` variable to toggle between mock and real data. The default
value is ``1`` for enabled.
.. code-block:: bash
MOCK_DATA=1
* If you're using a real dataset, update the ``DATA_DIR`` variable to point to the location of your dataset.
.. code-block:: bash
MOCK_DATA=0
DATA_DIR="/root/data/deepseek-datasets" # Change to where your dataset is stored
Ensure that the files are accessible inside the Docker container.
Tokenizer
^^^^^^^^^
Tokenization is the process of converting raw text into tokens that can be processed by the model. For Llama
models, this typically involves sub-word tokenization, where words are broken down into smaller units based on
a fixed vocabulary. The tokenizer is trained along with the model on a large corpus of text, and it learns a
fixed vocabulary that can represent a wide range of text from different domains. This allows Llama models to
handle a variety of input sequences, including unseen words or domain-specific terms.
.. tab-set::
.. tab-item:: Llama
:sync: llama
To train any of the Llama 2 models that :ref:`this Docker image supports <amd-megatron-lm-model-support>`, use the ``Llama2Tokenizer``.
To train any of the Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``.
Set the Hugging Face model link in the ``TOKENIZER_MODEL`` variable.
For example, if you're using the Llama 3.1 8B model:
.. code-block:: shell
TOKENIZER_MODEL=meta-llama/Llama-3.1-8B
.. tab-item:: DeepSeek V2
:sync: deepseek
To train any of the DeepSeek V2 models that :ref:`this Docker image supports <amd-megatron-lm-model-support>`, use the ``DeepSeekV2Tokenizer``.
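As a quick, standalone illustration of the sub-word behavior described above (separate from the training scripts; it assumes ``transformers`` is installed and your Hugging Face token grants access to the gated repository):

.. code-block:: python

from transformers import AutoTokenizer

# Gated repository: requires an access-authorized Hugging Face token.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")

encoding = tokenizer("Megatron-LM on MI300X accelerators")
print(encoding["input_ids"])                                   # token IDs
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))  # sub-word pieces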
Multi-node training
^^^^^^^^^^^^^^^^^^^
.. tab-set::
.. tab-item:: Llama
:sync: llama
If you're running multi-node training, update the following environment variables. They can
also be passed as command line arguments.
* Change ``localhost`` to the master node's hostname:
.. code-block:: shell
MASTER_ADDR="${MASTER_ADDR:-localhost}"
* Set the number of nodes you want to train on (for instance, ``2``, ``4``, ``8``):
.. code-block:: shell
NNODES="${NNODES:-1}"
* Set the rank of each node (0 for master, 1 for the first worker node, and so on):
.. code-block:: shell
NODE_RANK="${NODE_RANK:-0}"
* Set ``DATA_CACHE_PATH`` to a common directory accessible by all the nodes (for example, an
NFS directory) for multi-node runs:
.. code-block:: shell
DATA_CACHE_PATH=/root/cache # Set to a common directory for multi-node runs
* For multi-node runs, make sure the correct network drivers are installed on the nodes. If
inside a Docker, either install the drivers inside the Docker container or pass the network
drivers from the host while creating the Docker container.
Start training on AMD Instinct accelerators
===========================================
The prebuilt Megatron-LM with ROCm training environment allows users to quickly validate
system performance, conduct training benchmarks, and achieve superior
performance for models like Llama 3.1 and Llama 2. This container should not be
expected to provide generalized performance across all training workloads. You
can expect the container to perform well with the model configurations described in
the following section, but other configurations are not validated by AMD.
Use the following instructions to set up the environment, configure the script
to train models, and reproduce the benchmark results on MI300X series
accelerators with the AMD Megatron-LM Docker image.
.. tab-set::
.. tab-item:: Llama
:sync: llama
.. tab-set::
.. tab-item:: Single node training
:sync: single-node
To run training on a single node, navigate to the Megatron-LM folder and use the
following command:
.. code-block:: shell
TEE_OUTPUT=1 MBS=2 BS=128 TP=1 TE_FP8=1 SEQ_LENGTH=8192 MODEL_SIZE=8 bash examples/llama/train_llama3.sh
.. tab-item:: Multi-node training
:sync: multi-node
To run training on multiple nodes, launch the Docker container on each node. For example, for a two node setup (``NODE0`` as the master node), use these commands.
* On the master node ``NODE0``:
.. code-block:: shell
TEE_OUTPUT=1 MBS=2 BS=256 TP=1 TE_FP8=1 SEQ_LENGTH=8192 MODEL_SIZE=8 MASTER_ADDR=IP_NODE0 NNODES=2 NODE_RANK=0 bash examples/llama/train_llama3.sh
* On the worker node ``NODE1``:
.. code-block:: shell
TEE_OUTPUT=1 MBS=2 BS=256 TP=1 TE_FP8=1 SEQ_LENGTH=8192 MODEL_SIZE=8 MASTER_ADDR=IP_NODE0 NNODES=2 NODE_RANK=1 bash examples/llama/train_llama3.sh
.. tab-item:: DeepSeek V2
:sync: deepseek
To run training on a single node, go to the ``/Megatron-LM`` folder and use the following command:
.. code-block:: shell
cd /workspace/Megatron-LM
GEMM_TUNING=1 PR=bf16 MBS=4 AC=none bash examples/deepseek_v2/train_deepseekv2.sh
Key options
-----------
.. _amd-megatron-lm-benchmark-test-vars:
The benchmark tests support the following sets of variables; a short Python launcher sketch follows these option lists:
.. tab-set::
.. tab-item:: Llama
:sync: llama
``TEE_OUTPUT``
``1`` to enable training logs or ``0`` to disable.
``TE_FP8``
``0`` for BF16 (default) or ``1`` for FP8 GEMMs.
``GEMM_TUNING``
``1`` to enable GEMM tuning, which boosts performance by using the best GEMM kernels.
``USE_FLASH_ATTN``
``1`` to enable Flash Attention.
``ENABLE_PROFILING``
``1`` to enable PyTorch profiling for performance analysis.
``transformer-impl``
``transformer_engine`` to use the Transformer Engine (TE) or ``local`` to disable TE.
``MODEL_SIZE``
``8B`` or ``70B`` for Llama 3 and 3.1. ``7B`` or ``70B`` for Llama 2.
``TOTAL_ITERS``
The total number of iterations -- ``10`` by default.
``MOCK_DATA``
``1`` to use mock data or ``0`` to use real data provided by you.
``MBS``
Micro batch size.
``BS``
Global batch size.
``TP``
Tensor parallel (``1``, ``2``, ``4``, ``8``).
``SEQ_LENGTH``
Input sequence length.
.. tab-item:: DeepSeek V2
:sync: deepseek
``PR``
Precision for training. ``bf16`` for BF16 (default) or ``fp8`` for FP8 GEMMs.
``GEMM_TUNING``
``1`` to enable GEMM tuning, which boosts performance by using the best GEMM kernels.
``TOTAL_ITERS``
The total number of iterations -- ``10`` by default.
``MOCK_DATA``
``1`` to use mock data or ``0`` to use real data provided by you.
``MBS``
Micro batch size.
``GBS``
Global batch size.
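The following sketch shows one way to set these variables programmatically before invoking the training script, using the values from the single-node Llama 3 command shown earlier (illustrative only; the shell examples in the next section achieve the same thing inline):

.. code-block:: python

# Illustrative launcher; mirrors the single-node Llama 3 command shown earlier.
import os
import subprocess

env = dict(
    os.environ,
    TEE_OUTPUT="1",   # write training logs
    TE_FP8="1",       # FP8 GEMMs via Transformer Engine
    MODEL_SIZE="8",
    MBS="2",
    BS="128",
    TP="1",
    SEQ_LENGTH="8192",
)

subprocess.run(["bash", "examples/llama/train_llama3.sh"], env=env, check=True)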
Benchmarking examples
---------------------
.. tab-set::
.. tab-item:: Llama
:sync: llama
.. tab-set::
.. tab-item:: Single node training
:sync: single-node
Use this command to run training with the Llama 2 7B model on a single node. You can specify MBS, BS, FP,
data type, and so on.
.. code-block:: bash
TEE_OUTPUT=1 MBS=5 BS=120 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
See the sample output:
.. image:: ../../../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
:width: 800
.. tab-item:: Multi-node training
:sync: multi-node
Launch the Docker container on each node.
In this example, run training with the Llama 2 7B model on two nodes with specific MBS, BS, FP, data type, and
so on.
On the master node:
.. code-block:: bash
TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
On the worker node:
.. code-block:: bash
TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
Sample output for 2-node training:
Master node:
.. image:: ../../../../data/how-to/rocm-for-ai/2-node-training-master.png
:width: 800
Worker node:
.. image:: ../../../../data/how-to/rocm-for-ai/2-node-training-worker.png
:width: 800
Previous versions
=================
This table lists previous versions of the ROCm Megatron-LM Docker image for training
performance validation. For detailed information about available models for
benchmarking, see the version-specific documentation.
.. list-table::
:header-rows: 1
:stub-columns: 1
* - ROCm version
- Megatron-LM version
- PyTorch version
- Resources
* - 6.1
- 24.12-dev
- 2.4.0
-
* `Documentation <https://rocm.docs.amd.com/en/docs-6.3.0/how-to/rocm-for-ai/train-a-model.html>`_
* `Docker Hub <https://hub.docker.com/layers/rocm/megatron-lm/24.12-dev/images/sha256-5818c50334ce3d69deeeb8f589d83ec29003817da34158ebc9e2d112b929bf2e>`_

View File

@@ -1,341 +0,0 @@
:orphan:
.. meta::
:description: How to train a model using PyTorch for ROCm.
:keywords: ROCm, AI, LLM, train, PyTorch, torch, Llama, flux, tutorial, docker
**************************************
Training a model with PyTorch for ROCm
**************************************
PyTorch is an open-source machine learning framework that is widely used for
model training with GPU-optimized components for transformer-based models.
The PyTorch for ROCm training Docker (``rocm/pytorch-training:v25.3``) image
provides a prebuilt optimized environment for fine-tuning and pretraining a
model on AMD Instinct MI325X and MI300X accelerators. It includes the following
software components to accelerate training workloads:
+--------------------------+--------------------------------+
| Software component | Version |
+==========================+================================+
| ROCm | 6.3.0 |
+--------------------------+--------------------------------+
| PyTorch | 2.7.0a0+git637433 |
+--------------------------+--------------------------------+
| Python | 3.10 |
+--------------------------+--------------------------------+
| Transformer Engine | 1.11 |
+--------------------------+--------------------------------+
| Flash Attention | 3.0.0 |
+--------------------------+--------------------------------+
| hipBLASLt | git258a2162 |
+--------------------------+--------------------------------+
| Triton | 3.1 |
+--------------------------+--------------------------------+
.. _amd-pytorch-training-model-support:
Supported models
================
The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator.
* Llama 3.1 8B
* Llama 3.1 70B
* FLUX.1-dev
.. note::
Only these models are supported in the following steps.
Some models, such as Llama 3, require an external license agreement through
a third party (for example, Meta).
System validation
=================
If you have already validated your system settings, skip this step. Otherwise,
complete the :ref:`system validation and optimization steps <train-a-model-system-validation>`
to set up your system before starting training.
Disable NUMA auto-balancing
---------------------------
Generally, application performance can benefit from disabling NUMA auto-balancing. However,
it might be detrimental to performance with certain types of workloads.
Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
the output is ``1``, run the following command to disable NUMA auto-balancing.
.. code-block:: shell
sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
See :ref:`mi300x-disable-numa` for more information.
Environment setup
=================
This Docker image is optimized for specific model configurations outlined
below. Performance can vary for other training workloads, as AMD
doesn't validate configurations and run conditions outside those described.
Download the Docker image
-------------------------
1. Use the following command to pull the Docker image from Docker Hub.
.. code-block:: shell
docker pull rocm/pytorch-training:v25.3
2. Run the Docker container.
.. code-block:: shell
docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME:$HOME -v $HOME/.ssh:/root/.ssh --shm-size 64G --name training_env rocm/pytorch-training:v25.3
3. Use these commands if you exit the ``training_env`` container and need to return to it.
.. code-block:: shell
docker start training_env
docker exec -it training_env bash
4. In the Docker container, clone the `<https://github.com/ROCm/MAD>`__ repository and navigate to the benchmark scripts directory.
.. code-block:: shell
git clone https://github.com/ROCm/MAD
cd MAD/scripts/pytorch-train
Prepare training datasets and dependencies
------------------------------------------
The following benchmarking examples may require downloading models and datasets
from Hugging Face. To ensure successful access to gated repos, set your
``HF_TOKEN``.
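For example, you can export an access token generated from your Hugging Face account settings before running the setup script. The value below is a placeholder.
.. code-block:: shell
# replace the placeholder with your own Hugging Face access token
export HF_TOKEN=<your_hugging_face_token>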
Run the setup script to install libraries and datasets needed for benchmarking.
.. code-block:: shell
./pytorch_benchmark_setup.sh
``pytorch_benchmark_setup.sh`` installs the following libraries:
.. list-table::
:header-rows: 1
* - Library
- Benchmark model
- Reference
* - ``accelerate``
- Llama 3.1 8B, FLUX
- `Hugging Face Accelerate <https://huggingface.co/docs/accelerate/en/index>`_
* - ``datasets``
- Llama 3.1 8B, 70B, FLUX
- `Hugging Face Datasets <https://huggingface.co/docs/datasets/v3.2.0/en/index>`_ 3.2.0
* - ``torchdata``
- Llama 3.1 70B
- `TorchData <https://pytorch.org/data/beta/index.html>`_
* - ``tomli``
- Llama 3.1 70B
- `Tomli <https://pypi.org/project/tomli/>`_
* - ``tiktoken``
- Llama 3.1 70B
- `tiktoken <https://github.com/openai/tiktoken>`_
* - ``blobfile``
- Llama 3.1 70B
- `blobfile <https://pypi.org/project/blobfile/>`_
* - ``tabulate``
- Llama 3.1 70B
- `tabulate <https://pypi.org/project/tabulate/>`_
* - ``wandb``
- Llama 3.1 70B
- `Weights & Biases <https://github.com/wandb/wandb>`_
* - ``sentencepiece``
- Llama 3.1 70B, FLUX
- `SentencePiece <https://github.com/google/sentencepiece>`_ 0.2.0
* - ``tensorboard``
- Llama 3.1 70B, FLUX
- `TensorBoard <https://www.tensorflow.org/tensorboard>`_ 2.18.0
* - ``csvkit``
- FLUX
- `csvkit <https://csvkit.readthedocs.io/en/latest/>`_ 2.0.1
* - ``deepspeed``
- FLUX
- `DeepSpeed <https://github.com/deepspeedai/DeepSpeed>`_ 0.16.2
* - ``diffusers``
- FLUX
- `Hugging Face Diffusers <https://huggingface.co/docs/diffusers/en/index>`_ 0.31.0
* - ``GitPython``
- FLUX
- `GitPython <https://github.com/gitpython-developers/GitPython>`_ 3.1.44
* - ``opencv-python-headless``
- FLUX
- `opencv-python-headless <https://pypi.org/project/opencv-python-headless/>`_ 4.10.0.84
* - ``peft``
- FLUX
- `PEFT <https://huggingface.co/docs/peft/en/index>`_ 0.14.0
* - ``protobuf``
- FLUX
- `Protocol Buffers <https://github.com/protocolbuffers/protobuf>`_ 5.29.2
* - ``pytest``
- FLUX
- `PyTest <https://docs.pytest.org/en/stable/>`_ 8.3.4
* - ``python-dotenv``
- FLUX
- `python-dotenv <https://pypi.org/project/python-dotenv/>`_ 1.0.1
* - ``seaborn``
- FLUX
- `Seaborn <https://seaborn.pydata.org/>`_ 0.13.2
* - ``transformers``
- FLUX
- `Transformers <https://huggingface.co/docs/transformers/en/index>`_ 4.47.0
``pytorch_benchmark_setup.sh`` downloads the following models from Hugging Face:
* `meta-llama/Llama-3.1-70B-Instruct <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
* `black-forest-labs/FLUX.1-dev <https://huggingface.co/black-forest-labs/FLUX.1-dev>`_
Along with the following datasets:
* `WikiText <https://huggingface.co/datasets/Salesforce/wikitext>`_
* `bghira/pseudo-camera-10k <https://huggingface.co/datasets/bghira/pseudo-camera-10k>`_
Start training on AMD Instinct accelerators
===========================================
The prebuilt PyTorch with ROCm training environment allows users to quickly validate
system performance, conduct training benchmarks, and achieve superior
performance for models like Llama 3.1 and FLUX.1-dev. This container should not be
expected to provide generalized performance across all training workloads. You
can expect the container to perform well with the model configurations described in
the following section, but other configurations are not validated by AMD.
Use the following instructions to set up the environment, configure the script
to train models, and reproduce the benchmark results on MI300X series
accelerators with the AMD PyTorch training Docker image.
Once your environment is set up, use the following commands and examples to start benchmarking.
Pretraining
-----------
To start the pretraining benchmark, use the following command with the
appropriate options. See the following list of options and their descriptions.
.. code-block:: shell
./pytorch_benchmark_report.sh -t $training_mode -m $model_repo -p $datatype -s $sequence_length
Options and available models
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. list-table::
:header-rows: 1
* - Name
- Options
- Description
* - ``$training_mode``
- ``pretrain``
- Benchmark pretraining
* -
- ``finetune_fw``
- Benchmark full weight fine-tuning (Llama 3.1 70B with BF16)
* -
- ``finetune_lora``
- Benchmark LoRA fine-tuning (Llama 3.1 70B with BF16)
* - ``$datatype``
- FP8 or BF16
- Only Llama 3.1 8B supports FP8 precision.
* - ``$model_repo``
- Llama-3.1-8B
- `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct>`_
* -
- Llama-3.1-70B
- `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
* -
- Flux
- `FLUX.1 [dev] <https://huggingface.co/black-forest-labs/FLUX.1-dev>`_
Fine-tuning
-----------
To start the fine-tuning benchmark, use the following command. It runs the benchmarking example of Llama 3.1 70B
with the WikiText dataset using the AMD fork of `torchtune <https://github.com/AMD-AIG-AIMA/torchtune>`_.
.. code-block:: shell
./pytorch_benchmark_report.sh -t {finetune_fw, finetune_lora} -p BF16 -m Llama-3.1-70B
Benchmarking examples
---------------------
Here are some examples of how to use the command.
* Example 1: Llama 3.1 70B with BF16 precision using `torchtitan <https://github.com/ROCm/torchtitan>`_.
.. code-block:: shell
./pytorch_benchmark_report.sh -t pretrain -p BF16 -m Llama-3.1-70B -s 8192
* Example 2: Llama 3.1 8B with FP8 precision using Transformer Engine (TE) and Hugging Face Accelerate.
.. code-block:: shell
./pytorch_benchmark_report.sh -t pretrain -p FP8 -m Llama-3.1-8B -s 8192
* Example 3: FLUX.1-dev with BF16 precision using FluxBenchmark.
.. code-block:: shell
./pytorch_benchmark_report.sh -t pretrain -p BF16 -m Flux
* Example 4: Torchtune full weight fine-tuning with Llama 3.1 70B
.. code-block:: shell
./pytorch_benchmark_report.sh -t finetune_fw -p BF16 -m Llama-3.1-70B
* Example 5: Torchtune LoRA fine-tuning with Llama 3.1 70B
.. code-block:: shell
./pytorch_benchmark_report.sh -t finetune_lora -p BF16 -m Llama-3.1-70B

View File

@@ -19,10 +19,6 @@ training, fine-tuning, and inference. It leverages popular machine learning fram
In this guide, you'll learn about:
- Training a model
- :doc:`Training a model <train-a-model>`
- :doc:`Train a model with Megatron-LM <benchmark-docker/megatron-lm>`
- :doc:`Train a model with PyTorch <benchmark-docker/pytorch-training>`
- :doc:`Scaling model training <scale-model-training>`
- :doc:`Scale model training <scale-model-training>`

View File

@@ -1,130 +0,0 @@
:orphan:
.. meta::
:description: Prerequisite system validation before using ROCm for AI.
:keywords: ROCm, AI, LLM, train, megatron, Llama, tutorial, docker, torch, pytorch, jax
.. _train-a-model-system-validation:
**********************************************
Prerequisite system validation before training
**********************************************
Complete the following system validation and optimization steps to set up your system before starting training.
Disable NUMA auto-balancing
---------------------------
Generally, application performance can benefit from disabling NUMA auto-balancing. However,
it might be detrimental to performance with certain types of workloads.
Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
the output is ``1``, run the following command to disable NUMA auto-balancing.
.. code-block:: shell
sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
See :ref:`mi300x-disable-numa` for more information.
Hardware verification with ROCm
-------------------------------
Use the command ``rocm-smi --setperfdeterminism 1900`` to set the max clock speed up to 1900 MHz
instead of the default 2100 MHz. This can reduce the chance of a PCC event lowering the attainable
GPU clocks. This setting will not be required for new IFWI releases with the production PRC feature.
You can restore this setting to its default value with the ``rocm-smi -r`` command.
Run the command:
.. code-block:: shell
rocm-smi --setperfdeterminism 1900
See :ref:`mi300x-hardware-verification-with-rocm` for more information.
RCCL Bandwidth Test for multi-node setups
-----------------------------------------
ROCm Collective Communications Library (RCCL) is a standalone library of standard collective communication
routines for GPUs. See the :doc:`RCCL documentation <rccl:index>` for more information. Before starting
pretraining, running a RCCL bandwidth test helps ensure that the multi-GPU or multi-node setup is optimized
for efficient distributed training.
Running the RCCL bandwidth test helps verify that:
- The GPUs can communicate across nodes or within a single node.
- The interconnect (such as InfiniBand, Ethernet, or Infinity Fabric) is functioning as expected and
provides adequate bandwidth for communication.
- There are no hardware setup or cabling issues that could affect communication between GPUs.
Tuning and optimizing hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In distributed training, specific hyperparameters related to distributed communication can be tuned based on
the results of the RCCL bandwidth test. These variables are already set in the Docker image:
.. code-block:: shell
# force all RCCL streams to be high priority
export TORCH_NCCL_HIGH_PRIORITY=1
# specify which RDMA interfaces to use for communication
export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
# define the Global ID index used in RoCE mode
export NCCL_IB_GID_INDEX=3
# avoid data corruption/mismatch issue that existed in past releases
export RCCL_MSCCL_ENABLE=0
Running the RCCL Bandwidth Test
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's recommended that you run the RCCL bandwidth test before launching training to confirm that
system performance is sufficient. The RCCL bandwidth test (rccl-tests) is not included in the AMD Megatron-LM Docker
image; follow the instructions in `<https://github.com/ROCm/rccl-tests>`__ to get started.
See :ref:`mi300x-rccl` for more information.
Run on 8 GPUs (``-g 8``), scanning from 8 bytes to 10 GB:
.. code-block:: shell
./build/all_reduce_perf -b 8 -e 10G -f 2 -g 8
.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
:width: 800
Using one MPI process per GPU and ``-g 1`` for performance-oriented runs on both single-node and multi-node is
recommended. So, a run on 8 GPUs looks something like:
.. code-block:: shell
mpirun -np 8 --bind-to numa ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 1
.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
:width: 800
Running with one MPI process per GPU ensures a one-to-one mapping for CPUs and GPUs, which can be beneficial
for smaller message sizes. This better represents the real-world use of RCCL in deep learning frameworks like
PyTorch and TensorFlow.
Use the following script to run the RCCL test for four MI300X GPU nodes. Modify paths and node addresses as needed.
.. code-block::
/home/$USER/ompi_for_gpu/ompi/bin/mpirun -np 32 -H tw022:8,tw024:8,tw010:8,tw015:8 \
--mca pml ucx \
--mca btl ^openib \
-x NCCL_SOCKET_IFNAME=ens50f0np0 \
-x NCCL_IB_HCA=rdma0:1,rdma1:1,rdma2:1,rdma3:1,rdma4:1,rdma5:1,rdma6:1,rdma7:1 \
-x NCCL_IB_GID_INDEX=3 \
-x NCCL_MIN_NCHANNELS=40 \
-x NCCL_DEBUG=version \
$HOME/rccl-tests/build/all_reduce_perf -b 8 -e 8g -f 2 -g 1
.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
:width: 800

View File

@@ -0,0 +1,503 @@
.. meta::
:description: How to train a model using ROCm Megatron-LM
:keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch
**************************************
Training a model with ROCm Megatron-LM
**************************************
.. _amd-megatron-lm:
The ROCm Megatron-LM framework is a specialized fork of the robust Megatron-LM, designed to
enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD Instinct™ MI300X
accelerators, AMD Megatron-LM delivers enhanced scalability, performance, and resource utilization for AI
workloads. It is purpose-built to :ref:`support models <amd-megatron-lm-model-support>`
like Meta's Llama 2, Llama 3, and Llama 3.1, enabling developers to train next-generation AI models with greater
efficiency. See the GitHub repository at `<https://github.com/ROCm/Megatron-LM>`__.
For ease of use, AMD provides a ready-to-use Docker image for MI300X accelerators containing essential
components, including PyTorch, PyTorch Lightning, ROCm libraries, and Megatron-LM utilities. It contains the
following software to accelerate training workloads:
+--------------------------+--------------------------------+
| Software component | Version |
+==========================+================================+
| ROCm | 6.1 |
+--------------------------+--------------------------------+
| PyTorch | 2.4.0 |
+--------------------------+--------------------------------+
| PyTorch Lightning | 2.4.0 |
+--------------------------+--------------------------------+
| Megatron Core | 0.9.0 |
+--------------------------+--------------------------------+
| Transformer Engine | 1.5.0 |
+--------------------------+--------------------------------+
| Flash Attention | v2.6 |
+--------------------------+--------------------------------+
| Transformers | 4.44.0 |
+--------------------------+--------------------------------+
Supported features and models
=============================
Megatron-LM provides the following key features to train large language models efficiently:
- Transformer Engine (TE)
- APEX
- GEMM tuning
- Torch.compile
- 3D parallelism: TP + SP + CP
- Distributed optimizer
- Flash Attention (FA) 2
- Fused kernels
- Pre-training
.. _amd-megatron-lm-model-support:
The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator.
* Llama 2 7B
* Llama 2 70B
* Llama 3 8B
* Llama 3 70B
* Llama 3.1 8B
* Llama 3.1 70B
Prerequisite system validation steps
====================================
Complete the following system validation and optimization steps to set up your system before starting training.
Disable NUMA auto-balancing
---------------------------
Generally, application performance can benefit from disabling NUMA auto-balancing. However,
it might be detrimental to performance with certain types of workloads.
Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
the output is ``1``, run the following command to disable NUMA auto-balancing.
.. code-block:: shell
sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
See :ref:`mi300x-disable-numa` for more information.
Hardware verification with ROCm
-------------------------------
Use the command ``rocm-smi --setperfdeterminism 1900`` to set the max clock speed up to 1900 MHz
instead of the default 2100 MHz. This can reduce the chance of a PCC event lowering the attainable
GPU clocks. This setting will not be required for new IFWI releases with the production PRC feature.
You can restore this setting to its default value with the ``rocm-smi -r`` command.
Run the command:
.. code-block:: shell
rocm-smi --setperfdeterminism 1900
See :ref:`mi300x-hardware-verification-with-rocm` for more information.
RCCL Bandwidth Test
-------------------
ROCm Collective Communications Library (RCCL) is a standalone library of standard collective communication
routines for GPUs. See the :doc:`RCCL documentation <rccl:index>` for more information. Before starting
pre-training, running a RCCL bandwidth test helps ensure that the multi-GPU or multi-node setup is optimized
for efficient distributed training.
Running the RCCL bandwidth test helps verify that:
- The GPUs can communicate across nodes or within a single node.
- The interconnect (such as InfiniBand, Ethernet, or Infinity Fabric) is functioning as expected and
provides adequate bandwidth for communication.
- There are no hardware setup or cabling issues that could affect communication between GPUs.
Tuning and optimizing hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In distributed training, specific hyperparameters related to distributed communication can be tuned based on
the results of the RCCL bandwidth test. These variables are already set in the Docker image:
.. code-block:: shell
# force all RCCL streams to be high priority
export TORCH_NCCL_HIGH_PRIORITY=1
# specify which RDMA interfaces to use for communication
export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
# define the Global ID index used in RoCE mode
export NCCL_IB_GID_INDEX=3
# avoid data corruption/mismatch issue that existed in past releases
export RCCL_MSCCL_ENABLE=0
Running the RCCL Bandwidth Test
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's recommended that you run the RCCL bandwidth test before launching training to confirm that
system performance is sufficient. The RCCL bandwidth test (rccl-tests) is not included in the AMD Megatron-LM Docker
image; follow the instructions in `<https://github.com/ROCm/rccl-tests>`__ to get started.
See :ref:`mi300x-rccl` for more information.
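As a rough sketch only (the exact build options depend on your MPI installation; see the rccl-tests README for the authoritative steps), obtaining and building the tests might look like this, with ``MPI_HOME`` as a placeholder path:
.. code-block:: shell
git clone https://github.com/ROCm/rccl-tests
cd rccl-tests
# MPI_HOME is a placeholder for your Open MPI installation path; check the README for the full set of flags
make MPI=1 MPI_HOME=/path/to/ompi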
Run on 8 GPUs (``-g 8``), scanning from 8 bytes to 10 GB:
.. code-block:: shell
./build/all_reduce_perf -b 8 -e 10G -f 2 -g 8
.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
:width: 800
Using one MPI process per GPU and ``-g 1`` for performance-oriented runs on both single-node and multi-node is
recommended. So, a run on 8 GPUs looks something like:
.. code-block:: shell
mpirun -np 8 --bind-to numa ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 1
.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
:width: 800
Running with one MPI process per GPU ensures a one-to-one mapping for CPUs and GPUs, which can be beneficial
for smaller message sizes. This better represents the real-world use of RCCL in deep learning frameworks like
PyTorch and TensorFlow.
Use the following script to run the RCCL test for four MI300X GPU nodes. Modify paths and node addresses as needed.
.. code-block::
/home/$USER/ompi_for_gpu/ompi/bin/mpirun -np 32 -H tw022:8,tw024:8,tw010:8,tw015:8 \
--mca pml ucx \
--mca btl ^openib \
-x NCCL_SOCKET_IFNAME=ens50f0np0 \
-x NCCL_IB_HCA=rdma0:1,rdma1:1,rdma2:1,rdma3:1,rdma4:1,rdma5:1,rdma6:1,rdma7:1 \
-x NCCL_IB_GID_INDEX=3 \
-x NCCL_MIN_NCHANNELS=40 \
-x NCCL_DEBUG=version \
$HOME/rccl-tests/build/all_reduce_perf -b 8 -e 8g -f 2 -g 1
.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
:width: 800
.. _mi300x-amd-megatron-lm-training:
Start training on MI300X accelerators
=====================================
The pre-built ROCm Megatron-LM environment allows users to quickly validate system performance, conduct
training benchmarks, and achieve superior performance for models like Llama 2 and Llama 3.1.
Use the following instructions to set up the environment, configure the script to train models, and
reproduce the benchmark results on the MI300X accelerators with the AMD Megatron-LM Docker
image.
.. _amd-megatron-lm-requirements:
Download the Docker image and required packages
-----------------------------------------------
1. Use the following command to pull the Docker image from Docker Hub.
.. code-block:: shell
docker pull rocm/megatron-lm:24.12-dev
2. Launch the Docker container.
.. code-block:: shell
docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $CACHE_DIR:/root/.cache --name megatron-dev-env rocm/megatron-lm:24.12-dev /bin/bash
3. Clone the ROCm Megatron-LM repository to a local directory and install the required packages on the host machine.
.. code-block:: shell
git clone https://github.com/ROCm/Megatron-LM
cd Megatron-LM
.. note::
This release is validated with ``ROCm/Megatron-LM`` commit `bb93ccb <https://github.com/ROCm/Megatron-LM/tree/bb93ccbfeae6363c67b361a97a27c74ab86e7e92>`_.
Checking out this specific commit is recommended for a stable and reproducible environment.
.. code-block:: shell
git checkout bb93ccbfeae6363c67b361a97a27c74ab86e7e92
Prepare training datasets
-------------------------
If you already have the preprocessed data, you can skip this section.
Use the following command to process datasets. GPT data is used as an example. You can change the merge table, append an
end-of-document token, remove sentence splitting, and change the tokenizer type as needed.
.. code-block:: shell
python tools/preprocess_data.py \
--input my-corpus.json \
--output-prefix my-gpt2 \
--vocab-file gpt2-vocab.json \
--tokenizer-type GPT2BPETokenizer \
--merge-file gpt2-merges.txt \
--append-eod
In this case, the automatically generated output files are named ``my-gpt2_text_document.bin`` and
``my-gpt2_text_document.idx``.
.. image:: ../../../data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png
:width: 800
.. _amd-megatron-lm-environment-setup:
Environment setup
-----------------
In the ``examples/llama`` directory of Megatron-LM, if you're working with Llama 2 7B or Llama 2 70B, use the
``train_llama2.sh`` configuration script. Likewise, if you're working with Llama 3 or Llama 3.1, then use
``train_llama3.sh`` and update the configuration script accordingly.
Network interface
^^^^^^^^^^^^^^^^^
To avoid connectivity issues, ensure the correct network interface is set in your training scripts.
1. Run the following command to find the active network interface on your system.
.. code-block:: shell
ip a
2. Update the ``NCCL_SOCKET_IFNAME`` and ``GLOO_SOCKET_IFNAME`` variables with your system's network interface. For
example:
.. code-block:: shell
export NCCL_SOCKET_IFNAME=ens50f0np0
export GLOO_SOCKET_IFNAME=ens50f0np0
Dataset options
^^^^^^^^^^^^^^^
You can use either mock data or real data for training.
* If you're using a real dataset, update the ``DATA_PATH`` variable to point to the location of your dataset.
.. code-block:: shell
DATA_DIR="/root/.cache/data" # Change to where your dataset is stored
DATA_PATH=${DATA_DIR}/bookcorpus_text_sentence
.. code-block:: shell
--data-path $DATA_PATH
Ensure that the files are accessible inside the Docker container.
* Mock data can be useful for testing and validation. If you're using mock data, replace ``--data-path $DATA_PATH`` with the ``--mock-data`` option.
.. code-block:: shell
--mock-data
Tokenizer
^^^^^^^^^
Tokenization is the process of converting raw text into tokens that can be processed by the model. For Llama
models, this typically involves sub-word tokenization, where words are broken down into smaller units based on
a fixed vocabulary. The tokenizer is trained along with the model on a large corpus of text, and it learns a
fixed vocabulary that can represent a wide range of text from different domains. This allows Llama models to
handle a variety of input sequences, including unseen words or domain-specific terms.
To train any of the Llama 2 models that this Docker image supports, use the ``Llama2Tokenizer``.
To train any of the Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``.
Set the Hugging Face model link in the ``TOKENIZER_MODEL`` variable.
For example, if you're using the Llama 3.1 8B model:
.. code-block:: shell
TOKENIZER_MODEL=meta-llama/Llama-3.1-8B
Run benchmark tests
-------------------
.. note::
If you're running **multi-node training**, update the following environment variables. They can
also be passed as command-line arguments, as shown in the example after the benchmark commands below.
* Change ``localhost`` to the master node's hostname:
.. code-block:: shell
MASTER_ADDR="${MASTER_ADDR:-localhost}"
* Set the number of nodes you want to train on (for instance, ``2``, ``4``, ``8``):
.. code-block:: shell
NNODES="${NNODES:-1}"
* Set the rank of each node (0 for master, 1 for the first worker node, and so on):
.. code-block:: shell
NODE_RANK="${NODE_RANK:-0}"
* Use this command to run a performance benchmark test of any of the Llama 2 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).
.. code-block:: shell
{variables} bash examples/llama/train_llama2.sh
* Use this command to run a performance benchmark test of any of the Llama 3 and Llama 3.1 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).
.. code-block:: shell
{variables} bash examples/llama/train_llama3.sh
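For multi-node runs, the variables described in the preceding note can be prepended to either command. The following two-node Llama 2 sketch uses a placeholder hostname (``master-host``); adjust the values for your cluster.
.. code-block:: shell
# master node (rank 0); master-host is a placeholder hostname
MASTER_ADDR=master-host NNODES=2 NODE_RANK=0 TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
# worker node (rank 1)
MASTER_ADDR=master-host NNODES=2 NODE_RANK=1 TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh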
.. _amd-megatron-lm-benchmark-test-vars:
The benchmark tests support the same set of variables:
+--------------------------+-----------------------+-----------------------+
| Name | Options | Description |
+==========================+=======================+=======================+
| ``TEE_OUTPUT`` | 0 or 1 | 0: disable training |
| | | log |
| | | |
| | | 1: enable training |
| | | log |
+--------------------------+-----------------------+-----------------------+
| ``MBS`` | | Micro batch size |
+--------------------------+-----------------------+-----------------------+
| ``BS`` | | Batch size |
+--------------------------+-----------------------+-----------------------+
| ``TP`` | 1, 2, 4, 8 | Tensor parallel |
+--------------------------+-----------------------+-----------------------+
| ``TE_FP8`` | 0 or 1 | Datatype. |
| | | If it is set to 1, |
| | | FP8. |
| | | |
| | | If it is set to 0, |
| | | BF16 |
+--------------------------+-----------------------+-----------------------+
| ``NO_TORCH_COMPILE`` | 0 or 1 | If it is set to 1, |
| | | enable torch.compile. |
| | | |
| | | If it is set to 0, |
| | | disable torch.compile |
| | | (default) |
+--------------------------+-----------------------+-----------------------+
| ``SEQ_LENGTH`` | | Input sequence length |
+--------------------------+-----------------------+-----------------------+
| ``GEMM_TUNING`` | 0 or 1 | If it is set to 1, |
| | | enable gemm tuning. |
| | | |
| | | If it is set to 0, |
| | | disable gemm tuning |
+--------------------------+-----------------------+-----------------------+
| ``USE_FLASH_ATTN`` | 0 or 1 | 0: disable flash |
| | | attention |
| | | |
| | | 1: enable flash |
| | | attention |
+--------------------------+-----------------------+-----------------------+
| ``ENABLE_PROFILING`` | 0 or 1 | 0: disable torch |
| | | profiling |
| | | |
| | | 1: enable torch |
| | | profiling |
+--------------------------+-----------------------+-----------------------+
| ``MODEL_SIZE`` | | The size of the model: |
| | | 7B/70B, etc. |
+--------------------------+-----------------------+-----------------------+
| ``TOTAL_ITERS`` | | Total number of |
| | | iterations |
+--------------------------+-----------------------+-----------------------+
| ``transformer-impl`` | transformer_engine or | Enable transformer |
| | local | engine by default |
+--------------------------+-----------------------+-----------------------+
Benchmarking examples
^^^^^^^^^^^^^^^^^^^^^
.. tab-set::
.. tab-item:: Single node training
:sync: single
Use this command to run training with the Llama 2 7B model on a single node. You can specify MBS, BS, FP
datatype, and so on.
.. code-block:: bash
TEE_OUTPUT=1 MBS=5 BS=120 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
See the sample output:
.. image:: ../../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
:width: 800
.. tab-item:: Multi node training
:sync: multi
Launch the Docker container on each node.
In this example, run training with the Llama 2 7B model on 2 nodes with a specific MBS, BS, FP
datatype, and so on.
On the master node:
.. code-block:: bash
TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
On the worker node:
.. code-block:: bash
TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
Sample output for 2-node training:
Master node:
.. image:: ../../../data/how-to/rocm-for-ai/2-node-training-master.png
:width: 800
Worker node:
.. image:: ../../../data/how-to/rocm-for-ai/2-node-training-worker.png
:width: 800

View File

@@ -308,24 +308,6 @@ Otherwise, if the system has Intel host CPUs add this instead to
intel_iommu=on iommu=pt
``modprobe.blacklist=amdgpu``
For some system configurations, the ``amdgpu`` driver needs to be blocked during kernel initialization to avoid an issue where, after boot, the GPUs are not listed when running ``rocm-smi`` or ``amd-smi``.
Alternatively, configuring the AMD-recommended system-optimized BIOS settings might remove the need for this workaround. However, some manufacturers and users might not implement the recommended settings.
If you experience this issue, add the following to ``GRUB_CMDLINE_LINUX``:
.. code-block:: text
modprobe.blacklist=amdgpu
After the change, the ``amdgpu`` module must be loaded to support the ROCm framework
software tools and utilities. Run the following command to load the ``amdgpu`` module:
.. code-block:: text
sudo modprobe amdgpu
Update GRUB
-----------

View File

@@ -32,8 +32,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- L1 Instruction Cache (KiB)
- VGPR File (KiB)
- SGPR File (KiB)
- GFXIP Major version
- GFXIP Minor version
*
- MI325X
- CDNA3
@@ -49,8 +47,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 64 per 2 CUs
- 512
- 12.5
- 9
- 4
*
- MI300X
- CDNA3
@@ -66,8 +62,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 64 per 2 CUs
- 512
- 12.5
- 9
- 4
*
- MI300A
- CDNA3
@@ -83,8 +77,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 64 per 2 CUs
- 512
- 12.5
- 9
- 4
*
- MI250X
- CDNA2
@@ -100,8 +92,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 2 CUs
- 512
- 12.5
- 9
- 0
*
- MI250
- CDNA2
@@ -117,8 +107,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 2 CUs
- 512
- 12.5
- 9
- 0
*
- MI210
- CDNA2
@@ -134,8 +122,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 2 CUs
- 512
- 12.5
- 9
- 0
*
- MI100
- CDNA
@@ -151,8 +137,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256 VGPR and 256 AccVGPR
- 12.5
- 9
- 0
*
- MI60
- GCN5.1
@@ -168,8 +152,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256
- 12.5
- 9
- 0
*
- MI50 (32GB)
- GCN5.1
@@ -185,8 +167,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256
- 12.5
- 9
- 0
*
- MI50 (16GB)
- GCN5.1
@@ -202,8 +182,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256
- 12.5
- 9
- 0
*
- MI25
- GCN5.0
@@ -219,8 +197,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256
- 12.5
- 9
- 0
*
- MI8
- GCN3.0
@@ -236,8 +212,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 4 CUs
- 256
- 12.5
- 8
- 0
*
- MI6
- GCN4.0
@@ -253,8 +227,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 4 CUs
- 256
- 12.5
- 8
- 0
.. tab-item:: AMD Radeon PRO GPUs
@@ -266,7 +238,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Model
- Architecture
- LLVM target name
- VRAM (GiB)
- Compute Units
- Wavefront Size
@@ -279,8 +250,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- L0 Instruction Cache (KiB)
- VGPR File (KiB)
- SGPR File (KiB)
- GFXIP Major version
- GFXIP Minor version
*
- Radeon PRO V710
- RDNA3
@@ -297,8 +266,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon PRO W7900 Dual Slot
- RDNA3
@@ -315,8 +282,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon PRO W7900
- RDNA3
@@ -333,8 +298,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon PRO W7800
- RDNA3
@@ -351,8 +314,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon PRO W7700
- RDNA3
@@ -369,8 +330,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon PRO W6800
- RDNA2
@@ -387,8 +346,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon PRO W6600
- RDNA2
@@ -405,8 +362,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon PRO V620
- RDNA2
@@ -423,8 +378,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon Pro W5500
- RDNA
@@ -441,8 +394,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 20
- 10
- 1
*
- Radeon Pro VII
- GCN5.1
@@ -459,8 +410,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256
- 12.5
- 9
- 0
.. tab-item:: AMD Radeon GPUs
@@ -484,8 +433,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- L0 Instruction Cache (KiB)
- VGPR File (KiB)
- SGPR File (KiB)
- GFXIP Major version
- GFXIP Minor version
*
- Radeon RX 7900 XTX
- RDNA3
@@ -502,8 +449,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon RX 7900 XT
- RDNA3
@@ -520,8 +465,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon RX 7900 GRE
- RDNA3
@@ -538,8 +481,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon RX 7800 XT
- RDNA3
@@ -556,8 +497,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon RX 7700 XT
- RDNA3
@@ -574,8 +513,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 768
- 16
- 11
- 0
*
- Radeon RX 7600
- RDNA3
@@ -592,8 +529,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 11
- 0
*
- Radeon RX 6950 XT
- RDNA2
@@ -610,8 +545,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6900 XT
- RDNA2
@@ -628,8 +561,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6800 XT
- RDNA2
@@ -646,8 +577,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6800
- RDNA2
@@ -664,8 +593,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6750 XT
- RDNA2
@@ -682,8 +609,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6700 XT
- RDNA2
@@ -700,8 +625,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6700
- RDNA2
@@ -718,8 +641,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6650 XT
- RDNA2
@@ -736,8 +657,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6600 XT
- RDNA2
@@ -754,8 +673,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon RX 6600
- RDNA2
@@ -772,8 +689,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32
- 512
- 16
- 10
- 3
*
- Radeon VII
- GCN5.1
@@ -790,14 +705,12 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- 32 per 3 CUs
- 256
- 12.5
- 9
- 0
Glossary
========
For more information about the terms used, see the
:ref:`specific documents and guides <gpu-arch-documentation>`, or
:doc:`Understanding the HIP programming model<hip:understand/programming_model>`.
**LLVM target name**
@@ -887,26 +800,6 @@ Purpose Vector Registers, used specifically in matrix instructions.
Size of the Scalar General Purpose Register (SGPR) file. Holds data used in
scalar instructions.
**GFXIP**
GFXIP (Graphics IP) is a versioning system used by AMD to identify the GPU
architecture and its instruction set. It helps categorize different generations
of GPUs and their feature sets.
**GFXIP major version**
Defines the GPU's core instruction set and architecture, which determines
compatibility with software stacks such as HIP and OpenCL. For example, a GFXIP
11 major version corresponds to the RDNA 3 (Navi 3x) architecture, influencing
driver support and available compute features.
**GFXIP minor version**
Represents specific variations within a GFXIP major version and affects feature sets,
optimizations, and driver behavior in software stacks such as HIP and OpenCL. Different
GPU models within the same major version can have unique capabilities, impacting
performance and supported instructions.
**GCD**
Graphics Compute Die.

View File

@@ -9,14 +9,16 @@
Data types and precision support
*************************************************************
This topic lists the data types support on AMD GPUs, ROCm libraries along
with corresponding :doc:`HIP <hip:index>` data types.
This topic lists the supported data types of AMD GPUs and ROCm libraries.
Corresponding :doc:`HIP <hip:index>` data types are also noted.
Integral types
==============
==========================================
The signed and unsigned integral types supported by ROCm are listed in
the following table.
the following table, along with their corresponding HIP type and a short
description.
.. list-table::
:header-rows: 1
@@ -46,9 +48,10 @@ the following table.
.. _precision_support_floating_point_types:
Floating-point types
====================
==========================================
The floating-point types supported by ROCm are listed in the following table.
The floating-point types supported by ROCm are listed in the following
table, along with their corresponding HIP type and a short description.
.. image:: ../data/about/compatibility/floating-point-data-types.png
:alt: Supported floating-point types
@@ -63,18 +66,18 @@ The floating-point types supported by ROCm are listed in the following table.
- Description
*
- float8 (E4M3)
- ``__hip_fp8_e4m3_fnuz``
- ``-``
- An 8-bit floating-point number that mostly follows IEEE-754 conventions
and **S1E4M3** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_,
with expanded range and no infinity or signed zero. NaN is represented
as negative zero.
and **S1E4M3** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ ,
with expanded range and no infinity or signed zero. NaN is
represented as negative zero.
*
- float8 (E5M2)
- ``__hip_fp8_e5m2_fnuz``
- ``-``
- An 8-bit floating-point number mostly following IEEE-754 conventions and
**S1E5M2** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_,
with expanded range and no infinity or signed zero. NaN is represented
as negative zero.
**S1E5M2** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ ,
with expanded range and no infinity or signed zero. NaN is
represented as negative zero.
*
- float16
- ``half``
@@ -87,7 +90,7 @@ The floating-point types supported by ROCm are listed in the following table.
format.
*
- tensorfloat32
- Not available
- ``-``
- A floating-point number that occupies 32 bits or less of storage,
providing improved range compared to half (16-bit) format, at
(potentially) greater throughput than single-precision (32-bit) formats.
@@ -114,15 +117,12 @@ The floating-point types supported by ROCm are listed in the following table.
* In some AMD documents and articles, float8 (E5M2) is referred to as bfloat8.
* The :doc:`low precision floating point types page <hip:reference/low_fp_types>`
describes how to use these types in HIP with examples.
ROCm support icons
==========================================
Level of support definitions
============================
In the following sections, icons represent the level of support. These icons,
described in the following table, are also used in the library data type support
pages.
In the following sections, icons represent the level of support. These
icons, described in the following table, are also used in the library data type
support pages.
.. list-table::
:header-rows: 1
@@ -130,11 +130,6 @@ pages.
*
- Icon
- Definition
*
- NA
- Not applicable
*
-
- Not supported
@@ -163,15 +158,16 @@ pages.
* Any type can be emulated by software, but this page does not cover such
cases.
Data type support by Hardware Architecture
Hardware data type support
==========================================
The MI200 series GPUs, which include MI210, MI250, and MI250X, are based on the
CDNA2 architecture. The MI300 series GPUs, consisting of MI300A, MI300X, and
MI325X, are based on the CDNA3 architecture.
The following tables provide information about AMD Instinct accelerators support
for various data types. The MI200 series GPUs, which include MI210, MI250, and
MI250X, are based on the CDNA2 architecture. The MI300 series GPUs, consisting
of MI300A, MI300X, and MI325X, are built on the CDNA3 architecture.
Compute units support
---------------------
-------------------------------------------------------------------------------
The following table lists data type support for compute units.
@@ -252,7 +248,7 @@ The following table lists data type support for compute units.
-
Matrix core support
-------------------
-------------------------------------------------------------------------------
The following table lists data type support for AMD GPU matrix cores.
@@ -333,7 +329,7 @@ The following table lists data type support for AMD GPU matrix cores.
-
Atomic operations support
-------------------------
-------------------------------------------------------------------------------
The following table lists data type support for atomic operations.
@@ -420,14 +416,14 @@ The following table lists data type support for atomic operations.
performance impact when they frequently access the same memory address.
Data type support in ROCm libraries
===================================
==========================================
ROCm library support for int8, float8 (E4M3), float8 (E5M2), int16, float16,
bfloat16, int32, tensorfloat32, float32, int64, and float64 is listed in the
following tables.
Libraries input/output type support
-----------------------------------
-------------------------------------------------------------------------------
The following tables list ROCm library support for specific input and output
data types. Refer to the corresponding library data type support page for a
@@ -448,37 +444,37 @@ detailed description.
- int32
- int64
*
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
- ✅/✅
- ❌/❌
- ❌/❌
- ❌/❌
*
- :doc:`rocRAND <rocrand:api-reference/data-type-support>`
- NA/✅
- NA/✅
- NA/✅
- NA/✅
- rocRAND (:doc:`details <rocrand:api-reference/data-type-support>`)
- -/✅
- -/✅
- -/✅
- -/✅
*
- :doc:`hipRAND <hiprand:api-reference/data-type-support>`
- NA/✅
- NA/✅
- NA/✅
- NA/✅
- hipRAND (:doc:`details <hiprand:api-reference/data-type-support>`)
- -/✅
- -/✅
- -/✅
- -/✅
*
- :doc:`rocPRIM <rocprim:reference/data-type-support>`
- rocPRIM (:doc:`details <rocprim:reference/data-type-support>`)
- ✅/✅
- ✅/✅
- ✅/✅
- ✅/✅
*
- :doc:`hipCUB <hipcub:api-reference/data-type-support>`
- hipCUB (:doc:`details <hipcub:api-reference/data-type-support>`)
- ✅/✅
- ✅/✅
- ✅/✅
- ✅/✅
*
- :doc:`rocThrust <rocthrust:data-type-support>`
- rocThrust (:doc:`details <rocthrust:data-type-support>`)
- ✅/✅
- ✅/✅
- ✅/✅
@@ -500,7 +496,7 @@ detailed description.
- float32
- float64
*
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
- ❌/❌
- ❌/❌
- ✅/✅
@@ -509,25 +505,25 @@ detailed description.
- ❌/❌
- ❌/❌
*
- :doc:`rocRAND <rocrand:api-reference/data-type-support>`
- NA/❌
- NA/❌
- NA/✅
- NA/❌
- NA/❌
- NA/✅
- NA/✅
- rocRAND (:doc:`details <rocrand:api-reference/data-type-support>`)
- -/❌
- -/❌
- -/✅
- -/❌
- -/❌
- -/✅
- -/✅
*
- :doc:`hipRAND <hiprand:api-reference/data-type-support>`
- NA/❌
- NA/❌
- NA/✅
- NA/❌
- NA/❌
- NA/✅
- NA/✅
- hipRAND (:doc:`details <hiprand:api-reference/data-type-support>`)
- -/❌
- -/❌
- -/✅
- -/❌
- -/❌
- -/✅
- -/✅
*
- :doc:`rocPRIM <rocprim:reference/data-type-support>`
- rocPRIM (:doc:`details <rocprim:reference/data-type-support>`)
- ❌/❌
- ❌/❌
- ✅/✅
@@ -536,7 +532,7 @@ detailed description.
- ✅/✅
- ✅/✅
*
- :doc:`hipCUB <hipcub:api-reference/data-type-support>`
- hipCUB (:doc:`details <hipcub:api-reference/data-type-support>`)
- ❌/❌
- ❌/❌
- ✅/✅
@@ -545,7 +541,7 @@ detailed description.
- ✅/✅
- ✅/✅
*
- :doc:`rocThrust <rocthrust:data-type-support>`
- rocThrust (:doc:`details <rocthrust:data-type-support>`)
- ❌/❌
- ❌/❌
- ⚠️/⚠️
@@ -554,14 +550,9 @@ detailed description.
- ✅/✅
- ✅/✅
.. note::
As random number generation libraries, rocRAND and hipRAND only specify output
data types for the random values they generate, with no need for input data
types.
Libraries internal calculations type support
--------------------------------------------
-------------------------------------------------------------------------------
The following tables list ROCm library support for specific internal data types.
Refer to the corresponding library data type support page for a detailed
@@ -582,7 +573,7 @@ description.
- int32
- int64
*
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
-
-
-
@@ -605,7 +596,7 @@ description.
- float32
- float64
*
- :doc:`hipSPARSELt <hipsparselt:reference/data-type-support>`
- hipSPARSELt (:doc:`details <hipsparselt:reference/data-type-support>`)
-
-
-

View File

@@ -10,7 +10,6 @@
| Version | Release date |
| ------- | ------------ |
| [6.3.3](https://rocm.docs.amd.com/en/docs-6.3.3/) | February 19, 2025 |
| [6.3.2](https://rocm.docs.amd.com/en/docs-6.3.2/) | January 28, 2025 |
| [6.3.1](https://rocm.docs.amd.com/en/docs-6.3.1/) | December 20, 2024 |
| [6.3.0](https://rocm.docs.amd.com/en/docs-6.3.0/) | December 3, 2024 |

View File

@@ -40,13 +40,11 @@ subtrees:
title: Training
subtrees:
- entries:
- file: how-to/rocm-for-ai/training/benchmark-docker/megatron-lm
title: Train a model with Megatron-LM
- file: how-to/rocm-for-ai/training/benchmark-docker/pytorch-training
title: Train a model with PyTorch
- file: how-to/rocm-for-ai/training/train-a-model.rst
title: Train a model
- file: how-to/rocm-for-ai/training/scale-model-training.rst
title: Scale model training
- file: how-to/rocm-for-ai/fine-tuning/index.rst
title: Fine-tuning LLMs
subtrees:
@@ -154,7 +152,7 @@ subtrees:
- entries:
- url: https://www.amd.com/system/files/TechDocs/instinct-mi200-cdna2-instruction-set-architecture.pdf
title: AMD Instinct MI200/CDNA2 ISA
- url: https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna2-white-paper.pdf
- url: https://www.amd.com/system/files/documents/amd-cdna2-white-paper.pdf
title: White paper
- file: conceptual/gpu-arch/mi100.md
title: MI100 microarchitecture
@@ -162,7 +160,7 @@ subtrees:
- entries:
- url: https://www.amd.com/system/files/TechDocs/instinct-mi100-cdna1-shader-instruction-set-architecture%C2%A0.pdf
title: AMD Instinct MI100/CDNA1 ISA
- url: https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna-white-paper.pdf
- url: https://www.amd.com/system/files/documents/amd-cdna-whitepaper.pdf
title: White paper
- file: conceptual/iommu.rst
title: Input-Output Memory Management Unit (IOMMU)

View File

@@ -1,4 +1,3 @@
rocm-docs-core==1.17.0
sphinx-reredirects
sphinx-sitemap
sphinxcontrib.datatemplates==0.11.0

View File

@@ -1,15 +1,13 @@
#
# This file is autogenerated by pip-compile with Python 3.11
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile docs/sphinx/requirements.in
# pip-compile requirements.in
#
accessible-pygments==0.0.5
# via pydata-sphinx-theme
alabaster==1.0.0
# via sphinx
appnope==0.1.4
# via ipykernel
asttokens==3.0.0
# via stack-data
attrs==25.1.0
@@ -25,7 +23,7 @@ beautifulsoup4==4.12.3
# via pydata-sphinx-theme
breathe==4.35.0
# via rocm-docs-core
certifi==2024.12.14
certifi==2024.8.30
# via requests
cffi==1.17.1
# via
@@ -39,14 +37,12 @@ click==8.1.7
# sphinx-external-toc
comm==0.2.2
# via ipykernel
cryptography==44.0.0
cryptography==43.0.3
# via pyjwt
debugpy==1.8.12
# via ipykernel
decorator==5.1.1
# via ipython
defusedxml==0.7.1
# via sphinxcontrib-datatemplates
deprecated==1.2.15
# via pygithub
docutils==0.21.2
@@ -55,9 +51,11 @@ docutils==0.21.2
# myst-parser
# pydata-sphinx-theme
# sphinx
exceptiongroup==1.2.2
# via ipython
executing==2.2.0
# via stack-data
fastjsonschema==2.21.1
fastjsonschema==2.20.0
# via
# nbformat
# rocm-docs-core
@@ -65,6 +63,8 @@ gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via rocm-docs-core
greenlet==3.1.1
# via sqlalchemy
idna==3.10
# via requests
imagesize==1.4.1
@@ -75,13 +75,13 @@ importlib-metadata==8.6.1
# myst-nb
ipykernel==6.29.5
# via myst-nb
ipython==8.32.0
ipython==8.31.0
# via
# ipykernel
# myst-nb
jedi==0.19.2
# via ipython
jinja2==3.1.4
jinja2==3.1.5
# via
# myst-parser
# sphinx
@@ -115,7 +115,7 @@ mdit-py-plugins==0.4.2
# via myst-parser
mdurl==0.1.2
# via markdown-it-py
myst-nb==1.2.0
myst-nb==1.1.2
# via rocm-docs-core
myst-parser==4.0.0
# via myst-nb
@@ -142,7 +142,7 @@ platformdirs==4.3.6
# via jupyter-core
prompt-toolkit==3.0.50
# via ipython
psutil==7.0.0
psutil==6.1.1
# via ipykernel
ptyprocess==0.7.0
# via pexpect
@@ -150,7 +150,7 @@ pure-eval==0.2.3
# via stack-data
pycparser==2.22
# via cffi
pydata-sphinx-theme==0.16.1
pydata-sphinx-theme==0.16.0
# via
# rocm-docs-core
# sphinx-book-theme
@@ -162,7 +162,7 @@ pygments==2.18.0
# ipython
# pydata-sphinx-theme
# sphinx
pyjwt[crypto]==2.10.1
pyjwt[crypto]==2.10.0
# via pygithub
pynacl==1.5.0
# via pygithub
@@ -175,7 +175,7 @@ pyyaml==6.0.2
# myst-parser
# rocm-docs-core
# sphinx-external-toc
pyzmq==26.2.1
pyzmq==26.2.0
# via
# ipykernel
# jupyter-client
@@ -215,8 +215,6 @@ sphinx==8.1.3
# sphinx-notfound-page
# sphinx-reredirects
# sphinx-sitemap
# sphinxcontrib-datatemplates
# sphinxcontrib-runcmd
sphinx-book-theme==1.1.3
# via rocm-docs-core
sphinx-copybutton==0.5.2
@@ -228,13 +226,11 @@ sphinx-external-toc==1.0.1
sphinx-notfound-page==1.0.4
# via rocm-docs-core
sphinx-reredirects==0.1.5
# via -r docs/sphinx/requirements.in
# via -r requirements.in
sphinx-sitemap==2.6.0
# via -r docs/sphinx/requirements.in
# via -r requirements.in
sphinxcontrib-applehelp==2.0.0
# via sphinx
sphinxcontrib-datatemplates==0.11.0
# via -r docs/sphinx/requirements.in
sphinxcontrib-devhelp==2.0.0
# via sphinx
sphinxcontrib-htmlhelp==2.1.0
@@ -243,16 +239,16 @@ sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-runcmd==0.2.0
# via sphinxcontrib-datatemplates
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
sqlalchemy==2.0.38
sqlalchemy==2.0.37
# via jupyter-cache
stack-data==0.6.3
# via ipython
tabulate==0.9.0
# via jupyter-cache
tomli==2.1.0
# via sphinx
tornado==6.4.2
# via
# ipykernel

View File

@@ -1,102 +0,0 @@
/* ------------------ Compatibility options grid ------------------ */
html {
--compat-border-radius: 2px;
--compat-accent-color: var(--pst-color-primary);
--compat-bg-color: var(--pst-color-on-background);
--compat-fg-color: var(--pst-color-primary-text);
--compat-head-color: var(--pst-color-surface);
--compat-param-hover-color: var(--pst-color-link-hover);
--compat-param-selected-color: var(--pst-color-primary);
}
html[data-theme="light"] {
--compat-border-color: var(--pst-gray-500);
--compat-param-disabled-color: var(--pst-gray-300);
}
html[data-theme="dark"] {
--compat-border-color: var(--pst-gray-600);
--compat-param-disabled-color: var(--pst-gray-600);
}
div#vllm-benchmark-ud-params-picker.container-fluid {
padding: 0 0 1rem 0;
}
div[data-param-k="model"] {
background-color: var(--compat-bg-color);
padding: 2px;
border: solid 1px var(--compat-border-color);
font-weight: 500;
cursor: pointer;
}
div[data-param-k="model"][data-param-state="selected"] {
background-color: var(--compat-param-selected-color);
color: var(--compat-fg-color);
}
div[data-param-k="model"][data-param-state="latest-version"] {
background-color: var(--compat-param-selected-color);
color: var(--compat-fg-color);
}
div[data-param-k="model"][data-param-state="disabled"] {
background-color: var(--compat-param-disabled-color);
text-decoration: line-through;
/* text-decoration-color: var(--pst-color-danger); */
cursor: auto;
}
div[data-param-k="model"]:not([data-param-state]):hover {
background-color: var(--compat-param-hover-color);
}
div[data-param-k="model-group"] {
background-color: var(--compat-bg-color);
padding: 2px;
border: solid 1px var(--compat-border-color);
font-weight: 500;
cursor: pointer;
}
div[data-param-k="model-group"][data-param-state="selected"] {
background-color: var(--compat-param-selected-color);
color: var(--compat-fg-color);
}
div[data-param-k="model-group"][data-param-state="latest-version"] {
background-color: var(--compat-param-selected-color);
color: var(--compat-fg-color);
}
div[data-param-k="model-group"][data-param-state="disabled"] {
background-color: var(--compat-param-disabled-color);
text-decoration: line-through;
/* text-decoration-color: var(--pst-color-danger); */
cursor: auto;
}
div[data-param-k="model-group"]:not([data-param-state]):hover {
background-color: var(--compat-param-hover-color);
}
.model-param-head {
background-color: var(--compat-head-color);
padding: 0.15rem 0.15rem 0.15rem 0.67rem;
/* margin: 2px; */
border-right: solid 2px var(--compat-accent-color);
font-weight: 600;
}
.model-param {
/* padding: 2px; */
/* margin: 0 2px 0 2px; */
/* margin: 2px; */
border: solid 1px var(--compat-border-color);
font-weight: 500;
}
.hidden {
display: none !important;
}

View File

@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest>
<remote name="rocm-org" fetch="https://github.com/ROCm/" />
<default revision="refs/tags/rocm-6.3.3"
<default revision="refs/tags/rocm-6.3.2"
remote="rocm-org"
sync-c="true"
sync-j="4" />

View File

@@ -1,47 +0,0 @@
# ROCm 6.3.3 release notes
The release notes provide a summary of notable changes since the previous ROCm release.
- [Release highlights](#release-highlights)
- [Operating system and hardware support changes](#operating-system-and-hardware-support-changes)
- [ROCm components versioning](#rocm-components)
- [Detailed component changes](#detailed-component-changes)
- [ROCm known issues](#rocm-known-issues)
- [ROCm upcoming changes](#rocm-upcoming-changes)
```{note}
If you're using Radeon™ PRO or Radeon GPUs in a workstation setting with a display connected, see the [Use ROCm on Radeon GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/docs/compatibility/native_linux/native_linux_compatibility.html)
documentation to verify compatibility and system requirements.
```
## Release highlights
The following are notable new features and improvements in ROCm 6.3.3. For changes to individual components, see
[Detailed component changes](#detailed-component-changes).
### ROCm Offline Installer Creator updates
The ROCm Offline Installer Creator 6.3.3 adds a new Post-Install Options menu, which includes a new ``udev`` option for adding GPU resources access for all users. It also moves the user-specific GPU access option (for the ``video,render`` group) from the Driver Options menu to the Post-Install Options menu. See the [ROCm Offline Installer Creator](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/rocm-offline-installer.html#post-install-options-menu) documentation for more information.
### ROCm documentation updates
ROCm documentation continues to be updated to provide clearer and more comprehensive guidance for a wider variety of user needs and use cases.
* [Tutorials for AI developers](https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/) have been added. These tutorials are Jupyter notebook-based, easy-to-follow documents. They are ideal for AI developers who want to learn about specific topics, including inference, fine-tuning, and training.
* The [LLM inference performance validation guide for AMD Instinct MI300X](https://rocm.docs.amd.com/en/latest/how-to/rocm-for-ai/inference/vllm-benchmark.html)
now includes additional models for performance benchmarking. The accompanying ROCm vLLM Docker has been upgraded to ROCm 6.3.1.
* The HIP documentation has been updated with new resources for developers. To learn more about concurrency, parallelism, and stream management on devices and multiple GPUs, see [Asynchronous concurrent execution](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/asynchronous.html).
* The following HIP documentation topics have been updated:
- [Virtual memory management](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/memory_management/virtual_memory.html)
- [Programming for HIP runtime compiler (RTC)](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_rtc.html)
- [HIP porting guide](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_porting_guide.html)
- [Porting CUDA driver API](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_porting_driver_api.html)
- [CUDA to HIP API function comparison](https://rocm.docs.amd.com/projects/HIP/en/latest/reference/api_syntax.html)

View File

@@ -1,8 +0,0 @@
## ROCm known issues
ROCm known issues are noted on {fab}`github` [GitHub](https://github.com/ROCm/ROCm/labels/Verified%20Issue). For known
issues related to individual components, review the [Detailed component changes](#detailed-component-changes).
### Zero value is displayed in ROCTx aggregated statistics
The ROCTx markers are standalone markers within the ROCProfiler-SDK library. Each marker reports only a single timestamp, which is recorded as the `start_timestamp` and `end_timestamp`. As a result, the value for aggregated statistics presented in `TotalDurationNs`, `maxNs`, and `minNs`, is zero. The zero value indicates that the actual execution time is not associated with the markers, which is an expected behavior.

View File

@@ -1,7 +0,0 @@
## Operating system and hardware support changes
Operating system and hardware support remain unchanged in this release.
See the [Compatibility
matrix](https://rocm.docs.amd.com/en/docs-6.3.3/compatibility/compatibility-matrix.html)
for more information about operating system and hardware compatibility.

View File

@@ -1,17 +0,0 @@
## ROCm upcoming changes
The following changes to the ROCm software stack are anticipated for future releases.
### ROCTracer and ROCProfiler (rocprof and rocprofv2) deprecation
Development and support for ROCTracer and ROCProfiler (`rocprof` and `rocprofv2`) will phase out in favor of ROCprofiler-SDK (`rocprofv3`) in upcoming ROCm releases. Going forward, only critical defect fixes will be addressed for older versions of profiling tools and libraries. Upgrade to the latest version of ROCprofiler-SDK (`rocprofv3`) library to ensure continued support and access to new features.
### AMDGPU wavefront size compiler macro deprecation
The `__AMDGCN_WAVEFRONT_SIZE__` macro will be deprecated in an upcoming
release. It is recommended to remove any use of this macro. For more information, see [AMDGPU
support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.3/LLVM/clang/html/AMDGPUSupport.html).
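A quick way to audit a codebase for remaining uses of the macro before it is removed is a recursive search; the sketch below assumes a source tree at `./src` and common C/C++/HIP file extensions, both of which are placeholders.
```bash
# Minimal sketch: list remaining uses of the deprecated wavefront-size macro.
# ./src and the file globs are placeholders; adjust them to your project layout.
grep -rn --include='*.h' --include='*.hpp' --include='*.cpp' --include='*.hip' \
  '__AMDGCN_WAVEFRONT_SIZE__' ./src
```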
### HIPCC Perl scripts deprecation
The HIPCC Perl scripts (`hipcc.pl` and `hipconfig.pl`) will be removed in an upcoming release.

View File

@@ -103,7 +103,7 @@ $(call adddep,composable_kernel,lightning hipcc hip_on_rocclr rocm-cmake)
$(call adddep,half,rocm-cmake)
$(call adddep,hipblas-common,lightning)
$(call adddep,hipblas,hip_on_rocclr rocblas rocsolver lightning hipcc)
$(call adddep,hipblaslt,hip_on_rocclr openmp_extras lightning hipcc hipblas-common rocm-dev)
$(call adddep,hipblaslt,hip_on_rocclr openmp_extras lightning hipcc hipblas-common roctracer)
$(call adddep,hipcub,hip_on_rocclr rocprim lightning hipcc)
$(call adddep,hipfft,hip_on_rocclr openmp_extras rocfft rocrand hiprand lightning hipcc)
$(call adddep,hipfort,rocblas hipblas rocsparse hipsparse rocfft hipfft rocrand hiprand rocsolver hipsolver lightning hipcc)

View File

@@ -10,7 +10,9 @@ printUsage() {
echo " -c, --clean Removes all amd_smi build artifacts"
echo " -r, --release Build non-debug version amd_smi (default is debug)"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
echo " -w, --wheel Creates python wheel package of amd-smi.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of type referred to by pkg_type"
echo " -p, --package <type> Specify packaging format"
echo " -h, --help Prints this help"
@@ -25,7 +27,6 @@ printUsage() {
PROJ_NAME="amdsmi"
PACKAGE_ROOT="$(getPackageRoot)"
TARGET="build"
PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE="$(getIncludePath)"
AMDSMI_BUILD_DIR=$(getBuildPath $PROJ_NAME)
@@ -42,7 +43,7 @@ SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,wheel,address_sanitizer,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -60,6 +61,8 @@ do
ADDRESS_SANITIZER=true ; shift ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
@@ -99,6 +102,7 @@ build_amdsmi() {
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
$(rocm_common_cmake_params) \
$(rocm_cmake_params) \
${GEN_NINJA} \
-DENABLE_LDCONFIG=OFF \
-DAMD_SMI_PACKAGE="${AMDSMI_PKG_NAME}" \
-DCPACK_PACKAGE_VERSION_MAJOR="1" \
@@ -106,14 +110,14 @@ build_amdsmi() {
-DCPACK_PACKAGE_VERSION_PATCH="0" \
-DADDRESS_SANITIZER="$ADDRESS_SANITIZER" \
-DBUILD_TESTS=ON \
"$AMD_SMI_LIB_ROOT"
-S "$AMD_SMI_LIB_ROOT"
popd
fi
echo "Making amd_smi package:"
cmake --build "$AMDSMI_BUILD_DIR" -- $AMDSMI_MAKE_OPTS
cmake --build "$AMDSMI_BUILD_DIR" -- $AMDSMI_MAKE_OPTS install
cmake --build "$AMDSMI_BUILD_DIR" -- $AMDSMI_MAKE_OPTS package
cmake --build "$AMDSMI_BUILD_DIR" -- $DASH_JAY
cmake --build "$AMDSMI_BUILD_DIR" -- $DASH_JAY install
cmake --build "$AMDSMI_BUILD_DIR" -- $DASH_JAY package
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$AMDSMI_PACKAGE_DEB_DIR" $AMDSMI_BUILD_DIR/*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$AMDSMI_PACKAGE_RPM_DIR" $AMDSMI_BUILD_DIR/*.rpm
@@ -135,7 +139,7 @@ verifyEnvSetup
case $TARGET in
(clean) clean_amdsmi ;;
(build) build_amdsmi ;;
(build) build_amdsmi; build_wheel "$AMDSMI_BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac
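The hunks above wire the new `-w/--wheel` option into the script by extending the `getopt` short and long option strings and adding a branch to the option `case`; the following is a minimal, self-contained sketch of that shared pattern, with a placeholder usage message standing in for the real `printUsage` and the eventual `build_wheel` call reduced to an echo.
```bash
#!/bin/bash
# Sketch of the option-parsing pattern shared by these build scripts, showing
# only the -w/--wheel wiring; the usage text and final echo are placeholders.
WHEEL_PACKAGE=false
VALID_STR=$(getopt -o hw --long help,wheel -- "$@")
eval set -- "$VALID_STR"
while true; do
    case "$1" in
        (-h | --help)  echo "usage: $0 [-h|--help] [-w|--wheel]"; exit 0 ;;
        (-w | --wheel) WHEEL_PACKAGE=true ; shift ;;
        (--) shift; break ;;   # end delimiter
        (*)  echo "Unexpected option: [$1]" >&2 ; exit 20 ;;
    esac
done
echo "WHEEL_PACKAGE=$WHEEL_PACKAGE"   # the real scripts pass this on to build_wheel
```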

View File

@@ -1,41 +1,42 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src AMDMIGraphX
build_amdmigraphx() {
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
cd $COMPONENT_SRC
if ! command -v rbuild &> /dev/null; then
pip3 install https://github.com/RadeonOpenCompute/rbuild/archive/master.tar.gz
fi
# Remove CK
xargs -d '\n' -a ${OUT_DIR}/ck.files rm -- || true
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
fi
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx1102;gfx942;gfx1200;gfx1201"
fi
init_rocm_common_cmake_params
mkdir -p ${BUILD_DIR} && rm -rf ${BUILD_DIR}/* && mkdir -p ${HOME}/amdmigraphx && rm -rf ${HOME}/amdmigraphx/*
rbuild package -d "${HOME}/amdmigraphx" -B "${BUILD_DIR}" \
--cxx="${ROCM_PATH}/llvm/bin/clang++" \
--cc="${ROCM_PATH}/llvm/bin/clang" \
--cxx="$(set_build_variables __CLANG++__)" \
--cc="$(set_build_variables __CLANG__)" \
"${rocm_math_common_cmake_params[@]}" \
-DCMAKE_MODULE_LINKER_FLAGS="-Wl,--enable-new-dtags,--build-id=sha1,--rpath,$ROCM_LIB_RPATH" \
-DGPU_TARGETS="${GPU_TARGETS}" \
-DCMAKE_INSTALL_RPATH=""
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
cd $BUILD_DIR && cmake --build . -- install -j${PROC}
show_build_cache_stats
@@ -50,7 +51,7 @@ clean_amdmigraphx() {
stage2_command_args "$@"
case $TARGET in
build) build_amdmigraphx ;;
build) build_amdmigraphx; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_amdmigraphx ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -14,6 +14,7 @@ printUsage() {
type referred to by pkg_type"
echo " -h, --help Prints this help"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of comgr. It needs to be used along with -r option"
echo " -l, --link_llvm_static Link to LLVM statically. Default is to dynamically link to LLVM; requires that LLVM dylibs are created."
echo
echo "Possible values for <type>:"
@@ -24,6 +25,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME=amd_comgr
PROJ_NAME=$API_NAME
LIB_NAME=lib${API_NAME}
@@ -33,8 +35,8 @@ PACKAGE_ROOT=$(getPackageRoot)
PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE=$(getIncludePath)
BUILD_DIR=$(getBuildPath $API_NAME)
PACKAGE_DEB=$(getPackageRoot)/deb/$API_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$API_NAME
PACKAGE_DEB=$PACKAGE_ROOT/deb/$PROJ_NAME
PACKAGE_RPM=$PACKAGE_ROOT/rpm/$PROJ_NAME
PACKAGE_PREFIX=$ROCM_INSTALL_PATH
BUILD_TYPE=Debug
MAKE_OPTS="$DASH_JAY CTEST_OUTPUT_ON_FAILURE=1 -C $BUILD_DIR"
@@ -44,13 +46,17 @@ CLEAN_OR_OUT=0;
PKGTYPE="deb"
MAKETARGET="deb"
# Link to LLVM dynamically by default.
# Temporarily set this default to OFF until we resolve issues.
LINK_LLVM_DYLIB="OFF"
VALID_STR=`getopt -o hcraslo:p: --long help,clean,release,address_sanitizer,static,link_llvm_static,outdir:,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswlo:p: --long help,clean,release,address_sanitizer,static,wheel,link_llvm_static,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -63,6 +69,8 @@ do
set_address_sanitizer_on ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-l | --link_llvm_static)
LINK_LLVM_DYLIB="OFF"; shift ;;
(-o | --outdir)
@@ -105,6 +113,7 @@ build() {
echo " Building Shared Object "
fi
# Remove CTEST var once SWDEV-381396 is fixed
cmake \
$(rocm_cmake_params) \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
@@ -143,7 +152,7 @@ verifyEnvSetup
case $TARGET in
(clean) clean ;;
(build) build ;;
(build) build; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

View File

@@ -2,10 +2,14 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
# Temporarily disable Address Sanitizer
ENABLE_ADDRESS_SANITIZER=false
set_component_src composable_kernel
disable_debug_package_generation
# Set the GPU_ARCH_LIST to the supported GPUs needed after https://github.com/ROCm/composable_kernel/pull/1536/
GPU_ARCH_LIST="gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
build_miopen_ck() {
@@ -23,10 +27,17 @@ build_miopen_ck() {
GPU_ARCH_LIST="gfx942"
ack_and_skip_static
fi
# if ENABLE_GPU_ARCH is set in env by Job parameter ENABLE_GPU_ARCH, then set GPU_ARCH_LIST to that value
# This needs to be removed when CK aligns with the other components and uses -DAMDGPU_TARGETS or -DGPU_TARGETS;
# then we can use set_gpu_arch from compute_helper.sh and get rid of all the if clauses
if [ -n "$ENABLE_GPU_ARCH" ]; then
GPU_ARCH_LIST="$ENABLE_GPU_ARCH"
fi
# Latest CK requires Python 3.8 as the minimum.
# Point CMake to that explicit location and adjust LD_LIBRARY_PATH.
PYTHON_VERSION_WORKAROUND=''
echo "DISTRO_ID: ${DISTRO_ID}"
if [ "$DISTRO_ID" = "rhel-8.8" ] || [ "$DISTRO_ID" = "sles-15.5" ] ; then
if [ "$DISTRO_ID" = "rhel-8.8" ] || [ "$DISTRO_NAME" == "sles" ] || [ "$DISTRO_ID" = "debian-10" ]; then
EXTRA_PYTHON_PATH=/opt/Python-3.8.13
PYTHON_VERSION_WORKAROUND="-DCK_USE_ALTERNATIVE_PYTHON=${EXTRA_PYTHON_PATH}/bin/python3.8"
# For the python interpreter we need to export LD_LIBRARY_PATH.
@@ -38,23 +49,25 @@ build_miopen_ck() {
init_rocm_common_cmake_params
cmake \
${GEN_NINJA} \
-DBUILD_DEV=OFF \
"${rocm_math_common_cmake_params[@]}" \
${PYTHON_VERSION_WORKAROUND} \
-DCPACK_GENERATOR="${PKGTYPE^^}" \
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
-DCMAKE_CXX_COMPILER=$(set_build_variables __CLANG++__) \
-DCMAKE_C_COMPILER=$(set_build_variables __CLANG__) \
${LAUNCHER_FLAGS} \
-DGPU_ARCHS="${GPU_ARCH_LIST}" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS=" -O3 " \
"$COMPONENT_SRC"
cmake --build . -- -j${PROC} package
cmake --build "$BUILD_DIR" -- install
mkdir -p $PACKAGE_DIR && cp ./*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
}
# Use the function to unset the LDFLAGS and CXXFLAGS
# specifically set for ASAN
unset_asan_env_vars() {
ASAN_CMAKE_PARAMS="false"
export ADDRESS_SANITIZER="OFF"
@@ -77,7 +90,7 @@ clean_miopen_ck() {
stage2_command_args "$@"
case $TARGET in
build) build_miopen_ck ;;
build) build_miopen_ck; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_miopen_ck ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -11,6 +11,8 @@ printUsage() {
echo " -r, --release Make a release build instead of a debug build"
echo " --enable-assertions Enable assertions"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -w, --wheel Creates python wheel package of dbgapi.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
@@ -25,6 +27,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME=rocm-dbgapi
AMD_DBGAPI_NAME=amd-dbgapi
MAKEINSTALL_MANIFEST=makeinstall_manifest.txt
@@ -36,21 +39,24 @@ PACKAGE_ROOT=$(getPackageRoot)
PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE=$(getIncludePath)
BUILD_DIR=$(getBuildPath $API_NAME)
PACKAGE_DEB=$(getPackageRoot)/deb/$API_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$API_NAME
PACKAGE_DEB=$(getPackageRoot)/deb/$PROJ_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$PROJ_NAME
#PACKAGE_PREFIX=$ROCM_INSTALL_PATH
BUILD_TYPE=Debug
MAKE_OPTS=($DASH_JAY -C "$BUILD_DIR")
MAKE_OPTS=($DASH_JAY -C "$BUILD_DIR") # Note that DASH_JAY might have a space after the -j
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"
DODOCSBUILD=true
VALID_STR=$(getopt -o hcraso:p:M --long help,clean,release,enable-assertions,static,address_sanitizer,outdir:,package:skip_man_pages -- "$@")
#parse the arguments
VALID_STR=$(getopt -o hcraswo:p:M --long help,clean,release,enable-assertions,static,wheel,address_sanitizer,outdir:,package:skip_man_pages -- "$@")
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -66,12 +72,14 @@ do
set_address_sanitizer_on ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
WHEEL_PACKAGE=true ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; ((CLEAN_OR_OUT|=2)) ; shift 1 ;;
(-M | --skip_man_pages) DODOCSBUILD=false;;
(-p | --package)
MAKETARGET="$2" ; shift 1;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -104,7 +112,7 @@ build() {
then
echo " No $ROCM_DBGAPI_ROOT/CMakeLists.txt file, skipping rocm-dbgapi" >&2
echo " No $ROCM_DBGAPI_ROOT/CMakeLists.txt file, skipping rocm-dbgapi"
exit 0
exit 0 # This is not an error
fi
echo "Building $PROJ_NAME"
@@ -121,11 +129,14 @@ build() {
cmake --build "$BUILD_DIR" -- "${MAKE_OPTS[@]}"
"$DODOCSBUILD" && cmake --build "$BUILD_DIR" -- "${MAKE_OPTS[@]}" doc
cmake --build "$BUILD_DIR" -- "${MAKE_OPTS[@]}" install
# install_manifest.txt is created by make install and make package with the same name unless
# component packaging is enabled. To avoid it being overwritten by make package, move the manifest
# file to a different name; it can then be used for build cleanup.
mv "$BUILD_DIR/install_manifest.txt" "$BUILD_DIR/$MAKEINSTALL_MANIFEST"
cmake --build "$BUILD_DIR" -- "${MAKE_OPTS[@]}" package
mkdir -p "$PACKAGE_LIB"
# handle the library being in more than one place to avoid breaking the build.
(
shopt -s nullglob
cp -R "$BUILD_DIR/lib/${LIB_NAME}"* "$BUILD_DIR/${LIB_NAME}"* "$PACKAGE_LIB"
@@ -151,7 +162,7 @@ verifyEnvSetup
case $TARGET in
(clean) clean ;;
(build) build ;;
(build) build; build_wheel "$BUILD_DIR" "$PROJ_NAME";;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

View File

@@ -10,7 +10,9 @@ printUsage() {
echo " -c, --clean Clean output and delete all intermediate work"
echo " -r, --release Build a release version of the package"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of devicelibs.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
@@ -19,10 +21,11 @@ printUsage() {
return 0
}
PROJ_NAME="devicelibs"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_BIN="$(getBinPath)"
PACKAGE_LIB="$(getLibPath)"
BUILD_PATH="$(getBuildPath devicelibs)"
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
INSTALL_PATH="$(getPackageRoot)"
LIGHTNING_BUILD_PATH="$(getBuildPath lightning)"
DEB_PATH="$(getDebPath devicelibs)"
@@ -36,11 +39,13 @@ CLEAN_OR_OUT=0;
PKGTYPE="deb"
MAKETARGET="deb"
VALID_STR=`getopt -o hcraso: --long help,clean,release,static,address_sanitizer,outdir: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo: --long help,clean,release,static,wheel,address_sanitizer,outdir: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -53,6 +58,8 @@ do
ack_and_ignore_asan ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
--) shift; break;; # end delimiter
@@ -71,7 +78,9 @@ fi
clean_devicelibs() {
# Delete cmake output directory
rm -rf "$BUILD_PATH"
# Delete the *.bc files from the PACKAGE_LIB directory
rm -f $PACKAGE_LIB/hc*.bc
rm -f $PACKAGE_LIB/irif*.bc
rm -f $PACKAGE_LIB/ockl*.bc
@@ -80,6 +89,7 @@ clean_devicelibs() {
rm -f $PACKAGE_LIB/opencl*.bc
rm -f $PACKAGE_LIB/openmp*.bc
rm -rf $PACKAGE_ROOT/amdgcn
# Delete any packages generated
rm -rf "$DEB_PATH"
rm -rf "$RPM_PATH"
}
@@ -96,6 +106,7 @@ build_devicelibs() {
if [ ! -e Makefile ]; then
cmake $(rocm_cmake_params) \
$(rocm_common_cmake_params) \
${GEN_NINJA} \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DROCM_DEVICE_LIBS_BITCODE_INSTALL_LOC_NEW="$bitcodeInstallLoc/amdgcn" \
-DROCM_DEVICE_LIBS_BITCODE_INSTALL_LOC_OLD="amdgcn" \
@@ -133,7 +144,7 @@ print_output_directory() {
}
case $TARGET in
(clean) clean_devicelibs ;;
(build) build_devicelibs; package_devicelibs ;;
(build) build_devicelibs; package_devicelibs; build_wheel "$BUILD_PATH" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

View File

@@ -1,22 +1,31 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src half
build_half() {
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
# ASAN packaging is not required for HALF, since it's a header-only package
# Setting the asan_cmake_params to false will disable ASAN packaging
ASAN_CMAKE_PARAMS="false"
fi
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
cmake \
-DCMAKE_INSTALL_PREFIX="$ROCM_PATH" \
-DCPACK_PACKAGING_INSTALL_PREFIX=${ROCM_PATH}\
-DCPACK_SET_DESTDIR="OFF" \
-DCPACK_RPM_PACKAGE_RELOCATABLE="ON" \
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
"$COMPONENT_SRC"
@@ -25,7 +34,7 @@ build_half() {
cmake --build "$BUILD_DIR" -- install
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -39,7 +48,7 @@ clean_half() {
stage2_command_args "$@"
case $TARGET in
build) build_half ;;
build) build_half; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_half ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -10,6 +10,8 @@ printUsage() {
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of hip-on-rocclr.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of type referred to by pkg_type"
echo " -t, --offload-arch=<arch> Specify arch for catch tests ex: --offload-arch=gfx1030 --offload-arch=gfx1100"
echo " -p, --package <type> Specify packaging format"
@@ -25,7 +27,6 @@ printUsage() {
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
MAKEOPTS="$DASH_JAY"
PROJ_NAME="hip-on-rocclr"
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
TARGET="build"
@@ -36,7 +37,6 @@ PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
PREFIX_PATH="$PACKAGE_ROOT"
CORE_BUILD_DIR="$(getBuildPath hsa-core)"
ROCclr_BUILD_DIR="$(getBuildPath rocclr)"
HIPCC_BUILD_DIR="$(getBuildPath hipcc)"
CATCH_BUILD_DIR="$(getBuildPath catch)"
CATCH_SRC="$HIP_CATCH_TESTS_ROOT/catch"
SAMPLES_SRC="$HIP_CATCH_TESTS_ROOT/samples"
@@ -53,9 +53,10 @@ MAKETARGET="deb"
PKGTYPE="deb"
OFFLOAD_ARCH=()
DEFAULT_OFFLOAD_ARCH=(gfx900 gfx906 gfx908 gfx90a gfx940 gfx941 gfx942 gfx1030 gfx1031 gfx1033 gfx1034 gfx1035 gfx1100 gfx1101 gfx1102 gfx1200 gfx1201)
DEFAULT_OFFLOAD_ARCH=(gfx900 gfx906 gfx908 gfx90a gfx942 gfx1030 gfx1031 gfx1033 gfx1034 gfx1035 gfx1100 gfx1101 gfx1102 gfx1103 gfx1150 gfx1151 gfx1200 gfx1201)
VALID_STR=`getopt -o hcrast:o: --long help,clean,release,address_sanitizer,static,offload-arch=:,outdir: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswt:o: --long help,clean,release,address_sanitizer,static,wheel,offload-arch=:,outdir: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -72,11 +73,13 @@ do
set_address_sanitizer_on ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-t | --offload-arch=)
OFFLOAD_ARCH+=( "$2" ); ((CLEAN_OR_OUT|=2)); shift 2 ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
@@ -98,7 +101,10 @@ if [ $RET_CONFLICT -ge 30 ]; then
exit $RET_CONFLICT
fi
clean_hip_on_rocclr() {
# Delete cmake output directory
rm -rf "$BUILD_PATH"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
@@ -106,17 +112,23 @@ clean_hip_on_rocclr() {
}
build_hip_on_rocclr() {
# TODO: This if condition is a temporary workaround so that mainline builds don't error out
# until the build is migrated from the hipamd repo to the clr repo
if [ -e "$CLR_ROOT/CMakeLists.txt" ]; then
# We are in a branch that has migrated to clr repo
_HIP_CMAKELIST_DIR="$CLR_ROOT"
_HIP_CMAKELIST_OPT="-DCLR_BUILD_HIP=ON -DCLR_BUILD_OCL=OFF"
if [ -e "$HIPOTHER_ROOT/hipnv" ]; then
# We are in a branch that has hipnv headers migrated
_HIP_CMAKELIST_OPT="$_HIP_CMAKELIST_OPT -DHIPNV_DIR=$HIPOTHER_ROOT/hipnv"
fi
elif [ ! -e "$HIPAMD_ROOT/CMakeLists.txt" ]; then
# We seem to have hit a branch in which both the old and the new repo don't exist
echo "No $HIPAMD_ROOT/CMakeLists.txt file, skipping hip on rocclr" >&2
echo "No $HIPAMD_ROOT/CMakeLists.txt file, skipping hip on rocclr"
exit 0
exit 0 # This is not an error
else
# We are in a branch that has not yet migrated to the clr repo
_HIP_CMAKELIST_DIR="$HIPAMD_ROOT"
_HIP_CMAKELIST_OPT=""
fi
@@ -125,6 +137,7 @@ build_hip_on_rocclr() {
mkdir -p "$BUILD_PATH"
pushd "$BUILD_PATH"
# FIXME: Remove -DROCclr_DIR/LIBROCclr_STATIC_DIR
if [ ! -e Makefile ]; then
echo "Building HIP-On-ROCclr CMake environment"
print_lib_type $SHARED_LIBS
@@ -140,7 +153,7 @@ build_hip_on_rocclr() {
-DCMAKE_SKIP_BUILD_RPATH=TRUE \
-DCPACK_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
-DROCM_PATH="$ROCM_INSTALL_PATH" \
-DHIPCC_BIN_DIR="$HIPCC_BUILD_DIR" \
-DHIPCC_BIN_DIR="$ROCM_PATH/bin" \
-DHIP_CATCH_TEST=1 \
$_HIP_CMAKELIST_OPT \
"$_HIP_CMAKELIST_DIR"
@@ -165,8 +178,13 @@ build_catch_tests() {
rm -rf "$CATCH_BUILD_DIR"
mkdir -p "$CATCH_BUILD_DIR"
pushd "$CATCH_BUILD_DIR"
# use the newly built hip as HIP for catch
export HIP_PATH="$ROCM_INSTALL_PATH"
export ROCM_PATH="$ROCM_INSTALL_PATH"
# Note: The rocm_common_cmake_params will provide CMAKE_EXE_LINKER_FLAGS_INIT
# for binaries located in /opt/rocm/bin
# hip-catch binaries are located in each test folder under /opt/rocm/share/hip/catch_test/
# Append the EXE LINKER flags with ROCm library path
cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
@@ -177,6 +195,7 @@ build_catch_tests() {
$(rocm_common_cmake_params) \
-DCPACK_RPM_DEBUGINFO_PACKAGE=FALSE \
-DCPACK_DEBIAN_DEBUGINFO_PACKAGE=FALSE \
-DCMAKE_EXE_LINKER_FLAGS_INIT=-Wl,--enable-new-dtags,--build-id=sha1,--rpath,$ROCM_EXE_RPATH:$ROCM_INSTALL_PATH/lib:/opt/rocm/lib \
-DCPACK_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
"$CATCH_SRC"
@@ -190,6 +209,7 @@ build_catch_tests() {
}
package_samples() {
# TODO: To be removed once the issue related to ASAN builds is fixed
if [ "$ASAN_CMAKE_PARAMS" == "true" ] ; then
echo "Disable the packaging of HIP samples" >&2
return
@@ -198,13 +218,17 @@ package_samples() {
if [ ! -e "$SAMPLES_SRC/CMakeLists.txt" ]; then
echo "HIP samples source not found at: $SAMPLES_SRC" >&2
echo "Using samples package from hip project: $BUILD_PATH" >&2
# TODO: change to return failure after hip-tests samples change is available in mainline
return
fi
# package samples
rm -rf "$SAMPLES_BUILD_DIR"
mkdir -p "$SAMPLES_BUILD_DIR"
pushd "$SAMPLES_BUILD_DIR"
# The cmake path is different for asan and non-asan builds.
# Fetch after getting build type. Default will be non-asan build
local CMAKE_PATH="$(getCmakePath)"
# use the newly built hip as HIP for samples
export HIP_PATH="$ROCM_INSTALL_PATH"
export ROCM_PATH="$ROCM_INSTALL_PATH"
cmake \
@@ -291,10 +315,24 @@ print_output_directory() {
}
case $TARGET in
(clean) clean_hip_on_rocclr; clean_hip_tests ;;
(build) build_hip_on_rocclr; build_catch_tests; package_hip_on_rocclr; package_samples; copy_hip_tests;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean_hip_on_rocclr
clean_hip_tests
;;
(build)
build_hip_on_rocclr
build_catch_tests
package_hip_on_rocclr
package_samples
build_wheel "$BUILD_PATH" "$PROJ_NAME"
copy_hip_tests
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -2,26 +2,29 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipBLAS-common
build_hipblas-common() {
echo "Start build"
CXX=$(set_build_variables __C_++__)
cd $COMPONENT_SRC
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
echo "C compiler: $CC"
echo "CXX compiler: $CXX"
init_rocm_common_cmake_params
cmake \
${GEN_NINJA} \
"${rocm_math_common_cmake_params[@]}" \
"$COMPONENT_SRC"
cmake --build "$BUILD_DIR" -- install
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -34,7 +37,7 @@ clean_hipblas-common() {
stage2_command_args "$@"
case $TARGET in
build) build_hipblas-common ;;
build) build_hipblas-common; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipblas-common ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -2,28 +2,32 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipBLAS
build_hipblas() {
echo "Start build"
CXX="g++"
CXX=$(set_build_variables __G_++__)
CXX_FLAG=
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
CXX="amdclang++"
CXX_FLAG="-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++"
CXX=$(set_build_variables __AMD_CLANG_++__)
CXX_FLAG=$(set_build_variables __CMAKE_CXX_PARAMS__)
fi
CLIENTS_SAMPLES="ON"
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
# fixme: remove CLIENTS_SAMPLES=OFF once SWDEV-417076 is fixed
CLIENTS_SAMPLES="OFF"
fi
SHARED_LIBS="ON"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
SHARED_LIBS="OFF"
fi
echo "C compiler: $CC"
echo "CXX compiler: $CXX"
@@ -39,10 +43,10 @@ build_hipblas() {
init_rocm_common_cmake_params
cmake \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
"${rocm_math_common_cmake_params[@]}" \
-DUSE_CUDA=OFF \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DBUILD_CLIENTS_TESTS=ON \
-DBUILD_CLIENTS_TESTS=ON \
-DBUILD_CLIENTS_BENCHMARKS=ON \
-DBUILD_CLIENTS_SAMPLES="${CLIENTS_SAMPLES}" \
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
@@ -54,7 +58,7 @@ build_hipblas() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -68,7 +72,7 @@ clean_hipblas() {
stage2_command_args "$@"
case $TARGET in
build) build_hipblas ;;
build) build_hipblas; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipblas ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,10 +1,20 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipBLASLt
# Not enabling the debug files by default
disable_debug_package_generation
# if ENABLE_GPU_ARCH is set in env by Job parameter ENABLE_GPU_ARCH, then set GFX_ARCH to that value
if [ -n "$ENABLE_GPU_ARCH" ]; then
set_gpu_arch "$ENABLE_GPU_ARCH"
else
# gfx90a:xnack+;gfx90a:xnack-;gfx942
set_gpu_arch "all"
fi
build_hipblaslt() {
echo "Start build"
@@ -20,31 +30,31 @@ build_hipblaslt() {
cd $COMPONENT_SRC
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
rebuild_lapack
fi
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
# gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942
GPU_TARGETS=all
# hipblaslt requires Python 3.12.
# Point CMake to that explicit location and adjust LD_LIBRARY_PATH.
if [ "$DISTRO_ID" = "rhel-8.8" ] || [ "$DISTRO_NAME" == "sles" ] || \
[ "$DISTRO_ID" = "rhel-9.1" ] || [ "$DISTRO_ID" = "almalinux-8.10" ] || \
[ "$DISTRO_ID" = "debian-10" ]; then
EXTRA_PYTHON_PATH=/opt/Python-3.12.7
EXTRA_CMAKE_OPTIONS="-DPython_ROOT=/opt/Python-3.12.7"
export LD_LIBRARY_PATH=${EXTRA_PYTHON_PATH}/lib
fi
init_rocm_common_cmake_params
CXX=$(set_build_variables CXX)\
CXX=$(set_build_variables __CXX__)\
cmake \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
"${rocm_math_common_cmake_params[@]}" \
-DTensile_LOGIC= \
-DTensile_CODE_OBJECT_VERSION=default \
-DTensile_CPU_THREADS= \
-DTensile_CODE_OBJECT_VERSION=4 \
-DTensile_CPU_THREADS=$((PROC / 4)) \
-DTensile_LIBRARY_FORMAT=msgpack \
-DBUILD_CLIENTS_SAMPLES=ON \
-DBUILD_CLIENTS_TESTS=ON \
-DLINK_BLIS=ON \
-DBUILD_CLIENTS_BENCHMARKS=ON \
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
${EXTRA_CMAKE_OPTIONS} \
"$COMPONENT_SRC"
cmake --build "$BUILD_DIR" -- -j${PROC}
@@ -52,8 +62,7 @@ build_hipblaslt() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -66,7 +75,7 @@ clean_hipblaslt() {
stage2_command_args "$@"
case $TARGET in
build) build_hipblaslt ;;
build) build_hipblaslt; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipblaslt ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -9,6 +9,8 @@ printUsage() {
echo "Options:"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -w, --wheel Creates python wheel package of hipcc.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -r, --release Makes a release build"
@@ -21,6 +23,7 @@ printUsage() {
}
## Build environment variables
API_NAME=hipcc
PROJ_NAME=$API_NAME
@@ -29,10 +32,11 @@ MAKEOPTS="$DASH_JAY"
BUILD_TYPE="Debug"
SHARED_LIBS="ON"
BUILD_DIR=$(getBuildPath $API_NAME)
PACKAGE_DEB=$(getPackageRoot)/deb/$API_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$API_NAME
PACKAGE_DEB=$(getPackageRoot)/deb/$PROJ_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$PROJ_NAME
PACKAGE_SRC="$(getSrcPath)"
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,address_sanitizer,static,outdir:,wheel,package: -- "$@"`
eval set -- "$VALID_STR"
@@ -45,13 +49,15 @@ do
TARGET="clean" ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 1 ;;
(-w | --wheel)
WHEEL_PACKAGE=true ;;
(-r | --release)
BUILD_TYPE="RelWithDebInfo" ;;
(-s | --static)
SHARED_LIBS="OFF" ;;
(-h | --help)
printUsage ; exit 0 ;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo "Invalid option [$1]" >&2; printUsage; exit 1 ;;
esac
@@ -87,11 +93,12 @@ build() {
fi
cmake \
${GEN_NINJA} \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DHIPCC_BACKWARD_COMPATIBILITY=OFF \
-DCMAKE_INSTALL_PREFIX="$OUT_DIR" \
-DCMAKE_INSTALL_PREFIX="$ROCM_PATH" \
$HIPCC_ROOT
popd
@@ -118,10 +125,20 @@ print_output_directory() {
}
case $TARGET in
(clean) clean ;;
(build) build ; copy_hipcc_sources ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean
;;
(build)
build
build_wheel "$BUILD_DIR" "$PROJ_NAME"
copy_hipcc_sources
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -2,7 +2,7 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipCUB
@@ -17,26 +17,22 @@ build_hipcub() {
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
# ASAN packaging is not required for HIPCUB, since its header only package
# Setting the asan_cmake_params to false will disable ASAN packaging
ASAN_CMAKE_PARAMS="false"
fi
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
CXX=$(set_build_variables CXX)\
CXX=$(set_build_variables __CXX__)\
cmake \
${GEN_NINJA} \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
"${rocm_math_common_cmake_params[@]}" \
-DCMAKE_MODULE_PATH="${ROCM_PATH}/lib/cmake/hip;${ROCM_PATH}/hip/cmake" \
-Drocprim_DIR="${ROCM_PATH}/rocprim" \
-DBUILD_TEST=ON \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
"$COMPONENT_SRC"
cmake --build "$BUILD_DIR" -- -j${PROC}
@@ -44,7 +40,7 @@ build_hipcub() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -58,7 +54,7 @@ clean_hipcub() {
stage2_command_args "$@"
case $TARGET in
build) build_hipcub ;;
build) build_hipcub; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipcub ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -2,13 +2,17 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipFFT
build_hipfft() {
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
@@ -18,17 +22,10 @@ build_hipfft() {
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
cmake \
-DCMAKE_CXX_COMPILER=$(set_build_variables CXX) \
"$(set_build_variables __CMAKE_CXX_PARAMS__)" \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
"${rocm_math_common_cmake_params[@]}" \
-DCMAKE_MODULE_PATH="${ROCM_PATH}/lib/cmake/hip" \
-DCMAKE_SKIP_BUILD_RPATH=TRUE \
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
@@ -41,7 +38,7 @@ build_hipfft() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -55,7 +52,7 @@ clean_hipfft() {
stage2_command_args "$@"
case $TARGET in
build) build_hipfft ;;
build) build_hipfft; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipfft ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,13 +1,18 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipfort
build_hipfort() {
echo "Start build"
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
fi
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
@@ -26,7 +31,7 @@ build_hipfort() {
-DCMAKE_Fortran_FLAGS_DEBUG="" \
${LAUNCHER_FLAGS} \
-DROCM_SYMLINK_LIBS=OFF \
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
-DHIPFORT_AR="${ROCM_PATH}/${ROCM_LLVMDIR}/bin/llvm-ar" \
-DHIPFORT_RANLIB="${ROCM_PATH}/${ROCM_LLVMDIR}/bin/llvm-ranlib" \
"$COMPONENT_SRC"
@@ -36,7 +41,7 @@ build_hipfort() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -50,7 +55,7 @@ clean_hipfort() {
stage2_command_args "$@"
case $TARGET in
build) build_hipfort ;;
build) build_hipfort; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipfort ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -10,22 +10,33 @@ printUsage() {
echo " -c, --clean Clean output and delete all intermediate work"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
echo " -r, --release Make a release build"
echo " -r, --release Make a release build"
echo " -n, --skip_hipify_tests Skip hipify-clang testing"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
echo " -i, --clang_headers Install clang headers"
echo " -w, --wheel Creates python wheel package of hipify-clang.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo
return 0
}
# Build Environmental Variables
PROJ_NAME="hipify"
TARGET="build"
MAKEOPTS="$DASH_JAY"
NINJAOPTS="$DASH_JAY"
HIPIFY_CLANG_BUILD_DIR="$(getBuildPath $HIPIFY_ROOT)"
BUILD_TYPE="Debug"
PACKAGE_ROOT="$(getPackageRoot)"
HIPIFY_CLANG_HASH=""
LIGHTNING_PATH="$ROCM_INSTALL_PATH/llvm"
LIGHTNING_BUILD_PATH="$PACKAGE_ROOT/build/lightning"
RUN_HIPIFY_TESTS=true
CUDA_DEFAULT_VERSION="12.3.2"
CUDNN_DEFAULT_VERSION="9.2.0"
GCC_MIN_VERSION="9.2"
ADDRESS_SANITIZER=false
INSTALL_CLANG_HEADERS="OFF"
DEB_PATH="$(getDebPath hipify)"
@@ -36,7 +47,8 @@ MAKETARGET="deb"
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso: --long help,clean,release,static,address_sanitizer,outdir: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcrnawsio: --long help,clean,release,skip_hipify_tests,wheel,static,address_sanitizer,clang_headers,outdir: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -48,15 +60,21 @@ do
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
(-r | --release)
BUILD_TYPE="RelWithDebInfo" ; shift ;;
(-n | --skip_hipify_tests)
RUN_HIPIFY_TESTS=false; shift ;;
(-a | --address_sanitizer)
set_asan_env_vars
set_address_sanitizer_on
ADDRESS_SANITIZER=true ; shift ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-i | --clang_headers)
INSTALL_CLANG_HEADERS="ON" ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -79,6 +97,7 @@ clean_hipify() {
}
package_hipify() {
# set-up dirs
if [ "$PACKAGEEXT" = "deb" ]; then
rm -rf "$DEB_PATH"
mkdir -p "$DEB_PATH"
@@ -89,8 +108,9 @@ package_hipify() {
mkdir -p "$RPM_PATH"
fi
# make the pkg
pushd "$HIPIFY_CLANG_BUILD_DIR"
make $MAKEOPTS package_hipify-clang
ninja $NINJAOPTS package_hipify-clang
popd
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$DEB_PATH" $HIPIFY_CLANG_BUILD_DIR/hipify*.deb
@@ -102,7 +122,38 @@ build_hipify() {
mkdir -p "$HIPIFY_CLANG_BUILD_DIR"
pushd "$HIPIFY_CLANG_BUILD_DIR"
# Check the installed GCC version
INSTALLED_GCC_VERSION=$(gcc --version | head -n 1 | sed -E 's/[^0-9]*([0-9]+\.[0-9]+).*/\1/')
if echo "$INSTALLED_GCC_VERSION $GCC_MIN_VERSION" | awk '{exit !($1 < $2)}'; then
RUN_HIPIFY_TESTS=false
echo "Minimum required GCC version: $GCC_MIN_VERSION"
echo "Installed GCC version $INSTALLED_GCC_VERSION does not meet the minimum GCC version requirement."
echo "Skipping hipify tests"
fi
if $ADDRESS_SANITIZER ; then
echo "Skipping hipify tests becasue of Address Sanitizer"
RUN_HIPIFY_TESTS=false
fi
if $RUN_HIPIFY_TESTS ; then
# TODO: Add option for user defined cuda version?
CUDA_VERSION="${CUDA_DEFAULT_VERSION}"
CUDNN_VERSION="${CUDNN_DEFAULT_VERSION}"
if [[ "$DISTRO_ID" == "rhel-8"* || "$DISTRO_NAME" == "sles" || "$DISTRO_ID" == "debian-10" ]]; then
EXTRA_PYTHON_PATH=/opt/Python-3.8.13
export LD_LIBRARY_PATH=${EXTRA_PYTHON_PATH}/lib:$LD_LIBRARY_PATH
fi
echo "Copy FileCheck into ROCM_INSTALL_PATH"
cp "$LIGHTNING_BUILD_PATH/bin/FileCheck" "$LIGHTNING_PATH/bin/FileCheck"
fi
cmake \
${GEN_NINJA} \
-DHIPIFY_CLANG_TESTS="$RUN_HIPIFY_TESTS" \
-DCMAKE_BUILD_TYPE="$BUILD_TYPE" \
$(rocm_common_cmake_params) \
-DCMAKE_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
@@ -110,10 +161,20 @@ build_hipify() {
-DCMAKE_PREFIX_PATH="$LIGHTNING_PATH" \
-DADDRESS_SANITIZER="$ADDRESS_SANITIZER" \
-DHIPIFY_INSTALL_CLANG_HEADERS="$INSTALL_CLANG_HEADERS" \
-DCUDA_TOOLKIT_ROOT_DIR="/usr/local/cuda-${CUDA_DEFAULT_VERSION}" \
-DCUDA_DNN_ROOT_DIR="/usr/local/cuDNN/${CUDNN_DEFAULT_VERSION}" \
-DCUDA_CUB_ROOT_DIR="/usr/local/cuda-${CUDA_DEFAULT_VERSION}" \
-DLLVM_EXTERNAL_LIT="${LIGHTNING_BUILD_PATH}/bin/llvm-lit" \
$HIPIFY_ROOT
cmake --build . -- $MAKEOPTS install
if $RUN_HIPIFY_TESTS ; then
echo "Running hipify tests"
cmake --build . -- $NINJAOPTS test-hipify
fi
cmake --build . -- $NINJAOPTS install
popd
pushd "$HIPIFY_ROOT"
HIPIFY_CLANG_HASH=`git describe --dirty --long --match [0-9]* --always`
popd
@@ -132,10 +193,20 @@ print_output_directory() {
}
case $TARGET in
(clean) clean_hipify ;;
(build) build_hipify; package_hipify ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean_hipify
;;
(build)
build_hipify
package_hipify
build_wheel "$HIPIFY_CLANG_BUILD_DIR" "$PROJ_NAME"
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipRAND
@@ -41,17 +41,10 @@ build_hiprand() {
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
CXX=$(set_build_variables CXX)\
CXX=$(set_build_variables __CXX__)\
cmake \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DBUILD_TEST=ON \
-DBUILD_BENCHMARK=ON \
@@ -67,7 +60,7 @@ build_hiprand() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
}
clean_hiprand() {
@@ -89,7 +82,7 @@ print_output_directory() {
}
case $TARGET in
build) build_hiprand ;;
build) build_hiprand; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hiprand ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -2,7 +2,7 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipSOLVER
@@ -10,12 +10,12 @@ build_hipsolver() {
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
CXX_FLAG="-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++"
CXX_FLAG=$(set_build_variables __CMAKE_CXX_PARAMS__)
fi
cd $COMPONENT_SRC
CXX="amdclang++"
CXX=$(set_build_variables __AMD_CLANG_++__)
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
@@ -39,7 +39,7 @@ build_hipsolver() {
init_rocm_common_cmake_params
cmake \
-DUSE_CUDA=OFF \
-DCMAKE_CXX_COMPILER=${CXX} \
-DCMAKE_CXX_COMPILER=${CXX} \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
@@ -55,7 +55,7 @@ build_hipsolver() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -69,7 +69,7 @@ clean_hipsolver() {
stage2_command_args "$@"
case $TARGET in
build) build_hipsolver ;;
build) build_hipsolver; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipsolver ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -2,7 +2,7 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
PATH=${ROCM_PATH}/bin:$PATH
set_component_src hipSPARSE
@@ -10,12 +10,12 @@ set_component_src hipSPARSE
build_hipsparse() {
echo "Start build"
CXX="g++"
CXX=$(set_build_variables __G_++__)
CXX_FLAG=
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
CXX="${ROCM_PATH}/llvm/bin/clang++"
CXX_FLAG="-DCMAKE_CXX_COMPILER=${ROCM_PATH}/llvm/bin/clang++"
CXX=$(set_build_variables __CXX__)
CXX_FLAG=$(set_build_variables __CMAKE_CXX_PARAMS__)
fi
cd $COMPONENT_SRC
@@ -54,7 +54,7 @@ build_hipsparse() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -68,7 +68,7 @@ clean_hipsparse() {
stage2_command_args "$@"
case $TARGET in
build) build_hipsparse ;;
build) build_hipsparse; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipsparse ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,10 +1,18 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipSPARSELt
disable_debug_package_generation
# if ENABLE_GPU_ARCH is set in env by Job parameter ENABLE_GPU_ARCH, then set GFX_ARCH to that value
if [ -n "$ENABLE_GPU_ARCH" ]; then
set_gpu_arch "$ENABLE_GPU_ARCH"
else
# gfx90a:xnack+;gfx90a:xnack-;gfx942
set_gpu_arch "all"
fi
while [ "$1" != "" ];
do
case $1 in
@@ -18,6 +26,31 @@ do
shift 1
done
create_blis_link()
{
#find the pre-installed blis library and create the link under $BUILD_DIR/deps/blis
BLIS_REF_ROOT="$BUILD_DIR/deps/blis"
mkdir -p "$BLIS_REF_ROOT"/include
for blis_path in "/opt/AMD/aocl/aocl-linux-gcc-4.2.0/gcc" \
"/opt/AMD/aocl/aocl-linux-gcc-4.1.0/gcc" \
"/opt/AMD/aocl/aocl-linux-gcc-4.0.0/gcc";
do
if [ -e "${blis_path}/lib_ILP64/libblis-mt.a" ] ; then
ln -sf "${blis_path}/include_ILP64" "${BLIS_REF_ROOT}/include/blis"
ln -sf "${blis_path}/lib_ILP64" "${BLIS_REF_ROOT}/lib"
return
fi
done
if [[ -e "/usr/local/lib/libblis.a" ]]; then
ln -sf /usr/local/include/blis ${BLIS_REF_ROOT}/include/blis
ln -sf /usr/local/lib ${BLIS_REF_ROOT}/lib
return
fi
echo "error: BLIS lib not found" >&2
return 1
}
build_hipsparselt() {
echo "Start build"
@@ -32,23 +65,19 @@ build_hipsparselt() {
cd $COMPONENT_SRC
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
# gfx940;gfx941;gfx942
GPU_TARGETS=all
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
create_blis_link
EXTRA_CMAKE_OPTIONS=("-DLINK_BLIS=ON" "-DBUILD_DIR=${BUILD_DIR}")
fi
FC=gfortran \
CXX="${ROCM_PATH}/bin/hipcc" \
init_rocm_common_cmake_params
CXX=$(set_build_variables __CXX__) \
cmake \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
"${rocm_math_common_cmake_params[@]}" \
-DTensile_LOGIC= \
-DTensile_CODE_OBJECT_VERSION=default \
-DTensile_CODE_OBJECT_VERSION=4 \
-DTensile_CPU_THREADS= \
-DTensile_LIBRARY_FORMAT=msgpack \
-DBUILD_CLIENTS_SAMPLES=ON \
@@ -56,13 +85,14 @@ build_hipsparselt() {
-DBUILD_CLIENTS_BENCHMARKS=ON \
-DCMAKE_INSTALL_PREFIX=${ROCM_PATH} \
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
${EXTRA_CMAKE_OPTIONS[@]} \
"$COMPONENT_SRC"
cmake --build "$BUILD_DIR" -- -j${PROC}
cmake --build "$BUILD_DIR" -- install
cmake --build "$BUILD_DIR" -- package
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
$SCCACHE_BIN -s || echo "Unable to display sccache stats"
}
@@ -86,7 +116,7 @@ print_output_directory() {
}
case $TARGET in
build) build_hipsparselt ;;
build) build_hipsparselt; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hipsparselt ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -2,9 +2,10 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src hipTensor
disable_debug_package_generation
build_hiptensor() {
echo "Start build hipTensor"
@@ -22,17 +23,11 @@ build_hiptensor() {
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942"
fi
cmake \
-B "${BUILD_DIR}" \
"${rocm_math_common_cmake_params[@]}" \
$(set_build_variables CMAKE_C_CXX) \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
"$(set_build_variables __CMAKE_CC_PARAMS__)" \
"$(set_build_variables __CMAKE_CXX_PARAMS__)" \
${LAUNCHER_FLAGS} \
"$COMPONENT_SRC"
@@ -41,7 +36,7 @@ build_hiptensor() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p "$PACKAGE_DIR" && cp ${BUILD_DIR}/*.${PKGTYPE} "$PACKAGE_DIR"
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -55,7 +50,7 @@ clean_hiptensor() {
stage2_command_args "$@"
case $TARGET in
build) build_hiptensor ;;
build) build_hiptensor; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_hiptensor ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -42,6 +42,7 @@ DEB_PATH="$(getDebPath $PROJ_NAME)"
RPM_PATH="$(getRpmPath $PROJ_NAME)"
INSTALL_PATH="${ROCM_INSTALL_PATH}/lib/llvm"
LLVM_ROOT_LCL="${LLVM_ROOT}"
ROCM_WHEEL_DIR="${BUILD_PATH}/_wheel"
TARGET="all"
MAKEOPTS="$DASH_JAY"
@@ -59,7 +60,7 @@ MAKETARGET="deb"
ASSERT_LLVM_VERSION_MAJOR=""
ASSERT_LLVM_VERSION_MINOR=""
SKIP_LIT_TESTS=0
SKIP_LIT_TESTS=1
BUILD_MANPAGES="ON"
STATIC_FLAG=
@@ -149,6 +150,7 @@ ENABLE_RUNTIMES="$ENABLE_RUNTIMES;libcxx;libcxxabi"
BOOTSTRAPPING_BUILD_LIBCXX=1
clean_lightning() {
rm -rf "$ROCM_WHEEL_DIR"
rm -rf "$BUILD_PATH"
rm -rf "$DEB_PATH"
rm -rf "$RPM_PATH"
@@ -330,6 +332,15 @@ build_lightning() {
echo "End Workaround for race condition"
cmake --build . -- $MAKEOPTS
case "$DISTRO_ID" in
(rhel*|centos*)
RHEL_BUILD=1
;;
(*)
RHEL_BUILD=0
;;
esac
if [ $SKIP_LIT_TESTS -eq 0 ]; then
if [ $RHEL_BUILD -eq 1 ]; then
cmake --build . -- $MAKEOPTS check-lld check-mlir
@@ -1147,4 +1158,9 @@ case $TARGET in
(*) die "Invalid target $TARGET" ;;
esac
if [[ $WHEEL_PACKAGE == true ]]; then
echo "Wheel Package build started !!!!"
create_wheel_package
fi
echo "Operation complete"

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src MIOpen
@@ -19,18 +19,18 @@ build_miopen_mlir() {
mkdir build && cd build
cmake \
-G Ninja \
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
-DCMAKE_C_COMPILER="$(set_build_variables __CLANG__)" \
-DCMAKE_CXX_COMPILER="$(set_build_variables __CLANG++__)" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_PREFIX_PATH="${ROCM_PATH};${HOME}/miopen-deps" \
-DCMAKE_PREFIX_PATH="${ROCM_PATH};${HOME}/miopen-deps" \
-DCMAKE_INSTALL_PREFIX="$ROCM_PATH" \
-DBUILD_FAT_LIBROCKCOMPILER=1 \
..
..
cmake --build . -- librockCompiler -j${PROC}
cmake --build . -- install
rm -rf _CPack_Packages/ && find -name '*.o' -delete
echo "Finished building rocMLIR"
}
@@ -43,12 +43,6 @@ build_miopen_deps() {
echo "Start build"
cd "$COMPONENT_SRC"
# Update rocm-recipes for boost link
# https://github.com/ROCm/MIOpen/pull/3457/files
sed -i 's/329203d79f9fe77ae5d0d742af0966bc57f4dfc8/92c6695449c85887962f45509b376f2eb0d284f7/g' \
rbuild.ini \
install_deps.cmake \
dev-requirements.txt
# Commenting the rocMLIR & composable_kernel from requirements.txt
sed -i '/ROCm\/rocMLIR@\|ROCm\/composable_kernel@/s/^/#/' requirements.txt
# Extract MLIR commit from requirements.txt
@@ -56,7 +50,7 @@ build_miopen_deps() {
pip3 install https://github.com/RadeonOpenCompute/rbuild/archive/master.tar.gz
PATH="${PATH}:${ROCM_PATH}:${HOME}/.local/bin" rbuild prepare -d "$HOME/miopen-deps" --cxx=${ROCM_PATH}/llvm/bin/clang++ --cc ${ROCM_PATH}/llvm/bin/clang
PATH="${PATH}:${ROCM_PATH}:${HOME}/.local/bin" rbuild prepare -d "$HOME/miopen-deps" --cxx="$(set_build_variables __CLANG++__)" --cc "$(set_build_variables __CLANG__)"
build_miopen_mlir "$MLIR_COMMIT"
show_build_cache_stats
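The compiler paths in this and the following scripts are now resolved through set_build_variables tokens instead of hard-coded ${ROCM_PATH}/llvm/bin paths. The helper itself is defined in compute_utils.sh and is not shown in this diff; a hypothetical sketch of the dispatch its call sites imply (token-to-path mapping inferred from the lines being replaced; the real helper is likely also ASAN/static aware):
set_build_variables() {
    case "$1" in
        __CLANG__)   echo "${ROCM_PATH}/llvm/bin/clang" ;;
        __CLANG++__) echo "${ROCM_PATH}/llvm/bin/clang++" ;;
        __CXX__)     echo "${ROCM_PATH}/bin/amdclang++" ;;
        *)           echo "unknown token: $1" >&2; return 1 ;;
    esac
}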

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src MIOpen
@@ -9,6 +9,8 @@ PACKAGE_DIR=${PACKAGE_DIR%\/*}/miopen-hip
DEB_PATH=$PACKAGE_DIR
RPM_PATH=$PACKAGE_DIR
disable_debug_package_generation
build_miopen_hip() {
echo "Start build"
@@ -31,19 +33,19 @@ build_miopen_hip() {
"${rocm_math_common_cmake_params[@]}" \
-DMIOPEN_BACKEND=HIP \
-DMIOPEN_OFFLINE_COMPILER_PATHS_V2=1 \
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
-DCMAKE_PREFIX_PATH="${ROCM_PATH};${ROCM_PATH}/hip;${HOME}/miopen-deps" \
-DCMAKE_CXX_COMPILER=$(set_build_variables __CLANG++__) \
-DCMAKE_C_COMPILER=$(set_build_variables __CLANG__) \
-DCMAKE_PREFIX_PATH="${ROCM_PATH};${ROCM_PATH}/hip;/tmp/miopen-deps" \
-DHIP_OC_COMPILER="${ROCM_PATH}/bin/clang-ocl" \
-DMIOPEN_TEST_DISCRETE=OFF \
"$COMPONENT_SRC"
"$COMPONENT_SRC"
cmake --build "$BUILD_DIR" -- -j${PROC}
cmake --build "$BUILD_DIR" -- install
cmake --build "$BUILD_DIR" -- package
rm -rf $BUILD_DIR/_CPack_Packages/ && find $BUILD_DIR -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -62,7 +64,7 @@ checkout_lfs() {
stage2_command_args "$@"
case $TARGET in
build) build_miopen_hip ;;
build) build_miopen_hip; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_miopen_hip ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,39 +1,44 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src MIVisionX
BUILD_DEV=ON
build_mivisionx() {
if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ] ; then
echo "Not building mivisionx for ${DISTRO_ID}. Exiting..."
return 0
fi
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
CXX=$(set_build_variables __AMD_CLANG_++__)
mkdir -p $BUILD_DIR && cd $BUILD_DIR
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
# Setting BUILD_DEV to OFF. This will prevent the installation of
# header files and other files under the share/libexec folders; the ASAN pkg doesn't need them
BUILD_DEV=OFF
fi
init_rocm_common_cmake_params
echo "C compiler: $CC"
echo "CXX compiler: $CXX"
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
init_rocm_common_cmake_params
cmake \
"${rocm_math_common_cmake_params[@]}" \
-DROCM_PATH="$ROCM_PATH" \
-DBUILD_DEV=$BUILD_DEV \
-DCMAKE_INSTALL_LIBDIR=$(getInstallLibDir) \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DROCM_DEP_ROCMCORE=ON \
-DROCAL_PYTHON=OFF \
${LAUNCHER_FLAGS} \
@@ -44,7 +49,7 @@ build_mivisionx() {
cpack -G ${PKGTYPE^^}
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -58,7 +63,7 @@ clean_mivisionx() {
stage2_command_args "$@"
case $TARGET in
build) build_mivisionx ;;
build) build_mivisionx; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_mivisionx ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -0,0 +1,149 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
printUsage() {
echo
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -w, --wheel Creates python wheel package of omniperf.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo
return 0
}
## Build environment variables
API_NAME="omniperf"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
# PACKAGE_INCLUDE="$(getIncludePath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY -C $BUILD_DIR"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"
#parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,static,address_sanitizer,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
-h | --help)
printUsage ; exit 0;;
-c | --clean)
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
-r | --release)
BUILD_TYPE="Release" ; shift ;;
-a | --address_sanitizer)
set_asan_env_vars
set_address_sanitizer_on ; shift ;;
-s | --static)
SHARED_LIBS="OFF" ; shift ;;
-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
-p | --package)
MAKETARGET="$2" ; shift 2 ;;
-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
--) shift; break;; # end delimiter
*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
done
RET_CONFLICT=1
check_conflicting_options "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
if [ $RET_CONFLICT -ge 30 ]; then
print_vars "$API_NAME" "$TARGET" "$BUILD_TYPE" "$SHARED_LIBS" "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
exit $RET_CONFLICT
fi
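# (CLEAN_OR_OUT is a small bitmask: -c/--clean ORs in 1 and -o/--outdir ORs in 2.
#  check_conflicting_options comes from compute_utils.sh and is expected to raise
#  RET_CONFLICT to >= 30 for mutually exclusive combinations, which aborts the
#  script here after print_vars dumps the parsed settings.)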
clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}
build() {
echo "Building $PROJ_NAME"
if [ "$DISTRO_ID" = centos-7 ]; then
echo "Skip make and uploading packages for Omniperf on Centos7 distro, due to python dependency"
exit 0
fi
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
pushd "$BUILD_DIR" || exit
echo "ROCm CMake Params: $(rocm_cmake_params)"
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"
#install python deps
#python3 -m pip install -t ${BUILD_DIR}/python-libs -r ${OMNIPERF_ROOT}/requirements.txt
print_lib_type $SHARED_LIBS
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DCHECK_PYTHON_DEPS=NO \
-DPYTHON_DEPS=${BUILD_DIR}/python-libs \
-DMOD_INSTALL_PATH=${BUILD_DIR}/modulefiles \
"$OMNIPERF_ROOT"
fi
make $MAKE_OPTS
make $MAKE_OPTS install
make $MAKE_OPTS package
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}
print_output_directory() {
case ${PKGTYPE} in
("deb")
echo "${PACKAGE_DEB}";;
("rpm")
echo "${PACKAGE_RPM}";;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
}
verifyEnvSetup
case "$TARGET" in
(clean) clean ;;
(build) build; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac
echo "Operation complete"

View File

@@ -0,0 +1,243 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
printUsage() {
echo
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -w, --wheel Creates python wheel package of omnitrace.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo
return 0
}
## Build environment variables
API_NAME="omnitrace"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
# PACKAGE_INCLUDE="$(getIncludePath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
# PACKAGE_PREFIX="$ROCM_INSTALL_PATH"
BUILD_TYPE="Debug"
MAKE_OPTS="-j 8"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0
MAKETARGET="deb"
PKGTYPE="deb"
ASAN=0
#parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,address_sanitizer,static,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"
while true; do
#echo "parocessing $1"
case "$1" in
-h | --help)
printUsage
exit 0
;;
-c | --clean)
TARGET="clean"
((CLEAN_OR_OUT |= 1))
shift
;;
-r | --release)
BUILD_TYPE="RelWithDebInfo"
shift
;;
-a | --address_sanitizer)
ack_and_ignore_asan
# set_asan_env_vars
# set_address_sanitizer_on
ASAN=1
shift
;;
-s | --static)
SHARED_LIBS="OFF"
shift
;;
-o | --outdir)
TARGET="outdir"
PKGTYPE=$2
# OUT_DIR_SPECIFIED=1
((CLEAN_OR_OUT |= 2))
shift 2
;;
-p | --package)
MAKETARGET="$2"
shift 2
;;
-w | --wheel)
WHEEL_PACKAGE=true
shift
;;
--)
shift
break
;; # end delimiter
*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] " >&2
exit 20
;;
esac
done
RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
exit $RET_CONFLICT
fi
clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}
build_omnitrace() {
echo "Building $PROJ_NAME"
if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "ubuntu-24.04" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ]; then
echo "Skip make and uploading packages for Omnitrace on \"${DISTRO_ID}\" distro"
exit 0
fi
if [ $ASAN == 1 ]; then
echo "Skip make and uploading packages for Omnitrace on ASAN build"
exit 0
fi
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
echo "Created build directory: $BUILD_DIR"
fi
cd $OMNITRACE_ROOT || exit
echo "Current submodule status"
git submodule status
echo "Cached (old) submodule status"
git submodule status --cached
cat .git/config
echo "Updating submodules"
git submodule init
# copy the new URL to your local config
git submodule sync --recursive
# force update the submodule from the new URL
git submodule update --init --recursive --force
echo "Updated submodule status"
git submodule status
cat .git/config
echo "Build directory: $BUILD_DIR"
pushd "$BUILD_DIR" || exit
print_lib_type $SHARED_LIBS
ELFUTIL_URL="https://compute-artifactory.amd.com/artifactory/rocm-generic-local/dev-tools/omnitrace/elfutils-0.188.tar.bz2"
BINUTIL_URL="https://compute-artifactory.amd.com/artifactory/rocm-generic-local/dev-tools/omnitrace/binutils-2.40.tar.gz"
echo "ROCm CMake Params: $(rocm_cmake_params)"
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"
echo "ELFUTIL_URL=$ELFUTIL_URL, BINUTIL_URL=$BINUTIL_URL"
if [ $ASAN == 1 ]; then
echo "Address Sanitizer path"
# Commenting out the below cmake command as it is not working as expected
# LD_LIBRARY_PATH=$ROCM_INSTALL_PATH/lib/asan:$LD_LIBRARY_PATH
# cmake \
# $(rocm_cmake_params) \
# $(rocm_common_cmake_params) \
# -DOMNITRACE_BUILD_{LIBUNWIND,DYNINST}=ON \
# -DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
# -DAMDDeviceLibs_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/AMDDeviceLibs" \
# -Dhip_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hip" \
# -Dhip-lang_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hip-lang" \
# -Damd_comgr_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/amd_comgr" \
# -Dhsa-runtime64_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hsa-runtime64" \
# -Dhsakmt_DIR="${ROCM_INSTALL_PATH}/lib/asan/cmake/hsakmt" \
# -DROCM_PATH="${ROCM_INSTALL_PATH}/lib/asan" \
# -Drocprofiler_ROOT_DIR="${ROCM_INSTALL_PATH}/lib/asan" \
# -DCMAKE_HIP_COMPILER_ROCM_ROOT="${ROCM_INSTALL_PATH}" \
# -DCMAKE_PREFIX_PATH="${ROCM_INSTALL_PATH};${ROCM_INSTALL_PATH}/lib/asan" \
# -DCMAKE_LIBRARY_PATH="${ROCM_INSTALL_PATH}/lib/asan" \
# -DCPACK_DEBIAN_PACKAGE_SHLIBDEPS=OFF \
# "$OMNITRACE_ROOT"
else
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DOMNITRACE_BUILD_{LIBUNWIND,DYNINST}=ON \
-DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
-DElfUtils_DOWNLOAD_URL="$ELFUTIL_URL" \
-D{DYNINST,TIMEMORY}_BINUTILS_DOWNLOAD_URL="$BINUTIL_URL" \
"$OMNITRACE_ROOT"
fi
popd || exit
echo "Make Options: $MAKE_OPTS"
cmake --build "$BUILD_DIR" --target all -- $MAKE_OPTS
cmake --build "$BUILD_DIR" --target install -- $MAKE_OPTS
cmake --build "$BUILD_DIR" --target package -- $MAKE_OPTS
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}
print_output_directory() {
case ${PKGTYPE} in
"deb")
echo "${PACKAGE_DEB}"
;;
"rpm")
echo "${PACKAGE_RPM}"
;;
*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2
exit 1
;;
esac
exit
}
verifyEnvSetup
case "$TARGET" in
clean) clean ;;
build) build_omnitrace; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac
echo "Operation complete"

View File

@@ -0,0 +1,144 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
PROJ_NAME="OpenCL-ICD-Loader"
TARGET="build"
MAKEOPTS="$DASH_JAY"
BUILD_TYPE="Debug"
OPENCL_ICD_LOADER_BUILD_DIR="$(getBuildPath ${PROJ_NAME})"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_DEB="$PACKAGE_ROOT/deb/${PROJ_NAME,,}"
PACKAGE_RPM="$PACKAGE_ROOT/rpm/${PROJ_NAME,,}"
CLEAN_OR_OUT=0;
PKGTYPE="deb"
MAKETARGET="deb"
printUsage() {
echo
echo "Usage: $(basename "${BASH_SOURCE}") [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -w, --wheel Creates python wheel package of opencl-icd-loader.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo " -o, --outdir Print path of output directory containing packages"
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo
return 0
}
RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
print_vars $TARGET $BUILD_TYPE $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
exit $RET_CONFLICT
fi
clean_opencl_icd_loader() {
echo "Cleaning $PROJ_NAME"
rm -rf "$OPENCL_ICD_LOADER_BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/opencl-icd-loader"
}
build_opencl_icd_loader() {
echo "Building $PROJ_NAME"
if [ ! -e "$OPENCL_ICD_LOADER_ROOT/CMakeLists.txt" ]
then
echo "No $OPENCL_ICD_LOADER_ROOT/CMakeLists.txt file, skipping opencl icd loader" >&2
echo "No $OPENCL_ICD_LOADER_ROOT/CMakeLists.txt file, skipping opencl icd loader"
exit 0 # This is not an error
fi
mkdir -p "$OPENCL_ICD_LOADER_BUILD_DIR"
pushd "$OPENCL_ICD_LOADER_BUILD_DIR"
if [ ! -e Makefile ]; then
cmake \
-S "$OPENCL_ICD_LOADER_ROOT" \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DENABLE_OPENCL_LAYERS="OFF" \
-DOPENCL_ICD_LOADER_HEADERS_DIR="$OPENCL_HEADERS_ROOT"
fi
cmake --build . -- $MAKEOPTS
echo "Installing opencl-icd-loader"
cmake --build . -- $MAKEOPTS install
popd
}
package_opencl_icd_loader() {
echo "Packaging $PROJ_NAME"
pushd "$OPENCL_ICD_LOADER_BUILD_DIR"
cmake --build . -- package
mkdir -p $PACKAGE_DEB
mkdir -p $PACKAGE_RPM
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" *.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" *.rpm
popd
}
print_output_directory() {
case ${PKGTYPE} in
("deb")
echo ${PACKAGE_DEB};;
("rpm")
echo ${PACKAGE_RPM};;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
}
#parse the arguments
VALID_STR=`getopt -o hcraswlo:p: --long help,clean,release,wheel,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
case "$1" in
(-c | --clean )
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
(-r | --release )
BUILD_TYPE="RelWithDebInfo" ; shift ;;
(-h | --help )
printUsage ; exit 0 ;;
(-a | --address_sanitizer)
ack_and_ignore_asan ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
(-s | --static)
echo "-s parameter accepted but ignored" ; shift ;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
done
case $TARGET in
(clean)
clean_opencl_icd_loader
;;
(build)
build_opencl_icd_loader
package_opencl_icd_loader
build_wheel "$OPENCL_ICD_LOADER_BUILD_DIR" "$PROJ_NAME"
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -8,11 +8,13 @@ printUsage() {
echo
echo "Options:"
echo " -h, --help Prints this help"
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of type referred to by pkg_type"
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
echo " -w, --wheel Creates python wheel package of opencl.
It needs to be used along with -r option"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
@@ -23,9 +25,7 @@ printUsage() {
}
PROJ_NAME="opencl-on-rocclr"
MAKEOPTS="$DASH_JAY"
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
TARGET="build"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
@@ -39,7 +39,8 @@ MAKETARGET="deb"
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso: --long help,clean,release,static,address_sanitizer,outdir: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcrawso: --long help,clean,release,static,wheel,address_sanitizer,outdir: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -56,9 +57,11 @@ do
set_address_sanitizer_on ; shift ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -73,6 +76,7 @@ if [ $RET_CONFLICT -ge 30 ]; then
fi
clean_opencl_on_rocclr() {
# Delete cmake output directory
rm -rf "$BUILD_PATH"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
@@ -83,14 +87,19 @@ clean_opencl_on_rocclr() {
}
build_opencl_on_rocclr() {
# TODO: This if condition is a temporary workaround so that mainline builds don't error out
# until build migrated from opencl to clr repo
if [ -e "$CLR_ROOT/CMakeLists.txt" ]; then
# We are in a branch that has migrated to clr repo
_OCL_CMAKELIST_DIR="$CLR_ROOT"
_OCL_CMAKELIST_OPT="-DCLR_BUILD_HIP=OFF -DCLR_BUILD_OCL=ON"
elif [ ! -e "$OPENCL_ON_ROCclr_ROOT/CMakeLists.txt" ]; then
# We seem to have hit a branch in which neither the old nor the new repo exists
echo "No $OPENCL_ON_ROCclr_ROOT/CMakeLists.txt file, skipping opencl on rocclr" >&2
echo "No $OPENCL_ON_ROCclr_ROOT/CMakeLists.txt file, skipping opencl on rocclr"
exit 0
exit 0 # This is not an error
else
# We are in a branch that has not yet migrated to the clr repo
_OCL_CMAKELIST_DIR="$OPENCL_ON_ROCclr_ROOT"
_OCL_CMAKELIST_OPT=""
fi
@@ -99,10 +108,12 @@ build_opencl_on_rocclr() {
mkdir -p "$BUILD_PATH"
pushd "$BUILD_PATH"
# FIXME: Remove -DROCclr_DIR/LIBROCclr_STATIC_DIR
if [ ! -e Makefile ]; then
echo "Building OpenCL-On-ROCclr CMake environment"
cmake \
${GEN_NINJA} \
$(rocm_cmake_params) \
-DUSE_COMGR_LIBRARY=ON \
$(rocm_common_cmake_params) \
@@ -147,10 +158,20 @@ print_output_directory() {
}
case $TARGET in
(clean) clean_opencl_on_rocclr ;;
(build) build_opencl_on_rocclr ; package_opencl_on_rocclr ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean_opencl_on_rocclr
;;
(build)
build_opencl_on_rocclr
package_opencl_on_rocclr
build_wheel "$BUILD_PATH" "$PROJ_NAME"
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -10,6 +10,8 @@ printUsage() {
echo " -c, --clean Clean output and delete all intermediate work"
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -w, --wheel Creates python wheel package of openmp-extras.
It needs to be used along with -r option"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
@@ -23,9 +25,8 @@ printUsage() {
return 0
}
PROJ_NAME="openmp-extras"
packageMajorVersion="18.63"
packageMajorVersion="19.65"
packageMinorVersion="0"
packageVersion="${packageMajorVersion}.${packageMinorVersion}.${ROCM_LIBPATCH_VERSION}"
BUILD_PATH="$(getBuildPath $PROJ_NAME)"
@@ -35,9 +36,12 @@ TARGET="build"
MAKEOPTS="$DASH_JAY"
STATIC_PKG_DEPS="OFF"
# Should only need to update this variable when moving
# to the new ROCM_INSTALL_PATH.
export INSTALL_PREFIX=${ROCM_INSTALL_PATH}
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,address_sanitizer,static,outdir,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,address_sanitizer,static,outdir,wheel:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -52,17 +56,23 @@ do
-a | --address_sanitizer )
set_asan_env_vars
set_address_sanitizer_on
# The path will be appended to cmake prefix path for asan builds
# Required for finding cmake config files for asan builds
export ROCM_CMAKECONFIG_PATH="$INSTALL_PREFIX/lib/asan/cmake"
export VERBOSE=1
# openmp debug build of ompd uses python build, which defaults to gcc
export LDSHARED="$INSTALL_PREFIX/lib/llvm/bin/clang -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fwrapv -O2"
# SANITIZER is used in openmp-debug build scripts so that the asan C/CXX Flags are not overwritten
export SANITIZER=1 ;;
-o | --outdir )
shift 1; PKGTYPE=$1 ; TARGET="outdir" ;;
-w | --wheel )
WHEEL_PACKAGE=true ;;
-s | --static )
export STATIC_PKG_DEPS="ON" ;;
-h | --help )
printUsage ; exit 0 ;;
--) shift; break;;
--) shift; break;; # end delimiter
*)
MAKEARG=$@ ; break ;;
esac
@@ -71,6 +81,7 @@ done
clean_openmp_extras() {
# Delete cmake output and install directory
rm -rf "$BUILD_PATH"
rm -rf "$INSTALL_PREFIX/openmp-extras"
}
@@ -93,6 +104,9 @@ build_openmp_extras() {
echo BUILD_PATH: $BUILD_PATH
echo "INSTALL_PREFIX:$INSTALL_PREFIX"
export AOMP_STANDALONE_BUILD=0
# FIXME: Check the build scripts to see if support for DEVEL package
# is on. This can be removed once the old packaging logic is gone. This
# may return a non-zero, so for now do not error out the script.
set +e
checkDevel=$(grep "ENABLE_DEVEL_PACKAGE=ON" $AOMP_REPOS/aomp/bin/build_openmp.sh)
set -e
@@ -104,6 +118,7 @@ build_openmp_extras() {
fi
export BUILD_AOMP=$BUILD_PATH
# EPSDB does not build hsa, so we need to pick it up from the ROCm install location.
if [ "$EPSDB" == "1" ]; then
export ROCM_DIR=$ROCM_INSTALL_PATH
else
@@ -130,10 +145,17 @@ build_openmp_extras() {
echo "--------------------------"
fi
export AOMP_JENKINS_BUILD_LIST="extras openmp pgmath flang flang_runtime"
# can we stop building flang-classic ?
if [ -e $AOMP_REPOS/aomp/bin/disableClassic ]; then
export AOMP_JENKINS_BUILD_LIST="extras openmp"
export AOMP_SKIP_FLANG=1
else
export AOMP_JENKINS_BUILD_LIST="extras openmp pgmath flang flang_runtime"
fi
echo "BEGIN Build of openmp-extras"
"$AOMP_REPOS"/aomp/bin/build_aomp.sh $MAKEARG
# Create symlinks for omp headers. Stage 2 components need these.
local llvm_ver=`$INSTALL_PREFIX/lib/llvm/bin/clang --print-resource-dir | sed 's^/llvm/lib/clang/^ ^' | awk '{print $2}'`
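# Example (hypothetical path): `clang --print-resource-dir` prints something like
# /opt/rocm/lib/llvm/lib/clang/19; the sed replaces "/llvm/lib/clang/" with a
# space and awk keeps the second field, so llvm_ver becomes "19".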
if [ ! -e $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp.h ] ; then
if [ ! -h $ROCM_INSTALL_PATH/lib/llvm/lib/clang/$llvm_ver/include/omp.h ] ; then
@@ -154,6 +176,7 @@ build_openmp_extras() {
}
package_openmp_extras_deb() {
# Debian packaging
local packageName=$1
local packageDeb="$packageDir/deb"
local packageArch="amd64"
@@ -184,14 +207,20 @@ package_openmp_extras_deb() {
debDependencies="$debDependencies, openmp-extras-runtime, hsa-rocr-static-dev"
fi
fi
# copyPath = /opt/rocm
# installPath = /opt/rocm/lib/llvm
# FIXME: openmp-extras/devel logic can be removed once the new packaging lands
# New packaging uses installed_files.txt; the openmp-extras/devel
# directory will no longer exist.
if [ -f "$BUILD_PATH"/build/installed_files.txt ] && [ ! -d "$INSTALL_PREFIX"/openmp-extras/devel ]; then
if [ "$packageType" == "runtime" ]; then
# Cleanup previous packages
rm -rf "$packageDir"
rm -rf "$DEB_PATH"
mkdir -p "$DEB_PATH"
mkdir -p $packageDeb/openmp-extras
# Licensing
# Copy licenses into share/doc/openmp-extras
mkdir -p $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras
cp -r $AOMP_REPOS/aomp/LICENSE $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras/LICENSE.apache2
cp -r $AOMP_REPOS/aomp-extras/LICENSE $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras/LICENSE.mit
@@ -199,20 +228,29 @@ package_openmp_extras_deb() {
else
rm -rf $packageDeb/openmp-extras/*
mkdir -p $packageDeb/openmp-extras$copyPath/bin
cp -r --parents "$installPath"/lib-debug/src $packageDeb/openmp-extras
if [ -d "$installPath"/lib-debug/src ]; then
cp -r --parents "$installPath"/lib-debug/src $packageDeb/openmp-extras
else
cp -r --parents "$ompdSrcDir" $packageDeb/openmp-extras
fi
fi
else
# FIXME: Old packaging method, can delete once new packaging lands.
if [ "$packageType" == "runtime" ]; then
# Cleanup previous packages
rm -rf "$packageDir"
rm -rf "$DEB_PATH"
mkdir -p "$DEB_PATH"
mkdir -p $packageDeb/openmp-extras$installPath
mkdir -p $packageDeb/openmp-extras$installPath/lib/clang/$llvm_ver/include
# Licensing
# Copy licenses into share/doc/openmp-extras
mkdir -p $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras
cp -r $AOMP_REPOS/aomp/LICENSE $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras/LICENSE.apache2
cp -r $AOMP_REPOS/aomp-extras/LICENSE $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras/LICENSE.mit
cp -r $AOMP_REPOS/flang/LICENSE.txt $packageDeb/openmp-extras$copyPath/share/doc/openmp-extras/LICENSE.flang
else
# Clean packageDeb for devel build
rm -rf $packageDeb/openmp-extras$installPath/*
rm -rf $packageDeb/openmp-extras/bin
rm -rf $packageDeb/openmp-extras$copyPath/share
@@ -224,10 +262,13 @@ package_openmp_extras_deb() {
mkdir -p "$(dirname $controlFile)"
# Copy openmp-extras files, bin will turn into llvm/bin
if [ -f "$BUILD_PATH"/build/installed_files.txt ] && [ ! -d "$INSTALL_PREFIX"/openmp-extras/devel ]; then
if [ "$packageType" == "runtime" ]; then
cat "$BUILD_PATH"/build/installed_files.txt | grep -P '\.so|\.a' | cut -d":" -f2 | cut -d" " -f2 | xargs -I {} cp -d --parents {} "$packageDeb"/openmp-extras
# libgomp and libiomp5 are not on the install_manifest.txt and need
# to be manually copied. Waiting on trunk patch to flow into amd-staging
# to ensure these symlinks are in the manifest.
cp -d --parents "$installPath/lib/libgomp.so" "$packageDeb"/openmp-extras
cp -d --parents "$installPath/lib/libiomp5.so" "$packageDeb"/openmp-extras
cp -d --parents "$installPath/lib-debug/libgomp.so" "$packageDeb"/openmp-extras
@@ -236,9 +277,11 @@ package_openmp_extras_deb() {
cat "$BUILD_PATH"/build/installed_files.txt | grep -Pv '\.so|\.a' | cut -d":" -f2 | cut -d" " -f2 | xargs -I {} cp -d --parents {} "$packageDeb"/openmp-extras
fi
else
# FIXME: Old packaging method, can delete once new packaging lands.
cp -r "$AOMP"/"$packageType"/* "$packageDeb"/openmp-extras"$installPath"
fi
# Copy examples
if [ "$packageType" == "devel" ]; then
mkdir -p "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
echo cp -r "$AOMP_REPOS"/aomp/examples/fortran "$packageDeb"/openmp-extras"$copyPath"/share/openmp-extras/examples
@@ -252,9 +295,11 @@ package_openmp_extras_deb() {
fi
if [ "$packageType" == "devel" ]; then
# Create symbolic links for openmp header files
ln -s ../../../../include/omp.h $packageDeb/openmp-extras$installPath/lib/clang/$llvm_ver/include/omp.h
ln -s ../../../../include/ompt.h $packageDeb/openmp-extras$installPath/lib/clang/$llvm_ver/include/ompt.h
ln -s ../../../../include/omp-tools.h $packageDeb/openmp-extras$installPath/lib/clang/$llvm_ver/include/omp-tools.h
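# (Resolved relative to lib/clang/$llvm_ver/include/, the four ".." segments
#  climb back up to $installPath, so each link targets
#  $installPath/include/<header>, i.e. the headers under lib/llvm/include/.)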
# Only create symlinks if file exists
if [ ! -h "$packageDeb"/openmp-extras"$copyPath"/bin/aompcc ] && [ -e "$packageDeb"/openmp-extras"$installPath"/bin/aompcc ]; then
ln -s ../lib/llvm/bin/aompcc "$packageDeb"/openmp-extras"$copyPath"/bin/aompcc
fi
@@ -266,12 +311,14 @@ package_openmp_extras_deb() {
fi
fi
# Inspect
ls -l "$packageDeb"/openmp-extras"$installPath"
if [ "$packageType" == "devel" ]; then
ls -l "$packageDeb"/openmp-extras"$installPath"/bin
ls -l "$packageDeb"/openmp-extras"$copyPath"/bin
fi
# Create control file
{
echo "Package: $packageName"
echo "Architecture: $packageArch"
@@ -293,7 +340,9 @@ package_openmp_extras_deb() {
"$DEB_PATH/${packageName}_${packageVersion}-${CPACK_DEBIAN_PACKAGE_RELEASE}_${packageArch}.deb"
}
# ASAN debian package
package_openmp_extras_asan_deb() {
# Debian packaging
local packageName=$1
local packageDeb="$packageDir/deb"
local packageArch="amd64"
@@ -308,6 +357,8 @@ package_openmp_extras_asan_deb() {
rm -rf "$packageDir"
rm -rf "$DEB_PATH"
mkdir -p "$DEB_PATH"
# Licensing
# Copy licenses into share/doc/openmp-extras-asan
local licenseDir="$packageDeb/openmp-extras$copyPath/share/doc/openmp-extras-asan"
mkdir -p $licenseDir
cp -r $AOMP_REPOS/aomp/LICENSE $licenseDir/LICENSE.apache2
@@ -317,15 +368,18 @@ package_openmp_extras_asan_deb() {
mkdir -p "$(dirname $controlFile)"
if [ -f "$BUILD_PATH"/build/installed_files.txt ] && [ ! -d "$INSTALL_PREFIX"/openmp-extras ]; then
cat "$BUILD_PATH"/build/installed_files.txt | grep -P 'asan' | cut -d":" -f2 | cut -d" " -f2 | xargs -I {} cp -d --parents {} "$packageDeb"/openmp-extras
# libgomp and libiomp5 are not on the install_manifest.txt and need
# to be manually copied. Waiting on trunk patch to flow into amd-staging
# to ensure these symlinks are in the manifest.
cp -d --parents "$installPath/lib/asan/libgomp.so" "$packageDeb"/openmp-extras
cp -d --parents "$installPath/lib/asan/libiomp5.so" "$packageDeb"/openmp-extras
cp -d --parents "$installPath/lib-debug/asan/libgomp.so" "$packageDeb"/openmp-extras
cp -d --parents "$installPath/lib-debug/asan/libiomp5.so" "$packageDeb"/openmp-extras
# FIXME: Old packaging method, can delete once new packaging lands.
else
mkdir -p $packageDeb/openmp-extras$installPath/lib/asan
mkdir -p $packageDeb/openmp-extras$installPath/lib-debug/asan
# The runtime folder has the ASAN libraries; copy them to the asan folder for packaging
cp -r "$AOMP"/lib/asan/* "$packageDeb"/openmp-extras"$installPath"/lib/asan/
cp -r "$AOMP"/lib-debug/asan/* "$packageDeb"/openmp-extras"$installPath"/lib-debug/asan/
cp -r "$AOMP"/"$asanLibDir"/lib/asan/* "$packageDeb"/openmp-extras"$installPath"/lib/asan/
@@ -334,6 +388,7 @@ package_openmp_extras_asan_deb() {
cp -r "$AOMP"/devel/lib-debug/asan/* "$packageDeb"/openmp-extras"$installPath"/lib-debug/asan/
fi
# Create control file
{
echo "Package: $packageName"
echo "Architecture: $packageArch"
@@ -352,6 +407,7 @@ package_openmp_extras_asan_deb() {
package_openmp_extras_rpm() {
# RPM packaging
local packageName=$1
local packageRpm="$packageDir/rpm"
local specFile="$packageDir/$packageName.spec"
@@ -376,6 +432,7 @@ package_openmp_extras_rpm() {
fi
fi
# Cleanup previous packages
if [ "$packageType" == "runtime" ]; then
rm -rf "$packageDir"
rm -rf "$RPM_PATH"
@@ -386,6 +443,8 @@ package_openmp_extras_rpm() {
mkdir -p "$(dirname $specFile)"
{
# FIXME: Remove all conditions for empty packageType when
# devel and runtime packaging changes land in mainline.
echo "%define is_runtime %( if [ $packageType == runtime ]; then echo "1" ; else echo "0"; fi )"
echo "%define is_devel %( if [ $packageType == devel ]; then echo "1" ; else echo "0"; fi )"
@@ -403,6 +462,7 @@ package_openmp_extras_rpm() {
echo "Obsoletes: $rpmObsoletes"
echo "%endif"
echo "%define debug_package %{nil}"
# Redefining __os_install_post to remove stripping
echo "%define __os_install_post %{nil}"
echo "%description"
echo "$packageSummaryLong"
@@ -419,6 +479,7 @@ package_openmp_extras_rpm() {
echo " mkdir -p \$RPM_BUILD_ROOT$copyPath/bin"
echo " mkdir -p \$RPM_BUILD_ROOT$installPath/lib/clang/$llvm_ver/include"
echo " %endif"
# FIXME: Old packaging method, can delete once new packaging lands.
echo "else"
echo " %if %is_runtime"
echo " mkdir -p \$RPM_BUILD_ROOT$installPath"
@@ -430,10 +491,13 @@ package_openmp_extras_rpm() {
echo " %endif"
echo "fi"
# Copy openmp-extras files, bin will turn into llvm/bin
echo "if [ -f $BUILD_PATH/build/installed_files.txt ] && [ ! -d $INSTALL_PREFIX/openmp-extras/devel ]; then"
echo " %if %is_runtime"
echo " cat $BUILD_PATH/build/installed_files.txt | grep -P '\.so|\.a' | cut -d':' -f2 | cut -d' ' -f2 | xargs -I {} cp -d --parents {} \$RPM_BUILD_ROOT"
# libgomp and libiomp5 are not on the install_manifest.txt and need
# to be manually copied. Waiting on trunk patch to flow into amd-staging
# to ensure these symlinks are in the manifest.
echo " cp -d --parents "$installPath/lib/libgomp.so" \$RPM_BUILD_ROOT"
echo " cp -d --parents "$installPath/lib/libiomp5.so" \$RPM_BUILD_ROOT"
echo " cp -d --parents "$installPath/lib-debug/libgomp.so" \$RPM_BUILD_ROOT"
@@ -442,14 +506,17 @@ package_openmp_extras_rpm() {
echo "%if %is_devel"
echo " cat "$BUILD_PATH"/build/installed_files.txt | grep -Pv '\.so|\.a' | cut -d':' -f2 | cut -d' ' -f2 | xargs -I {} cp -d --parents {} \$RPM_BUILD_ROOT"
echo "%endif"
# FIXME: Old packaging method, can delete once new packaging lands.
echo "else"
echo " cp -r $AOMP/$packageType/* \$RPM_BUILD_ROOT$installPath"
# Devel does not have examples in the llvm dir
echo " %if %is_devel"
echo " rm -rf \$RPM_BUILD_ROOT$installPath/share"
echo " %endif"
echo "fi"
# Create symbolic links from /opt/rocm/bin to /opt/rocm/lib/llvm/bin utilities
#echo "ls \$RPM_BUILD_ROOT$installPath"
echo "%if %is_devel"
echo " if [ ! -h \$RPM_BUILD_ROOT$copyPath/bin/aompcc ] && [ -e \$RPM_BUILD_ROOT$installPath/bin/aompcc ]; then"
echo " ln -s ../lib/llvm/bin/aompcc \$RPM_BUILD_ROOT$copyPath/bin/aompcc"
@@ -462,18 +529,23 @@ package_openmp_extras_rpm() {
echo " fi"
echo " ls \$RPM_BUILD_ROOT$copyPath"
# Create symbolic links for openmp header files
echo " ln -s ../../../../include/omp.h \$RPM_BUILD_ROOT/$installPath/lib/clang/$llvm_ver/include/omp.h"
echo " ln -s ../../../../include/ompt.h \$RPM_BUILD_ROOT/$installPath/lib/clang/$llvm_ver/include/ompt.h"
echo " ln -s ../../../../include/omp-tools.h \$RPM_BUILD_ROOT/$installPath/lib/clang/$llvm_ver/include/omp-tools.h"
echo "%endif"
echo 'find $RPM_BUILD_ROOT \! -type d | sed "s|$RPM_BUILD_ROOT||"> files.list'
# Copy examples
echo "%if %is_runtime"
# Licensing
# Copy licenses into share/doc/openmp-extras
echo " mkdir -p \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras"
echo " cp -r $AOMP_REPOS/aomp/LICENSE \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras/LICENSE.apache2"
echo " cp -r $AOMP_REPOS/aomp-extras/LICENSE \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras/LICENSE.mit"
echo " cp -r $AOMP_REPOS/flang/LICENSE.txt \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras/LICENSE.flang"
echo "%else"
# Copy devel examples to share/openmp-extras/examples
echo " mkdir -p \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
echo " cp -r $AOMP_REPOS/aomp/examples/fortran \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
echo " cp -r $AOMP_REPOS/aomp/examples/openmp \$RPM_BUILD_ROOT$copyPath/share/openmp-extras/examples"
@@ -493,6 +565,12 @@ package_openmp_extras_rpm() {
echo " $copyPath/share/openmp-extras"
echo "%endif"
echo "%defattr(-,root,root,-)"
# Note: On some OSes like SLES, rocm-core is upgraded first during an upgrade, followed by the other packages.
# rocm-core cannot delete the rocm-ver folder, since it still contains files of packages that are yet to be upgraded.
# To remove the rocm-ver folder after an upgrade, the spec files of the other packages must also list the rocm-ver directory;
# otherwise an empty, stale rocm-ver folder is left behind.
# If empty, remove the /opt/rocm-ver folder and its subdirectories created by
# the openmp-extras runtime and devel packages.
echo "%if %is_runtime || %is_devel"
echo " $copyPath"
echo "%endif"
@@ -502,15 +580,19 @@ package_openmp_extras_rpm() {
mv $packageRpm/RPMS/x86_64/*.rpm $RPM_PATH
}
# ASAN RPM packaging
package_openmp_extras_asan_rpm() {
# RPM packaging
local packageName=$1
local packageRpm="$packageDir/rpm"
local specFile="$packageDir/$packageName.spec"
local packageSummary="AddressSanitizer OpenMP Extras provides instrumented openmp and flang libraries."
local packageSummaryLong="openmp-extras $packageVersion is based on LLVM 17 and is used for offloading to Radeon GPUs."
local rpmRequires="hsa-rocr-asan, rocm-core-asan"
# After the build, the runtime folder will have the ASAN libraries.
local asanLibDir="runtime"
# Cleanup previous packages
rm -rf "$packageDir"
rm -rf "$RPM_PATH"
mkdir -p "$RPM_PATH"
@@ -527,6 +609,7 @@ package_openmp_extras_asan_rpm() {
echo "License: MIT and ASL 2.0 and ASL 2.0 with exceptions"
echo "Vendor: Advanced Micro Devices, Inc."
echo "Requires: $rpmRequires"
# Redefining __os_install_post to prevent binary stripping
echo "%define __os_install_post %{nil}"
echo "%description"
echo "%undefine _debugsource_packages"
@@ -539,11 +622,16 @@ package_openmp_extras_asan_rpm() {
echo "%install"
echo "if [ -f $BUILD_PATH/build/installed_files.txt ] && [ ! -d "$INSTALL_PREFIX"/openmp-extras ]; then"
echo " cat $BUILD_PATH/build/installed_files.txt | grep -P 'asan' | cut -d':' -f2 | cut -d' ' -f2 | xargs -I {} cp -d --parents {} \$RPM_BUILD_ROOT"
# libgomp and libiomp5 are not on the install_manifest.txt and need
# to be manually copied. Waiting on trunk patch to flow into amd-staging
# to ensure these symlinks are in the manifest.
echo " cp -d --parents "$installPath/lib/asan/libgomp.so" \$RPM_BUILD_ROOT"
echo " cp -d --parents "$installPath/lib/asan/libiomp5.so" \$RPM_BUILD_ROOT"
echo " cp -d --parents "$installPath/lib-debug/asan/libgomp.so" \$RPM_BUILD_ROOT"
echo " cp -d --parents "$installPath/lib-debug/asan/libiomp5.so" \$RPM_BUILD_ROOT"
# FIXME: Old packaging method, can delete once new packaging lands.
echo "else"
# Copy openmp-extras ASAN libraries to ASAN folders
echo " mkdir -p \$RPM_BUILD_ROOT$installPath/lib/asan"
echo " mkdir -p \$RPM_BUILD_ROOT$installPath/lib-debug/asan"
echo " cp -r $AOMP/lib/asan/* \$RPM_BUILD_ROOT$installPath/lib/asan"
@@ -554,8 +642,11 @@ package_openmp_extras_asan_rpm() {
echo " cp -r $AOMP/devel/lib-debug/asan/* \$RPM_BUILD_ROOT$installPath/lib-debug/asan"
echo "fi"
# Create symbolic links from /opt/rocm/bin to /opt/rocm/lib/llvm/bin utilities
echo 'find $RPM_BUILD_ROOT \! -type d | sed "s|$RPM_BUILD_ROOT||"> files.list'
# Licensing
# Copy licenses into share/doc/openmp-extras
echo " mkdir -p \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras-asan"
echo " cp -r $AOMP_REPOS/aomp/LICENSE \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras-asan/LICENSE.apache2"
echo " cp -r $AOMP_REPOS/aomp-extras/LICENSE \$RPM_BUILD_ROOT$copyPath/share/doc/openmp-extras-asan/LICENSE.mit"
@@ -566,6 +657,12 @@ package_openmp_extras_asan_rpm() {
echo "%files -f files.list"
echo " $copyPath/share/doc/openmp-extras-asan"
echo "%defattr(-,root,root,-)"
# Note: On some OSes like SLES, rocm-core is upgraded first during an upgrade, followed by the other packages.
# rocm-core cannot delete the rocm-ver folder, since it still contains files of packages that are yet to be upgraded.
# To remove the rocm-ver folder after an upgrade, the spec files of the other packages must also list the rocm-ver directory;
# otherwise an empty, stale rocm-ver folder is left behind.
# If empty, remove the /opt/rocm-ver folder and its subdirectories created by
# the openmp-extras runtime and devel packages.
echo " $copyPath"
} > $specFile
@@ -581,21 +678,29 @@ package_openmp_extras() {
local llvm_ver=`$INSTALL_PREFIX/lib/llvm/bin/clang --print-resource-dir | sed 's^/llvm/lib/clang/^ ^' | awk '{print $2}'`
local debNames="openmp-extras-runtime openmp-extras-dev"
local rpmNames="openmp-extras-runtime openmp-extras-devel"
ompdSrcDir="$INSTALL_PREFIX/lib/llvm/share/gdb/python/ompd/src"
if [ "$SANITIZER" == "1" ]; then
local asanPkgName="openmp-extras-asan"
if [[ $DISTRO_NAME =~ "Ubuntu" ]]; then
if [[ $DISTRO_NAME =~ "Ubuntu" ]] || [[ $DISTRO_NAME =~ "Debian" ]]; then
echo "Warning: Assuming DEBs"
package_openmp_extras_asan_deb $asanPkgName
else
echo "Warning: Assuming RPMs"
package_openmp_extras_asan_rpm $asanPkgName
fi
# For ASAN build, create only ASAN package
# Devel and runtime pkgs should be created from the non-ASAN build, so return here
return 0
fi
if [[ $DISTRO_NAME =~ "Ubuntu" ]]; then
# Only build deb in Ubuntu environment
if [[ $DISTRO_NAME =~ "Ubuntu" ]] || [[ $DISTRO_NAME =~ "Debian" ]]; then
echo "Warning: Assuming DEBs"
for name in $debNames; do
package_openmp_extras_deb $name
done
# Only build RPM in CENTOS/SLES environment
else
echo "Warning: Assuming RPMs"
for name in $rpmNames; do
package_openmp_extras_rpm $name
done
@@ -603,6 +708,7 @@ package_openmp_extras() {
}
package_tests_deb(){
# Openmp-extras debian test packaging
local packageDir="$BUILD_PATH/package"
local packageDeb="$packageDir/deb"
local packageArch="amd64"
@@ -615,6 +721,7 @@ package_tests_deb(){
local installPath="$ROCM_INSTALL_PATH/share/openmp-extras/tests"
local packageName="openmp-extras-tests"
# Cleanup previous packages
rm -rf "$packageDir"
mkdir -p $packageDeb/openmp-extras"$installPath"
@@ -622,9 +729,15 @@ package_tests_deb(){
rm $(dirname $controlFile)
fi
mkdir -p "$(dirname $controlFile)"
# Copy openmp-extras files
cp -r "$AOMP_REPOS/aomp/." "$packageDeb/openmp-extras/$installPath"
rm -rf "$packageDeb"/openmp-extras"$installPath"/.git "$packageDeb"/openmp-extras"$installPath"/.github
cp "$OUT_DIR/build/lightning/bin/FileCheck" "$packageDeb/openmp-extras/$installPath/bin"
# Copy FileCheck
if [ -f "$OUT_DIR"/build/lightning/bin/FileCheck ]; then
cp "$OUT_DIR/build/lightning/bin/FileCheck" "$packageDeb/openmp-extras/$installPath/bin"
fi
# Create control file
{
echo "Package: $packageName"
echo "Architecture: $packageArch"
@@ -642,16 +755,19 @@ package_tests_deb(){
}
package_tests_rpm(){
# RPM packaging
AOMP_STANDALONE_BUILD=1 $AOMP_REPOS/aomp/bin/build_fixups.sh
local copyPath="$ROCM_INSTALL_PATH"
local packageDir="$BUILD_PATH/package"
local packageRpm="$packageDir/rpm"
local installPath="$ROCM_INSTALL_PATH/share/openmp-extras/tests"
local packageName="openmp-extras-tests"
local rpmRequires="openmp-extras-devel, rocm-core"
local specFile="$packageDir/$packageName.spec"
local packageSummary="Tests for openmp-extras."
local packageSummaryLong="Tests for openmp-extras $packageVersion is based on LLVM 18 and is used for offloading to Radeon GPUs."
# Cleanup previous packages
rm -rf "$packageDir"
mkdir -p "$packageRpm/openmp-extras/$installPath"
{
@@ -679,7 +795,9 @@ package_tests_rpm(){
echo "mkdir -p \$RPM_BUILD_ROOT$installPath"
echo "cp -R $AOMP_REPOS/aomp/. \$RPM_BUILD_ROOT$installPath"
echo "rm -rf \$RPM_BUILD_ROOT$installPath/.git \$RPM_BUILD_ROOT$installPath/.github"
echo "cp $OUT_DIR/build/lightning/bin/FileCheck \$RPM_BUILD_ROOT$installPath/bin"
echo "if [ -f $OUT_DIR/build/lightning/bin/FileCheck ]; then"
echo " cp $OUT_DIR/build/lightning/bin/FileCheck \$RPM_BUILD_ROOT$installPath/bin"
echo "fi"
echo 'find $RPM_BUILD_ROOT \! -type d | sed "s|$RPM_BUILD_ROOT||"> files.list'
echo "%clean"
@@ -698,8 +816,10 @@ package_tests_rpm(){
package_tests() {
local DISTRO_NAME=$(cat /etc/os-release | grep -e ^NAME=)
# Only build deb in Ubuntu environment
if [[ $DISTRO_NAME =~ "Ubuntu" ]]; then
package_tests_deb
# Only build RPM in CENTOS/SLES environment
else
package_tests_rpm
fi
@@ -718,10 +838,21 @@ print_output_directory() {
}
case $TARGET in
(clean) clean_openmp_extras ;;
(build) build_openmp_extras; package_openmp_extras; package_tests ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean_openmp_extras
;;
(build)
build_openmp_extras
package_openmp_extras
package_tests
build_wheel "$BUILD_PATH" "$PROJ_NAME"
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -1,12 +1,10 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rccl
ENABLE_ADDRESS_SANITIZER=false
build_rccl() {
echo "Start build"
@@ -24,19 +22,12 @@ build_rccl() {
mkdir -p $BUILD_DIR && cd $BUILD_DIR
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
init_rocm_common_cmake_params
CC=${ROCM_PATH}/bin/amdclang \
CXX=$(set_build_variables CXX) \
CXX=$(set_build_variables __CXX__) \
cmake \
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DHIP_COMPILER=clang \
-DCMAKE_PREFIX_PATH="${ROCM_PATH};${ROCM_PATH}/share/rocm/cmake/" \
${LAUNCHER_FLAGS} \
@@ -50,7 +41,7 @@ build_rccl() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -64,7 +55,7 @@ clean_rccl() {
stage2_command_args "$@"
case $TARGET in
build) build_rccl ;;
build) build_rccl; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rccl ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,121 +1,45 @@
#!/bin/bash
source "$(dirname "$BASH_SOURCE")/compute_utils.sh"
set -ex
printUsage() {
echo
echo "Usage: $(basename $0) [-c|-r|-h] [makeopts]"
echo
echo "Options:"
echo " -c, --clean Removes all rdc build artifacts, except grpc"
echo " -g, --clean_grpc Removes the grpc files and artifacts"
echo " -r, --release Build release version of RDC (default is debug)"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
echo " -h, --help Prints this help"
echo
return 0
}
### Set up RPATH for RDC to simplify finding RDC libraries. Please note that
### ROCm has a setup_env.sh script which already populates these variables.
### These variables are then used inside compute_utils.sh.
###
### We need to append additional paths to these variables BEFORE sourcing
### compute_utils.sh
PACKAGE_ROOT="$(getPackageRoot)"
RDC_BUILD_DIR=$(getBuildPath rdc)
GRPC_BUILD_DIR=$(getBuildPath grpc)
TARGET="build"
PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE="$(getIncludePath)"
PACKAGE_BIN="$(getBinPath)"
RDC_PACKAGE_DEB_DIR="$PACKAGE_ROOT/deb/rdc"
RDC_PACKAGE_RPM_DIR="$PACKAGE_ROOT/rpm/rdc"
BUILD_TYPE="Debug"
MAKETARGET="deb"
MAKEARG="$DASH_JAY O=$RDC_BUILD_DIR"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
CLEAN_GRPC="no"
PKGTYPE="deb"
RDC_MAKE_OPTS="$DASH_JAY O=$RDC_BUILD_DIR -C $RDC_BUILD_DIR"
BUILD_DOCS="no"
RDC_PKG_NAME_ROOT="rdc"
RDC_PKG_NAME="${RDC_PKG_NAME_ROOT}"
GRPC_PROTOC_ROOT="${RDC_BUILD_DIR}/grpc"
GRPC_SEARCH_ROOT="/usr/grpc"
GRPC_DESIRED_VERSION="1.61.0"
RDC_LIB_RPATH='$ORIGIN'
RDC_LIB_RPATH=$RDC_LIB_RPATH:'$ORIGIN/..'
RDC_LIB_RPATH=$RDC_LIB_RPATH:'$ORIGIN/rdc/grpc/lib'
RDC_LIB_RPATH=$RDC_LIB_RPATH:'$ORIGIN/grpc/lib'
RDC_EXE_RPATH='$ORIGIN/../lib'
RDC_EXE_RPATH=$RDC_EXE_RPATH:'$ORIGIN/../lib/rdc/grpc/lib'
VALID_STR=`getopt -o hcgradso:p: --long help,clean,clean_grpc,release,documentation,static,address_sanitizer,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
case "$1" in
(-h | --help)
printUsage ; exit 0;;
(-c | --clean)
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
(-g | --clean_grpc)
TARGET="clean_grpc" ; shift ;;
(-r | --release)
BUILD_TYPE="Release" ; shift ;;
(-a | --address_sanitizer)
set_asan_env_vars
set_address_sanitizer_on ; shift ;;
(-d | --documentation )
BUILD_DOCS="yes" ;;
(-s | --static)
ack_and_skip_static ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
--) shift; break;;
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
done
RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
exit $RET_CONFLICT
# lib/rdc/librdc_rocp.so needs lib/librdc_bootstrap.so
# this also covers the ASAN usecase
ROCM_LIB_RPATH=$ROCM_LIB_RPATH:'$ORIGIN/..'
# grpc
ROCM_LIB_RPATH=$ROCM_LIB_RPATH:'$ORIGIN/rdc/grpc/lib'
ROCM_LIB_RPATH=$ROCM_LIB_RPATH:'$ORIGIN/grpc/lib'
# help RDC executables find RDC libraries
# lib/librdc_bootstrap.so.0 and grpc
ROCM_EXE_RPATH=$ROCM_EXE_RPATH:'$ORIGIN/../lib/rdc/grpc/lib'
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
ROCM_EXE_RPATH="$ROCM_ASAN_EXE_RPATH:$ROCM_EXE_RPATH"
fi
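# (Net effect, assuming ROCM_LIB_RPATH already begins with '$ORIGIN' from the
#  environment setup: a library installed under lib/rdc/ carries an rpath
#  roughly like $ORIGIN:$ORIGIN/..:$ORIGIN/rdc/grpc/lib:$ORIGIN/grpc/lib, so
#  librdc_rocp.so can find librdc_bootstrap.so one level up and the bundled
#  gRPC libraries without relying on LD_LIBRARY_PATH.)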
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
clean_rdc() {
rm -rf "$RDC_BUILD_DIR"
rm -rf "$RDC_PACKAGE_DEB_DIR"
rm -rf "$RDC_PACKAGE_RPM_DIR"
rm -rf "$RDC_BUILD_DIR/rdc"
set_component_src rdc
rm -rf "$PACKAGE_INCLUDE/rdc"
rm -f $PACKAGE_LIB/librdc*
rm -f $PACKAGE_BIN/rdci
return 0
}
clean_grpc() {
rm -rf "$GRPC_BUILD_DIR"
}
# RDC
# BUILD ARGUMENTS
BUILD_DOCS="no"
GRPC_PROTOC_ROOT="${BUILD_DIR}/grpc"
GRPC_SEARCH_ROOT="/usr/grpc"
GRPC_DESIRED_VERSION="1.67.1" # do not include 'v'
# lib/librocm_smi64.so and lib/libamd_smi.so
# check if exact version of gRPC is installed
find_grpc() {
grep -s -F "$GRPC_DESIRED_VERSION" ${GRPC_SEARCH_ROOT}/*/cmake/grpc/gRPCConfigVersion.cmake &&
GRPC_PROTOC_ROOT=$GRPC_SEARCH_ROOT
}
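# (find_grpc greps gRPC's generated CMake version file for an exact match of
#  GRPC_DESIRED_VERSION; a typical path it expects -- hypothetical example --
#  is /usr/grpc/lib/cmake/grpc/gRPCConfigVersion.cmake containing a line such
#  as set(PACKAGE_VERSION "1.67.1"). Only when that grep succeeds is
#  GRPC_PROTOC_ROOT switched from the copy under ${BUILD_DIR}/grpc to the
#  system install.)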
rdc_backwards_compat_cmake_params() {
grep -q "RDC_CLIENT_INSTALL_PREFIX" "$RDC_ROOT/CMakeLists.txt" &&
echo "-DRDC_CLIENT_INSTALL_PREFIX=$PACKAGE_ROOT"
}
build_rdc() {
if ! find_grpc; then
echo "ERROR: GRPC SEARCH FAILED!"
@@ -125,81 +49,96 @@ build_rdc() {
fi
echo "gRPC [${GRPC_DESIRED_VERSION}] found!"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
CXX=$(set_build_variables __C_++__)
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
# NOTE: Temp fix for ASAN failures SWDEV-515858
export ASAN_OPTIONS="detect_leaks=0:new_delete_type_mismatch=0"
fi
echo "Building RDC"
echo "RDC_BUILD_DIR: ${RDC_BUILD_DIR}"
echo "GRPC_PROTOC_ROOT: ${GRPC_PROTOC_ROOT}"
export LD_PRELOAD="$ASAN_LIB_PATH"
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
# NOTE: Temp workaround for libasan not being first in the library list.
# libasan not being first causes ADDRESS_SANITIZER builds to fail.
# This value is set by set_asan_env_vars. Which is only called when -a arg is passed.
export LD_PRELOAD="$ASAN_LIB_PATH"
fi
if [ ! -d "$RDC_BUILD_DIR/rdc_libs" ]; then
mkdir -p $RDC_BUILD_DIR
pushd $RDC_BUILD_DIR
echo "C compiler: $CC"
echo "CXX compiler: $CXX"
init_rocm_common_cmake_params
if [ ! -d "$BUILD_DIR/rdc_libs" ]; then
mkdir -p "$BUILD_DIR"
pushd "$BUILD_DIR"
cmake \
-DGRPC_ROOT="$GRPC_PROTOC_ROOT" \
-DGRPC_DESIRED_VERSION="$GRPC_DESIRED_VERSION" \
-DCMAKE_MODULE_PATH="$RDC_ROOT/cmake_modules" \
$(rocm_cmake_params) \
$(rdc_backwards_compat_cmake_params) \
$(rocm_common_cmake_params) \
-DROCM_DIR=$ROCM_INSTALL_PATH \
-DRDC_PACKAGE="${RDC_PKG_NAME}" \
-DCMAKE_MODULE_PATH="$COMPONENT_SRC/cmake_modules" \
"${rocm_math_common_cmake_params[@]}" \
-DCPACK_GENERATOR="${PKGTYPE^^}" \
-DROCM_DIR=$ROCM_PATH \
-DCPACK_PACKAGE_VERSION_MAJOR="1" \
-DCPACK_PACKAGE_VERSION_MINOR="$ROCM_LIBPATCH_VERSION" \
-DCPACK_PACKAGE_VERSION_PATCH="0" \
-DADDRESS_SANITIZER="$ADDRESS_SANITIZER" \
-DBUILD_TESTS=ON \
-DBUILD_PROFILER=ON \
-DBUILD_RVS=ON \
-DCMAKE_SKIP_BUILD_RPATH=TRUE \
-DCMAKE_EXE_LINKER_FLAGS_INIT="-Wl,--no-as-needed,-z,origin,--enable-new-dtags,--build-id=sha1,--rpath,$RDC_EXE_RPATH" \
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--no-as-needed,-z,origin,--enable-new-dtags,--build-id=sha1,--rpath,$RDC_LIB_RPATH" \
"$RDC_ROOT"
"$COMPONENT_SRC"
popd
fi
echo "Making rdc package:"
cmake --build "$RDC_BUILD_DIR" -- $RDC_MAKE_OPTS
cmake --build "$RDC_BUILD_DIR" -- $RDC_MAKE_OPTS install
cmake --build "$BUILD_DIR" -- -j${PROC}
cmake --build "$BUILD_DIR" -- install
unset LD_PRELOAD
cmake --build "$RDC_BUILD_DIR" -- $RDC_MAKE_OPTS package
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$RDC_PACKAGE_DEB_DIR" "$RDC_BUILD_DIR/$RDC_PKG_NAME"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$RDC_PACKAGE_RPM_DIR" "$RDC_BUILD_DIR/$RDC_PKG_NAME"*.rpm
if [ ! -e $ROCM_INSTALL_PATH/include/rdc/rdc.h ]; then
cp -r "$ROCM_INSTALL_PATH/rdc/lib/." "$PACKAGE_LIB"
cp -r "$ROCM_INSTALL_PATH/rdc/bin/." "$PACKAGE_BIN"
cp -r "$ROCM_INSTALL_PATH/rdc/include/." "$PACKAGE_INCLUDE"
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
# NOTE: Must disable LD_PRELOAD hack before packaging!
# cmake fails with cryptic error on RHEL:
#
# AddressSanitizer:DEADLYSIGNAL
# ==17083==ERROR: AddressSanitizer: stack-overflow on address ...
#
# The issue is likely in python3.6 cpack scripts
unset LD_PRELOAD
fi
cmake --build "$BUILD_DIR" -- package
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
if [ "$BUILD_DOCS" = "yes" ]; then
echo "Building Docs"
cmake --build "$RDC_BUILD_DIR" -- $RDC_MAKE_OPTS doc
pushd $RDC_BUILD_DIR/latex
cmake --build "$BUILD_DIR" -- doc
pushd "$BUILD_DIR"/latex
cmake --build . --
mv refman.pdf "$ROCM_INSTALL_PATH/rdc/RDC_Manual.pdf"
mv refman.pdf "$ROCM_PATH/rdc/RDC_Manual.pdf"
popd
fi
}
print_output_directory() {
case ${PKGTYPE} in
("deb")
echo ${RDC_PACKAGE_DEB_DIR};;
("rpm")
echo ${RDC_PACKAGE_RPM_DIR};;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
clean_rdc() {
echo "Cleaning RDC build directory: ${BUILD_DIR} ${PACKAGE_DIR}"
rm -rf "$BUILD_DIR" "$PACKAGE_DIR"
return 0
}
verifyEnvSetup
stage2_command_args "$@"
disable_debug_package_generation
case $TARGET in
(clean) clean_rdc ;;
(clean_grpc) clean_grpc ;;
(build) build_rdc ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
clean) clean_rdc ;;
build) build_rdc ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac
echo "Operation complete"

View File

@@ -1,35 +1,49 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rocAL
build_rocal() {
echo "Start build"
# Enable ASAN
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ] ; then
echo "Not building rocal for ${DISTRO_ID}. Exiting..."
return 0
fi
# python3 ${COMPONENT_SRC}/rocAL-setup.py
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
# Enable ASAN
# Temporarily disable ASAN for rocal - SWDEV-471302
#if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
# set_asan_env_vars
# set_address_sanitizer_on
#fi
pushd /tmp
# PyBind11
rm -rf pybind11
git clone -b v2.11.1 https://github.com/pybind/pybind11
cd pybind11 && mkdir build && cd build
cmake -DDOWNLOAD_CATCH=ON -DDOWNLOAD_EIGEN=ON ../
make -j$(nproc) && sudo make install
cd ../..
# Turbo JPEG
rm -rf libjpeg-turbo
git clone -b 3.0.2 https://github.com/libjpeg-turbo/libjpeg-turbo.git
cd libjpeg-turbo && mkdir build && cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RELEASE -DENABLE_STATIC=FALSE -DCMAKE_INSTALL_DEFAULT_LIBDIR=lib -DWITH_JPEG8=TRUE ..
make -j$(nproc) && sudo make install
cd ../..
# RapidJSON
rm -rf rapidjson
git clone https://github.com/Tencent/rapidjson.git
cd rapidjson && mkdir build && cd build
cmake .. && make -j$(nproc) && sudo make install
@@ -37,16 +51,19 @@ build_rocal() {
mkdir -p $BUILD_DIR && cd $BUILD_DIR
cmake -DAMDRPP_PATH=$ROCM_PATH ${COMPONENT_SRC}
make -j${PROC}
# python3 ../rocAL-setup.py
if [[ "${DISTRO_ID}" == almalinux-8* ]]; then
cmake -DPYTHON_VERSION_SUGGESTED=3.8 -DAMDRPP_PATH=$ROCM_PATH ${COMPONENT_SRC}
else
cmake -DAMDRPP_PATH=$ROCM_PATH ${COMPONENT_SRC}
fi
make -j8
cmake --build . --target PyPackageInstall
sudo make install
sudo make package
sudo chown -R $(id -u):$(id -g) ${BUILD_DIR}
make package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR
cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -59,7 +76,7 @@ clean_rocal() {
stage2_command_args "$@"
case $TARGET in
build) build_rocal ;;
build) build_rocal; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocal ;;
*) die "Invalid target $TARGET" ;;
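The build targets across these scripts now chain into a build_wheel step. That helper is defined in the shared compute_utils.sh and is not shown in this diff; the following is a purely hypothetical sketch of the shape such a helper could take, gated on the -w/--wheel flag. Every name, path, and command in it is an assumption, not the real implementation.

    build_wheel() {
        # Hypothetical sketch only; the real helper lives in compute_utils.sh.
        local build_dir="${1:-$BUILD_DIR}" proj="${2:-$PROJ_NAME}"
        # Only act when the -w/--wheel flag was passed.
        [ "${WHEEL_PACKAGE:-false}" = "true" ] || return 0
        echo "Creating Python wheel for ${proj} from ${build_dir}"
        # e.g. wrap the built artifacts with a generic setup script:
        # python3 -m pip wheel --no-deps -w "${build_dir}/wheel" "${build_dir}"
    }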

View File

@@ -2,7 +2,7 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
PATH=${ROCM_PATH}/bin:$PATH
set_component_src rocALUTION
@@ -16,7 +16,7 @@ build_rocalution() {
cd $COMPONENT_SRC
CXX="g++"
CXX=$(set_build_variables __G_++__)
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
@@ -28,17 +28,11 @@ build_rocalution() {
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
cmake \
${GEN_NINJA} \
-DSUPPORT_HIP=ON \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DBUILD_CLIENTS_SAMPLES=ON \
-DBUILD_CLIENTS_TESTS=ON \
-DBUILD_CLIENTS_BENCHMARKS=ON \
@@ -51,7 +45,7 @@ build_rocalution() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -65,7 +59,7 @@ clean_rocalution() {
stage2_command_args "$@"
case $TARGET in
build) build_rocalution ;;
build) build_rocalution; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocalution ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -2,12 +2,13 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rocBLAS
DEPS_DIR=${HOME}/rocblas
stage2_command_args "$@"
disable_debug_package_generation
build_rocblas() {
echo "Start build"
@@ -17,28 +18,37 @@ build_rocblas() {
SHARED_LIBS="OFF"
fi
#Removed GPU ARCHS from here as it will be part of compute_utils.sh ROCMOPS-7302 & ROCMOPS-8091
# Temporary workaround for rocBLAS to build with ASAN as suggested in #SWDEV-314505
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
export ASAN_OPTIONS="detect_leaks=0:verify_asan_link_order=0"
set_asan_env_vars
set_address_sanitizer_on
export ASAN_OPTIONS="detect_leaks=0:verify_asan_link_order=0"
# updating GPU_ARCHS for ASAN build to supported gpu arch only SWDEV-479178
#GPU_ARCHS="gfx90a:xnack+;gfx942:xnack+" #This will be part of compute_utils.sh ROCMOPS-7302 & ROCMOPS-8091
fi
LAZY_LOADING=ON
SEPARATE_ARCHES=ON
cd $COMPONENT_SRC
CXX=$(set_build_variables __AMD_CLANG_++__)
mkdir -p $DEPS_DIR && cp -r /usr/blis $DEPS_DIR
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
# if ENABLE_GPU_ARCH is set in env by Job parameter ENABLE_GPU_ARCH, then set GFX_ARCH to that value. This will override any of the case values above
if [ -n "$ENABLE_GPU_ARCH" ]; then
#setting gfx arch as part of rocm_common_cmake_params
set_gpu_arch "${ENABLE_GPU_ARCH}"
fi
init_rocm_common_cmake_params
echo "C compiler: $CC"
echo "CXX compiler: $CXX"
init_rocm_common_cmake_params
cmake \
-DCMAKE_TOOLCHAIN_FILE=toolchain-linux.cmake \
-DBUILD_DIR="${BUILD_DIR}" \
"${rocm_math_common_cmake_params[@]}" \
"${rocm_math_common_cmake_params[@]}" \
-DROCM_DIR="${ROCM_PATH}" \
${LAUNCHER_FLAGS} \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
@@ -47,11 +57,10 @@ build_rocblas() {
-DBUILD_CLIENTS_BENCHMARKS=ON \
-DBUILD_CLIENTS_SAMPLES=ON \
-DLINK_BLIS=ON \
-DAMDGPU_TARGETS="${GPU_TARGETS}" \
-DTensile_CODE_OBJECT_VERSION=default \
-DTensile_LOGIC=asm_full \
-DTensile_SEPARATE_ARCHITECTURES=ON \
-DTensile_LAZY_LIBRARY_LOADING=ON \
-DTensile_SEPARATE_ARCHITECTURES="${SEPARATE_ARCHES}" \
-DTensile_LAZY_LIBRARY_LOADING="${LAZY_LOADING}" \
-DTensile_LIBRARY_FORMAT=msgpack \
-DBUILD_ADDRESS_SANITIZER="${ADDRESS_SANITIZER}" \
-DTENSILE_VENV_UPGRADE_PIP=ON \
@@ -63,7 +72,7 @@ build_rocblas() {
rm -rf _CPack_Packages/ && rm -rf ./library/src/build_tmp && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -75,7 +84,7 @@ clean_rocblas() {
}
case $TARGET in
build) build_rocblas ;;
build) build_rocblas; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocblas ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,9 +1,17 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rocDecode
BUILD_DEV=ON
build_rocdecode() {
if [ "$DISTRO_ID" = "centos-7" ] || \
[ "$DISTRO_ID" = "mariner-2.0" ] || \
[ "$DISTRO_ID" = "azurelinux-3.0" ] || \
[ "$DISTRO_ID" = "debian-10" ]; then
echo "Not building rocDecode for ${DISTRO_ID}. Exiting..."
return 0
fi
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
@@ -11,18 +19,27 @@ build_rocdecode() {
fi
mkdir -p $BUILD_DIR && cd $BUILD_DIR
# python3 ${COMPONENT_SRC}/rocDecode-setup.py --developer OFF
cmake -DROCM_DEP_ROCMCORE=ON ${COMPONENT_SRC}
make -j8
make install
make package
# for i in {1..5}; do
# python3 ../rocDecode-setup.py --developer OFF && break || {
# echo "Attempt $i failed! Retrying in $((i * 30)) seconds..."
# sleep $((i * 30))
# }
# done
# python3 ${COMPONENT_SRC}/rocDecode-setup.py --developer OFF
init_rocm_common_cmake_params
cmake \
"${rocm_math_common_cmake_params[@]}" \
-DROCM_DEP_ROCMCORE=ON \
-DROCDECODE_ENABLE_ROCPROFILER_REGISTER=ON \
"${COMPONENT_SRC}"
cmake --build "$BUILD_DIR" -- -j${PROC}
cpack -G ${PKGTYPE^^} -B ${BUILD_DIR}
cmake --build "$BUILD_DIR" -- install
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR
cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
clean_rocdecode() {
@@ -32,7 +49,7 @@ clean_rocdecode() {
}
stage2_command_args "$@"
case $TARGET in
build) build_rocdecode ;;
build) build_rocdecode; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocdecode ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
PATH=${ROCM_PATH}/bin:$PATH
set_component_src rocFFT
@@ -9,6 +9,10 @@ set_component_src rocFFT
build_rocfft() {
echo "Start Build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
ack_and_skip_static
fi
cd $COMPONENT_SRC
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
@@ -18,17 +22,14 @@ build_rocfft() {
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
init_rocm_common_cmake_params
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
#Removed GPU ARCHS from here as it will be part of compute_utils.sh ROCMOPS-7302 & ROCMOPS-8091
CXX="${ROCM_PATH}/bin/hipcc" \
# Workaround for HIP sources with a C++ suffix; force CXXFLAGS for both
# HIP and C++ compiles
CXX=$(set_build_variables __HIP_CC__) \
cmake \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DUSE_HIP_CLANG=ON \
-DHIP_COMPILER=clang \
-DBUILD_CLIENTS_SAMPLES=ON \
@@ -41,7 +42,7 @@ build_rocfft() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -55,7 +56,7 @@ clean_rocfft() {
stage2_command_args "$@"
case $TARGET in
build) build_rocfft ;;
build) build_rocfft; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocfft ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -1,9 +1,13 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rocJPEG
BUILD_DEV=ON
build_rocjpeg() {
if [ "$DISTRO_ID" = "centos-7" ] || [ "$DISTRO_ID" = "sles-15.4" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ] || [ "$DISTRO_ID" = "debian-10" ]; then
echo "Not building rocJPEG for ${DISTRO_ID}. Exiting..."
return 0
fi
echo "Start build"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
@@ -13,16 +17,15 @@ build_rocjpeg() {
mkdir -p $BUILD_DIR && cd $BUILD_DIR
# python3 ../rocJPEG-setup.py
cmake -DROCM_DEP_ROCMCORE=ON "$COMPONENT_SRC"
make -j8
make install
make package
cmake ${GEN_NINJA} -DROCM_DEP_ROCMCORE=ON -DROCJPEG_ENABLE_ROCPROFILER_REGISTER=ON "$COMPONENT_SRC"
ninja -j8
ninja install
ninja package
cmake --build "$BUILD_DIR" -- -j${PROC}
cpack -G ${PKGTYPE^^}
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR
cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
clean_rocjpeg() {

View File

@@ -23,15 +23,15 @@ printUsage() {
return 0
}
## ROCm cmake build (using Makefile) environment variables
PROJ_NAME="rocm-cmake"
TARGET="build"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_DEB="$(getPackageRoot)/deb/rocm-cmake"
PACKAGE_RPM="$(getPackageRoot)/rpm/rocm-cmake"
ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
ROCM_CMAKE_PACKAGE_DEB="$(getPackageRoot)/deb/rocm-cmake"
ROCM_CMAKE_PACKAGE_RPM="$(getPackageRoot)/rpm/rocm-cmake"
## ROCm cmake build (using CMake) environment variables
ROCM_CMAKE_BUILD_DIR="$(getBuildPath $PROJ_NAME)"
ROCM_CMAKE_PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
ROCM_CMAKE_PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
ROCM_CMAKE_BUILD_TYPE="debug"
BUILD_TYPE="Debug"
SHARED_LIBS="ON"
@@ -39,11 +39,13 @@ CLEAN_OR_OUT=0;
PKGTYPE="deb"
MAKETARGET="deb"
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,wheel,address_sanitizer,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -55,11 +57,13 @@ do
ack_and_ignore_asan ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -115,10 +119,19 @@ print_output_directory() {
}
case $TARGET in
(clean) clean_rocm_cmake ;;
(build) build_rocm_cmake ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean_rocm_cmake
;;
(build)
build_rocm_cmake
build_wheel "$ROCM_CMAKE_BUILD_DIR" "$PROJ_NAME"
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -13,8 +13,10 @@ printUsage() {
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of rocm-core.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
@@ -24,6 +26,7 @@ printUsage() {
return 0
}
## ROCm build (using CMake) environment variables
PROJ_NAME="rocm-core"
PACKAGE_ROOT="$(getPackageRoot)"
ROCM_CORE_BUILD_DIR="$(getBuildPath rocm_core)"
@@ -38,7 +41,8 @@ MAKETARGET="deb"
PKGTYPE="deb"
ADDRESS_SANITIZER=false
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,address_sanitizer,outdir:,wheel,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -56,6 +60,8 @@ do
ADDRESS_SANITIZER=true ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package )
@@ -87,14 +93,11 @@ build_rocm_core() {
if [ ! -d "$ROCM_CORE_BUILD_DIR" ]; then
mkdir -p "$ROCM_CORE_BUILD_DIR"
fi
pushd "$ROCM_CORE_BUILD_DIR"
cmake \
-DCMAKE_VERBOSE_MAKEFILE=1 \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DCMAKE_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
-DCPACK_PACKAGING_INSTALL_PREFIX="$ROCM_INSTALL_PATH" \
-DCPACK_GENERATOR="${CPACKGEN:-"DEB;RPM"}" \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DCPACK_DEBIAN_PACKAGE_RELEASE=$CPACK_DEBIAN_PACKAGE_RELEASE \
-DCPACK_RPM_PACKAGE_RELEASE=$CPACK_RPM_PACKAGE_RELEASE \
-DROCM_VERSION="$ROCM_VERSION" \
@@ -109,24 +112,33 @@ build_rocm_core() {
}
print_output_directory() {
case ${PKGTYPE} in
case ${PKGTYPE} in
("deb")
echo ${ROCM_CORE_PACKAGE_DEB};;
("rpm")
echo ${ROCM_CORE_PACKAGE_RPM};;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
esac
exit
}
verifyEnvSetup
case $TARGET in
(clean) clean_rocm_core ;;
(build) build_rocm_core ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
(clean)
clean_rocm_core
;;
(build)
build_rocm_core
build_wheel "$ROCM_CORE_BUILD_DIR" "$PROJ_NAME"
;;
(outdir)
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"

View File

@@ -2,13 +2,18 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
stage2_command_args "$@"
build_rocm-dev(){
$(dirname "${BASH_SOURCE[0]}")/build_rocm.sh -d
mv ${OUT_DIR}/${PKGTYPE}/meta ${OUT_DIR}/${PKGTYPE}/rocm-dev
}
case $TARGET in
build) echo "end of rocm-dev build..." ;;
outdir) ;;
clean) echo "Cleaning rocm-dev is not required..." ;;
build) build_rocm-dev ;;
outdir) echo "${OUT_DIR}/${PKGTYPE}/rocm-dev" ;;
clean) rm -rf ${OUT_DIR}/${PKGTYPE}/rocm-dev ;;
*) die "Invalid target $TARGET" ;;
esac

View File

@@ -1,7 +1,6 @@
#!/bin/bash
source "${BASH_SOURCE%/*}/compute_utils.sh" || return
# Can't use -R or -r in here
remove_make_r_flags
printUsage() {
@@ -15,8 +14,10 @@ printUsage() {
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -s, --static Component/Build does not support static builds just accepting this param & ignore. No effect of the param on this build"
echo " -w, --wheel Creates python wheel package of rocm-gdb.
It needs to be used along with -r option"
echo " -h, --help Prints this help"
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
@@ -32,23 +33,32 @@ toStdoutStderr(){
}
linkFiles(){
# Attempt to use hard links first, for speed and to save disk space;
# if that fails, fall back to a copy
cp -lfR "$1" "$2" || cp -fR "$1" "$2"
}
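# For illustration: the hard-link-then-copy idiom above degrades gracefully across
# filesystems; cp -lfR fails when source and destination sit on different mounts and
# the plain cp -fR takes over. Typical call (the same one used for the licence file below):
#   linkFiles "$ROCM_GDB_ROOT/gdb/NOTICES.txt" "$BUILD_DIR/package/main${ROCM_INSTALL_PATH}/share/doc/rocgdb"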
## Build environment variables
PROJ_NAME=rocm-gdb
TARGET=build
MAKETARGET=deb
BUILD_DIR=$(getBuildPath $PROJ_NAME)
PACKAGE_DEB=$(getPackageRoot)/deb/$PROJ_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$PROJ_NAME
MAKE_OPTS="$DASH_JAY"
MAKETARGET=deb # Not currently used
BUILD_DIR=$(getBuildPath $PROJ_NAME) # e.g. out/ubuntu.16.04/16.04/build/rocm-gdb
PACKAGE_DEB=$(getPackageRoot)/deb/$PROJ_NAME # e.g. out/ubuntu.16.04/16.04/deb
PACKAGE_RPM=$(getPackageRoot)/rpm/$PROJ_NAME # e.g. out/ubuntu.16.04/16.04/rpm
MAKE_OPTS="$DASH_JAY" # e.g. -j 56
BUG_URL="https://github.com/ROCm-Developer-Tools/ROCgdb/issues"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"
LDFLAGS="$LDFLAGS -Wl,--enable-new-dtags"
LIB_AMD_PYTHON="libamdpython.so"
LIB_AMD_PYTHON="amdpythonlib.so" #lib name which replaces required python lib
LIB_AMD_PYTHON_DIR_PATH=${ROCM_INSTALL_PATH}/lib
# A curated list of things to keep. It would be safer to have a list
# of things to remove, as failing to remove something is usually much
# less harmful than removing too much.
tokeep=(
main${ROCM_INSTALL_PATH}/bin/rocgdb
@@ -77,13 +87,18 @@ tokeep=(
keep_wanted_files(){
(
cd "$BUILD_DIR/package/"
# generate the keep pattern as one name per line
printf -v keeppattern '%s\n' "${tokeep[@]}"
find main/opt -type f | grep -xv "$keeppattern" | xargs -r rm
# prune empty directories
find main/opt -type d -empty -delete
)
return 0
}
# Moved to a function so that both package_deb and package_rpm can call it,
# removing the dependency of package_rpm_tests on package_deb.
# Copy the required files to create a stand-alone testsuite.
copy_testsuite_files() {
(
dest="$BUILD_DIR/package/tests${ROCM_INSTALL_PATH}/test/gdb/"
@@ -113,43 +128,57 @@ clean() {
rm -rf $PACKAGE_RPM
}
# set the lexically bound variable VERSION to the current version
# A passed parameter gives the default
# Note that this might need to be stripped further as it might have
# things like "-git" which is not acceptable as a version to rpm.
get_version(){
VERSION=$(sed -n 's/^.*char version[^"]*"\([^"]*\)".*;.*/\1/p' $BUILD_DIR/gdb/version.c || : )
VERSION=${VERSION:-$1}
}
package_deb(){
# Package main binary.
# TODO package documentation when we build some.
mkdir -p "$BUILD_DIR/package/main/DEBIAN"
# Extract version from build
local VERSION
get_version unknown
# add upgrade related version sub-field
VERSION="${VERSION}.${ROCM_LIBPATCH_VERSION}"
#create postinstall and prerm
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/preinst" <<EOF
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/postinst" <<EOF
#!/bin/sh
# Pre-installation script commands
echo "Running pre-installation script..."
mkdir -p ${ROCM_INSTALL_PATH}/lib
PYTHON_LIB_INSTALLED=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}')
ln -s \$PYTHON_LIB_INSTALLED ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
echo "pre-installation done."
# Post-installation script commands
echo "Running post-installation script..."
mkdir -p $LIB_AMD_PYTHON_DIR_PATH
# Choosing the lowest version of the libpython3 installed.
PYTHON_LIB_INSTALLED=\$(find /lib/ -name 'libpython3*.so' | head -n 1)
echo "Installing rocm-gdb with [\$PYTHON_LIB_INSTALLED]."
ln -s \$PYTHON_LIB_INSTALLED $LIB_AMD_PYTHON_DIR_PATH/$LIB_AMD_PYTHON
echo "post-installation done."
EOF
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/postrm" <<EOF
grep -v '^# ' > "$BUILD_DIR/package/main/DEBIAN/prerm" <<EOF
#!/bin/sh
# Post-uninstallation script commands
echo "Running post-uninstallation script..."
PYTHON_LINK_BY_OPENCL=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}' | awk -F'/' '{print \$NF}')
rm -f ${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL
rm -f ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
if [ -L "${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON" ] || \
[ -L "${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL" ] ; then
# Pre-uninstallation script commands
echo "Running pre-uninstallation script..."
rm -f $LIB_AMD_PYTHON_DIR_PATH/$LIB_AMD_PYTHON
if [ -L "$LIB_AMD_PYTHON_DIR_PATH/$LIB_AMD_PYTHON" ] ; then
echo " some rocm-gdb requisite libs could not be removed"
else
echo " all requisite libs removed successfully "
fi
echo "post-uninstallation done."
echo "pre-uninstallation done."
EOF
chmod +x $BUILD_DIR/package/main/DEBIAN/postrm
chmod +x $BUILD_DIR/package/main/DEBIAN/preinst
chmod +x $BUILD_DIR/package/main/DEBIAN/prerm
chmod +x $BUILD_DIR/package/main/DEBIAN/postinst
# Create control file, with variable substitution.
# Lines with # at the start are removed, to allow for comments
mkdir "$BUILD_DIR/debian"
@@ -167,8 +196,9 @@ Section: utils
Architecture: amd64
Essential: no
Priority: optional
Depends: \${shlibs:Depends}, rocm-dbgapi, rocm-core
Depends: \${shlibs:Depends}, rocm-dbgapi, rocm-core, python3-dev
EOF
# Use dpkg-shlibdeps to list shlib dependencies, the result is placed
# in $BUILD_DIR/debian/substvars.
(
@@ -179,6 +209,7 @@ EOF
fi
dpkg-shlibdeps --ignore-missing-info -e "$BUILD_DIR/package/main/${ROCM_INSTALL_PATH}/bin/rocgdb"
)
# Generate the final DEBIAN/control, and substitute the shlibs:Depends.
# This is a bit unorthodox as we are only using bits and pieces of the
# dpkg tools.
@@ -191,8 +222,10 @@ EOF
-e "s/\\$\{shlibs:Depends\}/$SHLIB_DEPS/" \
< debian/control > "$BUILD_DIR/package/main/DEBIAN/control"
)
mkdir -p "$OUT_DIR/deb/$PROJ_NAME"
fakeroot dpkg-deb -Zgzip --build "$BUILD_DIR/package/main" "$OUT_DIR/deb/$PROJ_NAME"
# Package the tests so they can be run on a test slave
mkdir -p "$BUILD_DIR/package/tests/DEBIAN"
mkdir -p "$BUILD_DIR/package/tests/${ROCM_INSTALL_PATH}/test/gdb"
@@ -213,21 +246,28 @@ Priority: optional
# rocm-core as policy says everything to depend on rocm-core
Depends: ${PROJ_NAME} (=${VERSION}-${CPACK_DEBIAN_PACKAGE_RELEASE}), dejagnu, rocm-core, make
EOF
copy_testsuite_files
fakeroot dpkg-deb -Zgzip --build "$BUILD_DIR/package/tests" "$OUT_DIR/deb/$PROJ_NAME"
}
package_rpm(){
# TODO, use this to package the tests as well. In the mean time hard code the package
set -- rocm-gdb
local packageDir="$BUILD_DIR/package_rpm/$1"
local specFile="$packageDir/$1.spec"
local packageRpm="$packageDir/rpm"
local packageDir="$BUILD_DIR/package_rpm/$1" # e.g. out/ubuntu-16.04/16.04/build/rocm-gdb/package_rpm/main
local specFile="$packageDir/$1.spec" # The generated spec file
local packageRpm="$packageDir/rpm" # The RPM infrastructure
# Extract version from build. If more than one line matches then
# expect failures. Solution is to find which version is the wanted one.
local VERSION
get_version 0.0.0
# add upgrade related version sub-field
VERSION=${VERSION}.${ROCM_LIBPATCH_VERSION}
# get the __os_install_post macro, edit it to remove the python bytecode generation
# rpm --showrc shows the default macros with priority level (-14) in this case
# so remove everything before this macro, everything after it, remove the offending line,
# and make it available as the ospost variable to insert into spec file
local ospost="$(echo '%define __os_install_post \'
rpm --showrc | sed '1,/^-14: __os_install_post/d;
/^-14:/,$d;/^%{nil}/!s/$/ \\/;
@@ -238,6 +278,8 @@ package_rpm(){
mkdir -p "$packageDir"
# Create the spec file.
# Allow comments in the generation of the specfile, may be overkill.
grep -v '^## ' <<- EOF > $specFile
## Set up where this stuff goes
%define _topdir $packageRpm
@@ -256,8 +298,7 @@ Version: ${VERSION//-/_}
Release: ${CPACK_RPM_PACKAGE_RELEASE}%{?dist}
License: GPL
Prefix: ${ROCM_INSTALL_PATH}
Requires: rocm-core
Provides: $LIB_AMD_PYTHON()(64bit)
Requires: rocm-core, rocm-dbgapi
%description
This is ROCgdb, the ROCm source-level debugger for Linux, based on
@@ -278,27 +319,6 @@ https://github.com/RadeonOpenCompute/ROCm
## into the local RPM_BUILD_ROOT and left the defaults take over. Need
## to quote the dollar signs as we want rpm to expand them when it is
## run, rather than the shell when we build the spec file.
%pre
# Post-install script commands
echo "Running post-install script..."
mkdir -p ${ROCM_INSTALL_PATH}/lib
PYTHON_LIB_INSTALLED=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}')
ln -s \$PYTHON_LIB_INSTALLED ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
%postun
# Post-uninstallation script commands
echo "Running post-uninstallation script..."
PYTHON_LINK_BY_OPENCL=\$(ldconfig -p | awk '/libpython3/ { print \$NF; exit}' | awk -F'/' '{print \$NF}')
rm -f ${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL
rm -f ${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON
if [ -L "${ROCM_INSTALL_PATH}/lib/$LIB_AMD_PYTHON" ] || \
[ -L "${ROCM_INSTALL_PATH}/lib/\$PYTHON_LINK_BY_OPENCL" ] ; then
echo " some rocm-gdb requisite libs could not be removed"
else
echo " all requisite libs removed successfully "
fi
echo "post-uninstallation done."
%install
rm -rf \$RPM_BUILD_ROOT
mkdir -p \$RPM_BUILD_ROOT
@@ -307,24 +327,27 @@ cp -ar $BUILD_DIR/package/main/opt \$RPM_BUILD_ROOT/opt
## The file section is generated by walking the tree.
%files
EOF
# Now generate the files
find $BUILD_DIR/package/main/opt -type d | sed "s:$BUILD_DIR/package/main:%dir :" >> $specFile
find $BUILD_DIR/package/main/opt ! -type d | sed "s:$BUILD_DIR/package/main::" >> $specFile
rpmbuild --define "_topdir $packageRpm" -ba $specFile
# Now copy it to final location
mkdir -p "$PACKAGE_RPM" # e.g. out/ubuntu-16.04/16.04/rpm/rocm-gdb
mv $packageRpm/RPMS/x86_64/*.rpm "$PACKAGE_RPM"
}
package_rpm_tests(){
# TODO, use this to package the tests as well. In the mean time hard code the package
set -- rocm-gdb-tests
local packageDir="$BUILD_DIR/package_rpm/$1"
local specFile="$packageDir/$1.spec"
local packageRpm="$packageDir/rpm"
local packageDir="$BUILD_DIR/package_rpm/$1" # e.g. out/ubuntu-16.04/16.04/build/rocm-gdb/package_rpm/main
local specFile="$packageDir/$1.spec" # The generated spec file
local packageRpm="$packageDir/rpm" # The RPM infrastructure
# Extract version from build. If more than one line matches then
# expect failures. Solution is to find which version is the wanted one.
local VERSION
get_version 0.0.0
# add upgrade related version sub-field
VERSION=${VERSION}.${ROCM_LIBPATCH_VERSION}
local RELEASE=${CPACK_RPM_PACKAGE_RELEASE}%{?dist}
@@ -333,6 +356,8 @@ package_rpm_tests(){
mkdir -p "$packageRpm"
# Create the spec file.
# Allow comments in the generation of the specfile, may be overkill.
local ospost="$(echo '%define __os_install_post \'
rpm --showrc | sed '1,/^-14: __os_install_post/d;
/^-14:/,$d;/^%{nil}/!s/$/ \\/;
@@ -385,10 +410,15 @@ EOF
copy_testsuite_files
# rpm won't resolve the dependency if the unversioned python (#!/usr/bin/python) is used.
# /usr/bin/python - this file is not owned by any package.
# So find and append the version to the /usr/bin/python as /usr/bin/python3
# By updating this, the python3 requirement will be provided by the python3 package.
find $BUILD_DIR/package/tests/opt -type f -exec sed -i '1s:^#! */usr/bin/python\>:&3:' {} +
rpmbuild --define "_topdir $packageRpm" -ba $specFile
mkdir -p "$PACKAGE_RPM"
# Now copy it to final location
mkdir -p "$PACKAGE_RPM" # e.g. out/ubuntu-16.04/16.04/rpm/rocm-gdb
mv $packageRpm/RPMS/x86_64/*.rpm "$PACKAGE_RPM"
}
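# For illustration: the sed one-liner above rewrites unversioned python shebangs so the
# generated test RPM depends on python3 rather than the unowned /usr/bin/python, e.g.
#   printf '#!/usr/bin/python\nprint("hi")\n' > /tmp/shebang-demo.py
#   sed -i '1s:^#! */usr/bin/python\>:&3:' /tmp/shebang-demo.py
#   head -n 1 /tmp/shebang-demo.py    # prints: #!/usr/bin/python3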
@@ -396,20 +426,29 @@ build() {
if [ ! -e "$ROCM_GDB_ROOT/configure" ]
then
toStdoutStderr "No $ROCM_GDB_ROOT/configure file, skippping rocm-gdb"
exit 0
exit 0 # This is not an error
fi
local pythonver=python3
if [[ "$DISTRO_ID" == "ubuntu-18.04" ]]; then
pythonver=python3.8
fi
# Workaround for rocm-gdb failure due to texlive on RHEL-9 & CentOS-9 SWDEV-339596
if [[ "$DISTRO_ID" == "centos-9" ]] || [[ "$DISTRO_ID" == "rhel-9.0" ]]; then
fmtutil-user --missing
fi
echo "Building $PROJ_NAME"
mkdir -p "$BUILD_DIR"
cd "$BUILD_DIR" || die "Failed to cd to '$BUILD_DIR'"
# Build instructions taken from the README.ROCM with the addition
# of --with-pkgversion
#
# The "new" way of specifying the path to the amd-dbgapi library is by
# setting PKG_CONFIG_PATH. The --with-amd-dbgapi flag is used to ensure
# that if amd-dbgapi is not found, configure fails.
#
# --with-rocm-dbgapi is kept for now, to ease the transition. It can be
# removed once it is determined that we don't need to build source trees
# using the "old" way.
$ROCM_GDB_ROOT/configure --program-prefix=roc --prefix="${ROCM_INSTALL_PATH}" \
--htmldir="\${prefix}/share/html" --pdfdir="\${prefix}/share/doc/rocgdb" \
--infodir="\${prefix}/share/info/rocgdb" \
@@ -441,24 +480,31 @@ build() {
LDFLAGS="$LDFLAGS"
LD_RUN_PATH='${ORIGIN}/../lib' make $MAKE_OPTS
REPLACE_LIB_NAME=$(ldd -d $BUILD_DIR/gdb/gdb |awk '/libpython/{print $1}')
echo "Replacing $REPLACE_LIB_NAME with $LIB_AMD_PYTHON"
patchelf --replace-needed $REPLACE_LIB_NAME $LIB_AMD_PYTHON $BUILD_DIR/gdb/gdb
if [[ "$DISTRO_ID" == "ubuntu"* ]]; then
#changing the python lib requirement in the built gdb for ubuntu builds
REPLACE_LIB_NAME=$(ldd -d $BUILD_DIR/gdb/gdb |awk '/libpython/{print $1}')
echo "Replacing $REPLACE_LIB_NAME with $LIB_AMD_PYTHON"
patchelf --replace-needed $REPLACE_LIB_NAME $LIB_AMD_PYTHON $BUILD_DIR/gdb/gdb
fi
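# For illustration: the DT_NEEDED swap done by patchelf can be verified from the
# dynamic section, e.g.
#   readelf -d "$BUILD_DIR/gdb/gdb" | grep -i 'needed.*python'
# which should now list $LIB_AMD_PYTHON instead of a concrete libpython3.x entry.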
mkdir -p $BUILD_DIR/package/main${ROCM_INSTALL_PATH}/{share/rocgdb,bin}
# Install gdb
make $MAKE_OPTS -C gdb DESTDIR=$BUILD_DIR/package/main install install-pdf install-html
# Install binutils for coremerge and coremerge manpage.
make $MAKE_OPTS -C binutils DESTDIR=$BUILD_DIR/package/main install
# Add in the AMD licences file
linkFiles $ROCM_GDB_ROOT/gdb/NOTICES.txt $BUILD_DIR/package/main${ROCM_INSTALL_PATH}/share/doc/rocgdb
keep_wanted_files
# If a variable CPACKGEN indicates only one type of packaging is required
# then don't bother making the other. This variable is set up in compute_utils.sh
# Default to building both package types if variable is not set
# Use "[ var = value ] || ..." rather than "[ var != value ] &&" so expression
# evaluates to true, and set -e does not cause script to exit.
[ "${CPACKGEN}" = "DEB" ] || package_rpm && package_rpm_tests
[ "${CPACKGEN}" = "RPM" ] || package_deb
}
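# For illustration: the '[ cond ] || action' form above matters because build() runs with
# set -e and these are its final statements. Written as '[ "$CPACKGEN" != "RPM" ] && package_deb',
# a CPACKGEN of RPM would leave the function with exit status 1 and abort the caller; the
# '||' form ends with status 0 whether the action runs or not, e.g.
#   deb_step() { [ "${CPACKGEN:-}" = "RPM" ] || echo "building DEB"; }
#   CPACKGEN=RPM deb_step   # echo skipped, still returns 0
#   CPACKGEN=DEB deb_step   # echo runs, returns 0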
# See if the code exists
print_output_directory() {
case ${PKGTYPE} in
("deb")
@@ -475,11 +521,14 @@ verifyEnvSetup
main(){
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,wheel,address_sanitizer,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
ASAN_BUILD="no"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -490,9 +539,11 @@ do
(-a | --address_sanitizer)
set_asan_env_vars
set_address_sanitizer_on
ASAN_BUILD="yes" ; shift ;;
ASAN_BUILD="yes" ; shift ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
echo " Wheel"; WHEEL_PACKAGE=true ; shift;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package) #FIXME
@@ -505,6 +556,7 @@ do
esac
done
# If building with Clang, we need to build in C++17 mode, to avoid some build problems
if [[ $CXX == *"clang++" ]]
then
CXX="$CXX -std=gnu++17"
@@ -518,17 +570,28 @@ if [ $RET_CONFLICT -ge 30 ]; then
fi
case $TARGET in
("clean") clean ;;
("build") build ;;
("outdir") print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
("clean")
clean
;;
("build")
build
build_wheel "$BUILD_DIR" "$PROJ_NAME"
;;
("outdir")
print_output_directory
;;
(*)
die "Invalid target $TARGET"
;;
esac
echo "Operation complete"
}
# If this script is not being sourced, then run it.
if [ "$0" = "$BASH_SOURCE" ]
then
main "$@"
else
set +e
set +e # Undo the damage from compute_utils.sh
fi

View File

@@ -11,6 +11,8 @@ printUsage() {
echo " -r, --release Build non-debug version rocm_smi (default is debug)"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of rocm-smi.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of type referred to by pkg_type"
echo " -p, --package <type> Specify packaging format"
echo " -32, Build 32b version (default is 64b)"
@@ -22,7 +24,10 @@ printUsage() {
return 0
}
# RSMI ==> deb/rpm package target
# ROCM_SMI ==> rocm_smi library target and header
# ROCM_SMI
PROJ_NAME="rsmi"
PACKAGE_ROOT="$(getPackageRoot)"
TARGET="build"
@@ -30,15 +35,18 @@ TARGET="build"
PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE="$(getIncludePath)"
# RSMI
RSMI_BUILD_DIR=$(getBuildPath rsmi)
RSMI_PACKAGE_DEB_DIR="$(getPackageRoot)/deb/$PROJ_NAME"
RSMI_PACKAGE_RPM_DIR="$(getPackageRoot)/rpm/$PROJ_NAME"
RSMI_BUILD_TYPE="debug"
BUILD_TYPE="Debug"
# BUILD ARGUMENTS
MAKETARGET="deb"
MAKEARG="$DASH_JAY O=$RSMI_BUILD_DIR"
RSMI_MAKE_OPTS="$DASH_JAY O=$RSMI_BUILD_DIR -C $RSMI_BUILD_DIR"
# The following should be 64 (default) 32 for 32b
ROCM_SMI_BLD_BITS=64
RSMI_PKG_NAME_ROOT="rocm-smi-lib"
RSMI_PKG_NAME="${RSMI_PKG_NAME_ROOT}${ROCM_SMI_BLD_BITS}"
@@ -46,11 +54,13 @@ SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,wheel,address_sanitizer,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -65,13 +75,15 @@ do
ADDRESS_SANITIZER=true ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
(-32)
ROCM_SMI_BLD_BITS="32"; shift ;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -109,6 +121,7 @@ build_rsmi() {
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
$(rocm_common_cmake_params) \
$(rocm_cmake_params) \
${GEN_NINJA} \
-DENABLE_LDCONFIG=OFF \
-DROCM_SMI_PACKAGE="${RSMI_PKG_NAME}" \
-DCPACK_PACKAGE_VERSION_MAJOR="1" \
@@ -116,14 +129,14 @@ build_rsmi() {
-DCPACK_PACKAGE_VERSION_PATCH="0" \
-DADDRESS_SANITIZER="$ADDRESS_SANITIZER" \
-DBUILD_TESTS=ON \
"$ROCM_SMI_LIB_ROOT"
-S "$ROCM_SMI_LIB_ROOT"
popd
fi
echo "Making rocm_smi package:"
cmake --build "$RSMI_BUILD_DIR" -- $RSMI_MAKE_OPTS
cmake --build "$RSMI_BUILD_DIR" -- $RSMI_MAKE_OPTS install
cmake --build "$RSMI_BUILD_DIR" -- $RSMI_MAKE_OPTS package
cmake --build "$RSMI_BUILD_DIR" -- $DASH_JAY
cmake --build "$RSMI_BUILD_DIR" -- $DASH_JAY install
cmake --build "$RSMI_BUILD_DIR" -- $DASH_JAY package
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$RSMI_PACKAGE_DEB_DIR" $RSMI_BUILD_DIR/*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$RSMI_PACKAGE_RPM_DIR" $RSMI_BUILD_DIR/*.rpm
@@ -145,10 +158,11 @@ verifyEnvSetup
case $TARGET in
(clean) clean_rsmi ;;
(build) build_rsmi ;;
(build) build_rsmi; build_wheel "$RSMI_BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac
echo "Operation complete"
exit 0
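Throughout these scripts, output locations come from getPackageRoot, getBuildPath, getLibPath and similar helpers defined in the shared utilities rather than in this diff. Going by the "e.g. out/ubuntu-16.04/16.04/build/rocm-gdb" comments in the rocm-gdb script, a hypothetical sketch of what they might resolve to (purely illustrative stand-ins, not the real code):

    # Illustrative stand-ins for the shared path helpers (assumptions, not the real helpers).
    OUT_DIR="${OUT_DIR:-out/ubuntu-22.04/22.04}"
    getPackageRoot() { echo "$OUT_DIR"; }
    getBuildPath()   { echo "$(getPackageRoot)/build/$1"; }   # e.g. out/.../build/rsmi
    getLibPath()     { echo "$(getPackageRoot)/lib"; }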

View File

@@ -10,7 +10,9 @@ printUsage() {
echo " -c, --clean Removes all rocminfo build artifacts"
echo " -r, --release Build non-debug version rocminfo (default is debug)"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -s, --static Supports static CI by accepting this param & not bailing out. No effect of the param though"
echo " -s, --static Build static lib (.a). build instead of dynamic/shared(.so) "
echo " -w, --wheel Creates python wheel package of rocminfo.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
@@ -39,7 +41,8 @@ MAKETARGET="deb"
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso:g: --long help,clean,release,static,address_sanitizer,outdir:,gpu_list: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:g: --long help,clean,release,static,wheel,address_sanitizer,outdir:,gpu_list: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -56,11 +59,13 @@ do
set_address_sanitizer_on ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-g | --gpu_list)
GPU_LIST="$2" ; shift 2;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -124,7 +129,7 @@ print_output_directory() {
case $TARGET in
(clean) clean_rocminfo ;;
(build) build_rocminfo ;;
(build) build_rocminfo; build_wheel "$ROCMINFO_BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

View File

@@ -1,10 +1,11 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src ROCmValidationSuite
# Internal RVS libraries need the extra RPATH
ROCM_RVS_LIB_RPATH="\$ORIGIN/.."
build_rocmvalidationsuite() {
@@ -14,6 +15,7 @@ build_rocmvalidationsuite() {
ack_and_skip_static
fi
CXX=$(set_build_variables __CXX__)
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
@@ -22,12 +24,12 @@ build_rocmvalidationsuite() {
cd "${COMPONENT_SRC}"
mkdir -p "$BUILD_DIR"
init_rocm_common_cmake_params
# rocm_common_cmake_params provides the default rpath for rocm executables and libraries
# Override cmake shared linker flags, since RVS libraries require RUNPATH - $ORIGIN:$ORIGIN/..
cmake \
"${rocm_math_common_cmake_params[@]}" \
-DFETCH_ROCMPATH_FROM_ROCMCORE=ON \
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--build-id=sha1,--rpath,$ROCM_LIB_RPATH:$ROCM_RVS_LIB_RPATH" \
-DRVS_BUILD_TESTS=FALSE \
-B "$BUILD_DIR" \
"$COMPONENT_SRC"
@@ -50,7 +52,7 @@ clean_rocmvalidationsuite() {
stage2_command_args "$@"
case $TARGET in
build) build_rocmvalidationsuite ;;
build) build_rocmvalidationsuite; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocmvalidationsuite ;;
*) die "Invalid target ${TARGET}" ;;

View File

@@ -2,7 +2,7 @@
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rocPRIM
@@ -10,12 +10,18 @@ build_rocprim() {
echo "Start build"
cd $COMPONENT_SRC
# Temporary Fix as suggested in #SWDEV-314510
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
#Set ASAN flags
set_asan_env_vars
set_address_sanitizer_on
# ASAN packaging is not required for rocPRIM, since its header only package
# Setting the asan_cmake_params to false will disable ASAN packaging
ASAN_CMAKE_PARAMS="false"
fi
# Enable/Disable Static Flag to be used for
# Package Dependencies during static/non-static builds
SHARED_LIBS="ON"
if [ "${ENABLE_STATIC_BUILDS}" == "true" ]; then
SHARED_LIBS="OFF"
@@ -23,18 +29,14 @@ build_rocprim() {
mkdir -p "$BUILD_DIR" && cd "$BUILD_DIR"
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
fi
#Removed GPU ARCHS from here as it will be part of compute_utils.sh ROCMOPS-7302 & ROCMOPS-8091
init_rocm_common_cmake_params
CXX="${ROCM_PATH}/bin/hipcc" \
CXX=$(set_build_variables __HIP_CC__) \
cmake \
${GEN_NINJA} \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DBUILD_BENCHMARK=OFF \
-DBUILD_TEST=ON \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
@@ -46,7 +48,7 @@ build_rocprim() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -60,7 +62,7 @@ clean_rocprim() {
stage2_command_args "$@"
case $TARGET in
build) build_rocprim ;;
build) build_rocprim; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocprim ;;
*) die "Invalid target $TARGET" ;;

View File

@@ -26,6 +26,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME="rocprofiler-compute"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
@@ -43,11 +44,13 @@ CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"
#parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,static,address_sanitizer,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
-h | --help)
printUsage ; exit 0;;
@@ -66,7 +69,7 @@ do
ack_and_skip_static ;;
-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
--) shift; break;;
--) shift; break;; # end delimiter
*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -108,13 +111,14 @@ build() {
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
${GEN_NINJA} \
-DCHECK_PYTHON_DEPS=NO \
-DPYTHON_DEPS=${BUILD_DIR}/python-libs \
"$ROCPROFILER_COMPUTE_ROOT"
-S "$ROCPROFILER_COMPUTE_ROOT"
fi
make $MAKE_OPTS
make $MAKE_OPTS install
make $MAKE_OPTS package
ninja $MAKE_OPTS
ninja $MAKE_OPTS install
ninja $MAKE_OPTS package
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
@@ -136,7 +140,7 @@ verifyEnvSetup
case "$TARGET" in
(clean) clean ;;
(build) build ;;
(build) build; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac
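The switch from make to ninja above relies on GEN_NINJA being injected into the cmake configure step; the variable is set in the shared utilities and is assumed here to expand to the Ninja generator flag. A minimal configure-and-build sketch under that assumption (paths are illustrative):

    GEN_NINJA="-G Ninja"    # assumed value; in the real scripts it comes from compute_utils.sh
    cmake ${GEN_NINJA} -S "$SRC_DIR" -B "$BUILD_DIR" -DCMAKE_BUILD_TYPE=Release
    ninja -C "$BUILD_DIR"
    ninja -C "$BUILD_DIR" install
    ninja -C "$BUILD_DIR" package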

View File

@@ -12,6 +12,7 @@ printUsage() {
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -w, --wheel Creates python wheel package of rocprofiler-register. It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
@@ -24,6 +25,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME="rocprofiler-register"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
@@ -44,11 +46,13 @@ MAKETARGET="deb"
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,address_sanitizer,wheel,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -61,11 +65,13 @@ do
set_address_sanitizer_on ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -99,17 +105,18 @@ build() {
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
${GEN_NINJA} \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DENABLE_LDCONFIG=OFF \
-DROCPROFILER_REGISTER_BUILD_CI=1 \
-DROCPROFILER_REGISTER_BUILD_TESTS=1 \
-DROCPROFILER_REGISTER_BUILD_SAMPLES=1 \
"$ROCPROFILER_REGISTER_ROOT"
-S "$ROCPROFILER_REGISTER_ROOT"
popd
fi
make $MAKE_OPTS
make $MAKE_OPTS install
make $MAKE_OPTS package
ninja $MAKE_OPTS
ninja $MAKE_OPTS install
ninja $MAKE_OPTS package
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
@@ -131,7 +138,7 @@ verifyEnvSetup
case "$TARGET" in
(clean) clean ;;
(build) build ;;
(build) build; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

View File

@@ -26,6 +26,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME="rocprofiler-sdk"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
@@ -35,8 +36,8 @@ PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
PACKAGE_INCLUDE="$(getIncludePath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
PACKAGE_PREFIX="$ROCM_INSTALL_PATH"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY"
@@ -44,14 +45,16 @@ SHARED_LIBS="ON"
CLEAN_OR_OUT=0
MAKETARGET="deb"
PKGTYPE="deb"
GPU_LIST="gfx900;gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1031;gfx1100;gfx1101;gfx1102"
# Handling GPU Targets for HSACO and HIP Executables
GPU_LIST="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1031;gfx1100;gfx1101;gfx1102"
ASAN=0
#parse the arguments
VALID_STR=$(getopt -o hcrawso:p: --long help,clean,release,static,address_sanitizer,wheel,outdir:,package: -- "$@")
eval set -- "$VALID_STR"
while true; do
#echo "parocessing $1"
case "$1" in
-h | --help)
printUsage
@@ -194,7 +197,7 @@ verifyEnvSetup
case "$TARGET" in
clean) clean ;;
build) build_rocprofiler-sdk ;;
build) build_rocprofiler-sdk; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac

View File

@@ -26,6 +26,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME="rocprofiler-systems"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
@@ -33,11 +34,11 @@ TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
# PACKAGE_INCLUDE="$(getIncludePath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
# PACKAGE_PREFIX="$ROCM_INSTALL_PATH"
BUILD_TYPE="Debug"
MAKE_OPTS="-j 8"
SHARED_LIBS="ON"
@@ -46,6 +47,7 @@ MAKETARGET="deb"
PKGTYPE="deb"
ASAN=0
#parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,address_sanitizer,static,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"
@@ -121,6 +123,10 @@ clean() {
build_rocprofiler_systems() {
echo "Building $PROJ_NAME"
if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ]; then
echo "Skip make and uploading packages for \"$PROJ_NAME\" on \"${DISTRO_ID}\" distro"
exit 0
fi
if [ $ASAN == 1 ]; then
echo "Skip make and uploading packages for rocprofiler-systems on ASAN build"
@@ -141,9 +147,9 @@ build_rocprofiler_systems() {
echo "Updating submodules"
git submodule init
# copy the new URL to your local config
git submodule sync --recursive
# force update the submodule from the new URL
git submodule update --init --recursive --force
echo "Updated submodule status"
@@ -187,6 +193,7 @@ build_rocprofiler_systems() {
else
cmake \
${GEN_NINJA} \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DROCPROFSYS_BUILD_{LIBUNWIND,DYNINST}=ON \
@@ -228,7 +235,7 @@ verifyEnvSetup
case "$TARGET" in
clean) clean ;;
build) build_rocprofiler_systems ;;
build) build_rocprofiler_systems; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac

View File

@@ -12,6 +12,8 @@ printUsage() {
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -w, --wheel Creates python wheel package of roc-profiler.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
@@ -24,6 +26,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME="rocprofiler"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
@@ -33,8 +36,8 @@ PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
PACKAGE_INCLUDE="$(getIncludePath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
PACKAGE_DEB="$PACKAGE_ROOT/deb/$PROJ_NAME"
PACKAGE_RPM="$PACKAGE_ROOT/rpm/$PROJ_NAME"
PACKAGE_PREFIX="$ROCM_INSTALL_PATH"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY -C $BUILD_DIR"
@@ -42,12 +45,15 @@ SHARED_LIBS="ON"
CLEAN_OR_OUT=0
MAKETARGET="deb"
PKGTYPE="deb"
GPU_LIST="gfx900,gfx906,gfx908,gfx90a,gfx940,gfx941,gfx942,gfx1030,gfx1031,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201"
# Handling GPU Targets for HSACO and HIP Executables
GPU_LIST="gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1030,gfx1031,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201"
#parse the arguments
VALID_STR=$(getopt -o hcraswo:p: --long help,clean,release,static,wheel,address_sanitizer,outdir:,package: -- "$@")
eval set -- "$VALID_STR"
while true; do
#echo "parocessing $1"
case "$1" in
-h | --help)
printUsage
@@ -69,6 +75,9 @@ while true; do
;;
-s | --static)
ack_and_skip_static
;;
-w | --wheel)
WHEEL_PACKAGE=true
shift
;;
-o | --outdir)
@@ -85,7 +94,7 @@ while true; do
--)
shift
break
;;
;; # end delimiter
*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] " >&2
exit 20
@@ -115,9 +124,6 @@ clean() {
build_rocprofiler() {
echo "Building $PROJ_NAME"
sed -i 's/set(CPACK_GENERATOR "DEB" "RPM" "TGZ")/set(CPACK_GENERATOR "DEB" "TGZ")/' "${ROCPROFILER_ROOT}/CMakeLists.txt"
PACKAGE_CMAKE="$(getCmakePath)"
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
@@ -173,7 +179,7 @@ verifyEnvSetup
case "$TARGET" in
clean) clean ;;
build) build_rocprofiler ;;
build) build_rocprofiler; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac

View File

@@ -11,6 +11,8 @@ printUsage() {
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -w, --wheel Creates python wheel package of rocr-debug-agent.
It needs to be used along with -r option"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -h, --help Prints this help"
@@ -23,6 +25,7 @@ printUsage() {
return 0
}
## Build environment variables
API_NAME=rocm-debug-agent
PROJ_NAME=$API_NAME
LIB_NAME=lib${API_NAME}.so
@@ -33,12 +36,12 @@ PACKAGE_BIN="$(getBinPath)"
PACKAGE_LIB=$(getLibPath)
PACKAGE_INCLUDE=$(getIncludePath)
BUILD_DIR=$(getBuildPath $API_NAME)
PACKAGE_DEB=$(getPackageRoot)/deb/$API_NAME
PACKAGE_RPM=$(getPackageRoot)/rpm/$API_NAME
PACKAGE_DEB=$PACKAGE_ROOT/deb/$PROJ_NAME
PACKAGE_RPM=$PACKAGE_ROOT/rpm/$PROJ_NAME
PACKAGE_PREFIX=$ROCM_INSTALL_PATH
BUILD_TYPE=Debug
MAKE_OPTS="$DASH_JAY -C"
## Test variables
TEST_PACKAGE_DIR="$(getBinPath)/rocm-debug-agent-test"
PACKAGE_UTILS=$(getUtilsPath)
@@ -54,11 +57,13 @@ MAKETARGET="deb"
PKGTYPE="deb"
VALID_STR=`getopt -o hcraso:p: --long help,clean,release,static,address_sanitizer,outdir:,package: -- "$@"`
#parse the arguments
VALID_STR=`getopt -o hcraswo:p: --long help,clean,release,static,wheel,address_sanitizer,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
#echo "parocessing $1"
case "$1" in
(-h | --help)
printUsage ; exit 0;;
@@ -71,11 +76,13 @@ do
set_address_sanitizer_on ; shift ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
--) shift; break;;
--) shift; break;; # end delimiter
(*)
echo " This should never come but just incase : UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
@@ -102,14 +109,17 @@ clean() {
build() {
echo "Building $PROJ_NAME"
# The cmake path is different for asan and non-asan builds.
# Fetch after getting build type. Default will be non-asan build
PACKAGE_CMAKE="$(getCmakePath)"
# If rocm is installed to an unconventional location, --rocm-path needs to be set.
export HIPCC_COMPILE_FLAGS_APPEND="--rocm-path=$ROCM_PATH"
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
pushd "$BUILD_DIR"
cmake $(rocm_cmake_params) \
${GEN_NINJA} \
-DCMAKE_PREFIX_PATH="$PACKAGE_CMAKE/amd-dbgapi" \
-DCMAKE_MODULE_PATH="$PACKAGE_CMAKE/hip" \
$(rocm_common_cmake_params) \
@@ -118,15 +128,22 @@ build() {
popd
fi
cmake --build "$BUILD_DIR" -- $MAKE_OPTS $BUILD_DIR
cmake --build "$BUILD_DIR" -- $MAKE_OPTS $BUILD_DIR install
cmake --build "$BUILD_DIR" -- $MAKE_OPTS $BUILD_DIR package
cmake --build "$BUILD_DIR" -- $DASH_JAY
cmake --build "$BUILD_DIR" --target install -- $DASH_JAY
cmake --build "$BUILD_DIR" --target package -- $DASH_JAY
mkdir -p $PACKAGE_LIB
cp -R $BUILD_DIR/${LIB_NAME}* $PACKAGE_LIB
copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DEB}" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_RPM}" "$BUILD_DIR/${API_NAME}"*.rpm
mkdir -p "$PACKAGE_UTILS"
progressCopy "$SCRIPT_ROOT/run_rocr_debug_agent_test.sh" "$PACKAGE_UTILS"
## Copy run test py script
echo "copying run-test.py to $PACKAGE_BIN"
progressCopy "$ROCR_DEBUG_AGENT_ROOT/test/run-test.py" "$PACKAGE_BIN"
}
print_output_directory() {
@@ -145,7 +162,7 @@ verifyEnvSetup
case $TARGET in
(clean) clean ;;
(build) build ;;
(build) build; build_wheel "$BUILD_DIR" "$PROJ_NAME" ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $target" ;;
esac

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/compute_helper.sh"
source "$(dirname "${BASH_SOURCE[0]}")/compute_utils.sh"
set_component_src rocRAND
@@ -13,31 +13,37 @@ build_rocrand() {
SHARED_LIBS="OFF"
fi
#Removed GPU ARCHS from here as it will be part of compute_utils.sh ROCMOPS-7302 & ROCMOPS-8091
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
set_asan_env_vars
set_address_sanitizer_on
# updating GPU_ARCHS for ASAN build to supported gpu arch only SWDEV-479178
#GPU_ARCHS="gfx90a:xnack+;gfx942:xnack+"#This will be part of compute_utils.sh ROCMOPS-7302 & ROCMOPS-8091
fi
cd $COMPONENT_SRC && mkdir "$BUILD_DIR"
git config --global --add safe.directory "$COMPONENT_SRC"
# Rename the remote set by the repo tool to origin as
# git submodule update looks for the remote origin.
remote_name=$(git remote show | head -n 1)
echo "remote name: $remote_name"
[ "$remote_name" == "origin" ] || git remote rename "$remote_name" origin
git remote -v
git submodule update --init --force
if [ -n "$GPU_ARCHS" ]; then
GPU_TARGETS="$GPU_ARCHS"
else
GPU_TARGETS="gfx908:xnack-;gfx90a:xnack-;gfx90a:xnack+;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101"
# if GPU_ENABLE_GPU_ARCHARCH is set in env by Job parameter ENABLE_GPU_ARCH, then set GFX_ARCH to that value. This will override any of the case values above
if [ -n "$ENABLE_GPU_ARCH" ]; then
#setting gfx arch as part of rocm_common_cmake_params
set_gpu_arch "${ENABLE_GPU_ARCH}"
fi
init_rocm_common_cmake_params
CXX=$(set_build_variables CXX) \
CXX=$(set_build_variables __CXX__) \
cmake \
${LAUNCHER_FLAGS} \
"${rocm_math_common_cmake_params[@]}" \
-DBUILD_SHARED_LIBS=$SHARED_LIBS \
-DAMDGPU_TARGETS=${GPU_TARGETS} \
-DBUILD_TEST=ON \
-DBUILD_BENCHMARK=ON \
-DBUILD_CRUSH_TEST=ON \
@@ -53,7 +59,7 @@ build_rocrand() {
cmake --build "$BUILD_DIR" -- package
rm -rf _CPack_Packages/ && find -name '*.o' -delete
mkdir -p $PACKAGE_DIR && cp ${BUILD_DIR}/*.${PKGTYPE} $PACKAGE_DIR
copy_if "${PKGTYPE}" "${CPACKGEN:-"DEB;RPM"}" "${PACKAGE_DIR}" "${BUILD_DIR}"/*."${PKGTYPE}"
show_build_cache_stats
}
@@ -67,7 +73,7 @@ clean_rocrand() {
stage2_command_args "$@"
case $TARGET in
build) build_rocrand ;;
build) build_rocrand; build_wheel ;;
outdir) print_output_directory ;;
clean) clean_rocrand ;;
*) die "Invalid target $TARGET" ;;
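The remote-rename dance in build_rocrand exists because git submodule update resolves submodules against a remote named origin, while the repo tool may register the remote under a different name. The same steps in isolation, runnable inside any such checkout:

    cd "$COMPONENT_SRC"
    remote_name=$(git remote show | head -n 1)        # whatever name the repo tool assigned
    [ "$remote_name" = "origin" ] || git remote rename "$remote_name" origin
    git submodule update --init --force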

View File

@@ -42,7 +42,7 @@ PKGTYPE="deb"
#parse the arguments
VALID_STR=`getopt -o hcrao:seg: --long help,clean,release,outdir:,static,address_sanitizer,emulator,gpu_list: -- "$@"`
VALID_STR=`getopt -o hcrawo:seg: --long help,clean,release,outdir:,static,wheel,address_sanitizer,emulator,gpu_list: -- "$@"`
eval set -- "$VALID_STR"
while true ;
@@ -61,6 +61,8 @@ do
exit ;;
(-s | --static)
ack_and_skip_static ;;
(-w | --wheel)
echo "wheel build option accepted and ignored" ; shift ;;
(-e | --emulator )
EMULATOR_BUILD=1 ; ((CLEAN_OR_OUT|=3)) ; shift ;;
(-g | --gpu_list )
@@ -92,6 +94,7 @@ build_rocrsamples() {
cmake -DTARGET_DEVICES=$GPU_LIST \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
${GEN_NINJA} \
-DROCRTST_BLD_TYPE=$ROCRTST_SAMPLES_BUILD_TYPE \
-DROCM_DIR=$PACKAGE_ROOT \
-DLLVM_DIR="$ROCM_INSTALL_PATH/llvm/bin" \

Some files were not shown because too many files have changed in this diff.