Mirror of https://github.com/ROCm/ROCm.git (synced 2026-01-10 23:28:03 -05:00)

Compare commits: docs_devic...rocm-6.3.2 (27 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ae7f47a0a2 | |
| | 5e5f7d6bb7 | |
| | da1125e228 | |
| | e55b9f2a33 | |
| | 761a524d03 | |
| | c895ee483c | |
| | e049d952d4 | |
| | ce41922bb5 | |
| | 2b53b40caa | |
| | 9250e1ba28 | |
| | 3c055ab65b | |
| | 44aaf1b57c | |
| | 822e789998 | |
| | 243ac78609 | |
| | c2f483332f | |
| | b35267b6bd | |
| | deb4895b11 | |
| | 8c036531e8 | |
| | 484cbefc2e | |
| | 721b60d52f | |
| | 8ebe7be283 | |
| | 7e8947fdb4 | |
| | 66cac5301f | |
| | 9f3a1de117 | |
| | 0915fb17e8 | |
| | 0d3eb1d774 | |
| | 7a258cdba9 | |
@@ -59,10 +59,14 @@ jobs:
        value: $(Build.BinariesDirectory)/rocm
      - name: TENSILE_ROCM_ASSEMBLER_PATH
        value: $(Agent.BuildDirectory)/rocm/llvm/bin/amdclang
      - name: CMAKE_CXX_COMPILER
        value: $(Agent.BuildDirectory)/rocm/bin/hipcc
      - name: TENSILE_ROCM_OFFLOAD_BUNDLER_PATH
        value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang-offload-bundler
      - name: ROCM_PATH
        value: $(Agent.BuildDirectory)/rocm
      - name: TENSILE_ROCM_PATH
        value: $(Agent.BuildDirectory)/rocm/bin/hipcc
      - name: PATH
        value: $(Agent.BuildDirectory)/rocm/llvm/bin:$(Agent.BuildDirectory)/rocm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin
      - name: DAY_STRING
        value: $[format('{0:ddMMyyyy}', pipeline.startTime)]
    pool: ${{ variables.ULTRA_BUILD_POOL }}

@@ -150,8 +154,9 @@ jobs:
      extraEnvVars:
        - HIP_ROCCLR_HOME:::/home/user/workspace/rocm
        - TENSILE_ROCM_ASSEMBLER_PATH:::/home/user/workspace/rocm/llvm/bin/amdclang
        - CMAKE_CXX_COMPILER:::/home/user/workspace/rocm/bin/hipcc
        - TENSILE_ROCM_OFFLOAD_BUNDLER_PATH:::/home/user/workspace/rocm/llvm/bin/clang-offload-bundler
        - ROCM_PATH:::/home/user/workspace/rocm
        - TENSILE_ROCM_PATH:::/home/user/workspace/rocm/bin/hipcc
      extraCopyDirectories:
        - deps

@@ -64,10 +64,10 @@ jobs:
        value: $(Build.BinariesDirectory)/rocm
      - name: TENSILE_ROCM_ASSEMBLER_PATH
        value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang
      - name: CMAKE_CXX_COMPILER
        value: $(Agent.BuildDirectory)/rocm/bin/hipcc
      - name: TENSILE_ROCM_OFFLOAD_BUNDLER_PATH
        value: $(Agent.BuildDirectory)/rocm/llvm/bin/clang-offload-bundler
      - name: ROCM_PATH
        value: $(Agent.BuildDirectory)/rocm
    pool: ${{ variables.MEDIUM_BUILD_POOL }}
    workspace:
      clean: all

@@ -96,8 +96,8 @@ jobs:
        -DCMAKE_TOOLCHAIN_FILE=toolchain-linux.cmake
        -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm/llvm;$(Agent.BuildDirectory)/rocm
        -DCMAKE_BUILD_TYPE=Release
        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/bin/amdclang++
        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/bin/amdclang
        -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
        -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/bin/hipcc
        -DGPU_TARGETS=$(JOB_GPU_TARGET)
        -DTensile_CODE_OBJECT_VERSION=default
        -DTensile_LOGIC=asm_full

@@ -125,8 +125,8 @@ jobs:
      extraEnvVars:
        - HIP_ROCCLR_HOME:::/home/user/workspace/rocm
        - TENSILE_ROCM_ASSEMBLER_PATH:::/home/user/workspace/rocm/llvm/bin/clang
        - CMAKE_CXX_COMPILER:::/home/user/workspace/rocm/bin/hipcc
        - TENSILE_ROCM_OFFLOAD_BUNDLER_PATH:::/home/user/workspace/rocm/llvm/bin/clang-offload-bundler
        - ROCM_PATH:::/home/user/workspace/rocm

  - job: rocBLAS_testing
    dependsOn: rocBLAS

@@ -49,10 +49,21 @@ jobs:
    workspace:
      clean: all
    steps:
    # Since mesa-amdgpu-multimedia-devel is not directly available from apt, register it
    - task: Bash@3
      displayName: 'Register ROCm packages'
      inputs:
        targetType: inline
        script: |
          sudo mkdir --parents --mode=0755 /etc/apt/keyrings
          wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${{ variables.KEYRING_VERSION }}/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${{ variables.KEYRING_VERSION }} jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
          echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
          sudo apt update
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        registerROCmPackages: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -93,10 +104,21 @@ jobs:
        JOB_GPU_TARGET: gfx942
        JOB_TEST_POOL: ${{ variables.GFX942_TEST_POOL }}
    steps:
    # Since mesa-amdgpu-multimedia-devel is not directly available from apt, register it
    - task: Bash@3
      displayName: 'Register ROCm packages'
      inputs:
        targetType: inline
        script: |
          sudo mkdir --parents --mode=0755 /etc/apt/keyrings
          wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${{ variables.KEYRING_VERSION }}/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${{ variables.KEYRING_VERSION }} jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
          echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
          sudo apt update
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        registerROCmPackages: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml

@@ -48,10 +48,21 @@ jobs:
      gfx942:
        JOB_GPU_TARGET: gfx942
    steps:
    # Since mesa-amdgpu-multimedia-devel is not directly available from apt, register it
    - task: Bash@3
      displayName: 'Register ROCm packages'
      inputs:
        targetType: inline
        script: |
          sudo mkdir --parents --mode=0755 /etc/apt/keyrings
          wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${{ variables.KEYRING_VERSION }}/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${{ variables.KEYRING_VERSION }} jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
          echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
          sudo apt update
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        registerROCmPackages: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -95,10 +106,21 @@ jobs:
        JOB_GPU_TARGET: gfx942
        JOB_TEST_POOL: ${{ variables.GFX942_TEST_POOL }}
    steps:
    # Since mesa-amdgpu-multimedia-devel is not directly available from apt, register it
    - task: Bash@3
      displayName: 'Register ROCm packages'
      inputs:
        targetType: inline
        script: |
          sudo mkdir --parents --mode=0755 /etc/apt/keyrings
          wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${{ variables.KEYRING_VERSION }}/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
          echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${{ variables.KEYRING_VERSION }} jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
          echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
          sudo apt update
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        registerROCmPackages: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml

@@ -36,7 +36,7 @@ jobs:
        -DCPACK_GENERATOR=DEB
        -DCPACK_DEBIAN_PACKAGE_RELEASE="local.9999~99.99"
        -DCPACK_RPM_PACKAGE_RELEASE="local.9999"
        -DROCM_VERSION="$(NEXT_RELEASE_VERSION)"
        -DROCM_VERSION="$(next-release)"
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml

@@ -35,7 +35,7 @@ jobs:
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        registerROCmPackages: true
        registerRadeon: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -37,8 +37,6 @@ parameters:
    - clr
    - llvm-project
    - rccl
    - rocDecode
    - rocJPEG
    - rocm-cmake
    - rocm-core
    - rocminfo

@@ -62,7 +60,7 @@ jobs:
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        pipModules: ${{ parameters.pipModules }}
        registerROCmPackages: true
        registerRadeon: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -101,7 +99,6 @@ jobs:
        aptPackages: ${{ parameters.aptPackages }}
        pipModules: ${{ parameters.pipModules }}
        gpuTarget: $(JOB_GPU_TARGET)
        registerROCmPackages: true

  - job: rocprofiler_sdk_testing
    dependsOn: rocprofiler_sdk

@@ -122,7 +119,7 @@ jobs:
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        pipModules: ${{ parameters.pipModules }}
        registerROCmPackages: true
        registerRadeon: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -158,4 +155,3 @@ jobs:
        pipModules: ${{ parameters.pipModules }}
        environment: test
        gpuTarget: $(JOB_GPU_TARGET)
        registerROCmPackages: true

@@ -46,14 +46,13 @@ parameters:
- name: rocmDependencies
  type: object
  default:
    - amdsmi
    - aomp
    - clr
    - llvm-project
    - rccl
    - rocDecode
    - rocJPEG
    - rocm-core
    - rocm_smi_lib
    - rocminfo
    - ROCR-Runtime
    - rocprofiler-register

@@ -76,7 +75,7 @@ jobs:
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        pipModules: ${{ parameters.pipModules }}
        registerROCmPackages: true
        registerRadeon: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -160,7 +159,7 @@ jobs:
      parameters:
        aptPackages: ${{ parameters.aptPackages }}
        pipModules: ${{ parameters.pipModules }}
        registerROCmPackages: true
        registerRadeon: true
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml
    - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml
      parameters:

@@ -19,7 +19,7 @@ jobs:
    pool:
      vmImage: ${{ variables.BASE_BUILD_POOL }}
    container:
      image: rocm/dev-ubuntu-22.04:${{ variables.LATEST_RELEASE_VERSION }}
      image: ${{ variables.DOCKER_IMAGE_NAME }}:${{ variables.LATEST_DOCKER_VERSION }}
    workspace:
      clean: all
    steps:

@@ -1,29 +0,0 @@
|
||||
variables:
|
||||
- group: common
|
||||
- template: /.azuredevops/variables-global.yml
|
||||
|
||||
parameters:
|
||||
- name: checkoutRef
|
||||
type: string
|
||||
default: refs/tags/$(LATEST_RELEASE_TAG)
|
||||
|
||||
resources:
|
||||
repositories:
|
||||
- repository: pipelines_repo
|
||||
type: github
|
||||
endpoint: ROCm
|
||||
name: ROCm/ROCm
|
||||
- repository: release_repo
|
||||
type: github
|
||||
endpoint: ROCm
|
||||
name: ROCm/TransferBench
|
||||
ref: ${{ parameters.checkoutRef }}
|
||||
|
||||
trigger: none
|
||||
pr: none
|
||||
|
||||
jobs:
|
||||
- template: ${{ variables.CI_COMPONENT_PATH }}/TransferBench.yml
|
||||
parameters:
|
||||
checkoutRepo: release_repo
|
||||
checkoutRef: ${{ parameters.checkoutRef }}
|
||||
@@ -4,13 +4,13 @@ steps:
    inputs:
      targetType: inline
      script: |
        export packageName=$(curl -s https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION)/pool/main/h/hsa-amd-aqlprofile/ | grep -oP "href=\"\K[^\"]*$(lsb_release -rs)[^\"]*\.deb")
        export packageName=$(curl -s https://repo.radeon.com/rocm/apt/latest/pool/main/h/hsa-amd-aqlprofile/ | grep -oP "href=\"\K[^\"]*$(lsb_release -rs)[^\"]*\.deb")
        echo "##vso[task.setvariable variable=packageName;isreadonly=true]$packageName"
- task: Bash@3
  displayName: 'Download aqlprofile'
  inputs:
    targetType: inline
    script: wget -nv https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION)/pool/main/h/hsa-amd-aqlprofile/$(packageName)
    script: wget -nv https://repo.radeon.com/rocm/apt/latest/pool/main/h/hsa-amd-aqlprofile/$(packageName)
    workingDirectory: '$(Pipeline.Workspace)'
- task: Bash@3
  displayName: 'Extract aqlprofile'

@@ -6,21 +6,21 @@ parameters:
- name: pipModules
  type: object
  default: []
- name: registerROCmPackages
- name: registerRadeon
  type: boolean
  default: false

steps:
- ${{ if eq(parameters.registerROCmPackages, true) }}:
- ${{ if eq(parameters.registerRadeon, true) }}:
  - task: Bash@3
    displayName: 'Register AMDGPU & ROCm repos'
    displayName: 'Register repo.radeon packages'
    inputs:
      targetType: inline
      script: |
        sudo mkdir --parents --mode=0755 /etc/apt/keyrings
        wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
        echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/$(REPO_RADEON_VERSION)/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
        echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION) jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
        echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/latest/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
        echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/latest jammy main" | sudo tee --append /etc/apt/sources.list.d/rocm.list
        echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
        sudo apt update
  # firefox takes time to upgrade and is not needed for CI workloads, hold version

@@ -154,8 +154,8 @@ steps:
      script: |
        echo "RUN mkdir --parents --mode=0755 /etc/apt/keyrings" >> Dockerfile
        echo "RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null" >> Dockerfile
        echo "RUN echo \"deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/$(REPO_RADEON_VERSION)/ubuntu jammy main\" | tee /etc/apt/sources.list.d/amdgpu.list" >> Dockerfile
        echo "RUN echo \"deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/$(REPO_RADEON_VERSION) jammy main\" | tee --append /etc/apt/sources.list.d/rocm.list" >> Dockerfile
        echo "RUN echo \"deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/latest/ubuntu jammy main\" | tee /etc/apt/sources.list.d/amdgpu.list" >> Dockerfile
        echo "RUN echo \"deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/latest jammy main\" | tee --append /etc/apt/sources.list.d/rocm.list" >> Dockerfile
        echo "RUN printf 'Package: *\\nPin: release o=repo.radeon.com\\nPin-Priority: 600' > /etc/apt/preferences.d/rocm-pin-600" >> Dockerfile
        echo "RUN DEBIAN_FRONTEND=noninteractive apt-get --yes update" >> Dockerfile
- ${{ if eq(parameters.registerCUDAPackages, true) }}:

@@ -27,14 +27,14 @@ variables:
  value: rocm-ci_larger_base_disk_pool
- name: GFX942_TEST_POOL
  value: gfx942_test_pool
- name: LATEST_RELEASE_VERSION
  value: 6.3.2
- name: REPO_RADEON_VERSION
  value: 6.3.2
- name: NEXT_RELEASE_VERSION
  value: 6.4.0
- name: LATEST_RELEASE_TAG
  value: rocm-6.3.2
  value: rocm-6.1.0
- name: DOCKER_IMAGE_NAME
  value: rocm/dev-ubuntu-22.04
- name: LATEST_DOCKER_VERSION
  value: 6.1
- name: KEYRING_VERSION
  value: 6.3
- name: AMDMIGRAPHX_GFX942_TEST_PIPELINE_ID
  value: 197
- name: AMDMIGRAPHX_PIPELINE_ID

@@ -151,6 +151,8 @@ variables:
  value: 105
- name: HIPTENSOR_TAGGED_PIPELINE_ID
  value: 56
- name: LAST_RELEASE
  value: 6.1.0
- name: LLVM_PROJECT_PIPELINE_ID
  value: 2
- name: LLVM_PROJECT_TAGGED_PIPELINE_ID

@@ -181,6 +183,10 @@ variables:
  value: 100
- name: RDC_TAGGED_PIPELINE_ID
  value: 59
- name: REIMAGE_ORG
  value: AGS-ROCm-CI
- name: REIMAGE_REPO
  value: cirrascale-reimage-automation
- name: ROCAL_PIPELINE_ID
  value: 151
- name: ROCALUTION_GFX942_TEST_PIPELINE_ID

.github/workflows/issue_retrieval.yml (vendored, 4 lines changed)

@@ -2,7 +2,7 @@ name: Issue retrieval

on:
  issues:
    types: [opened, edited]
    types: [opened]

jobs:
  auto-retrieve:

@@ -15,7 +15,7 @@ jobs:
      app_id: ${{ secrets.ACTION_APP_ID }}
      private_key: ${{ secrets.ACTION_PEM }}
    - name: 'Retrieve Issue'
      uses: harkgill-amd/rocm_issue_management@main
      uses: abhimeda/rocm_issue_management@main
      with:
        authentication-token: ${{ steps.generate_token.outputs.token }}
        github-organization: 'ROCm'

@@ -74,7 +74,6 @@ Conda
ConnectX
CuPy
Dashboarding
DBRX
DDR
DF
DGEMM

@@ -93,7 +92,6 @@ DataFrame
DataLoader
DataParallel
Debian
DeepSeek
DeepSpeed
Dependabot
Deprecations

@@ -131,7 +129,6 @@ GDS
GEMM
GEMMs
GFortran
Gemma
GiB
GIM
GL

@@ -154,6 +151,7 @@ HCA
HGX
HIPCC
HIPExtension
HIPification
HIPIFY
HIPification
HIPify

@@ -29,7 +29,8 @@ The release notes provide a summary of notable changes since the previous ROCm release.
- [ROCm upcoming changes](#rocm-upcoming-changes)

```{note}
If you’re using Radeon™ PRO or Radeon GPUs in a workstation setting with a display connected, see the [Use ROCm on Radeon GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/docs/compatibility/native_linux/native_linux_compatibility.html)
If you’re using Radeon™ PRO or Radeon GPUs in a workstation setting with a
display connected, continue to use ROCm 6.2.3. See the [Use ROCm on Radeon GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/index.html)
documentation to verify compatibility and system requirements.
```

## Release highlights

@@ -62,7 +62,7 @@ additional licenses. Please review individual repositories for more information.
| [rocJPEG](https://github.com/ROCm/rocJPEG/) | [MIT](https://github.com/ROCm/rocJPEG/blob/develop/LICENSE) |
| [ROCK-Kernel-Driver](https://github.com/ROCm/ROCK-Kernel-Driver/) | [GPL 2.0 WITH Linux-syscall-note](https://github.com/ROCm/ROCK-Kernel-Driver/blob/master/COPYING) |
| [rocminfo](https://github.com/ROCm/rocminfo/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocminfo/blob/amd-staging/License.txt) |
| [ROCm Bandwidth Test](https://github.com/ROCm/rocm_bandwidth_test/) | [MIT](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
| [ROCm Bandwidth Test](https://github.com/ROCm/rocm_bandwidth_test/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
| [ROCm CMake](https://github.com/ROCm/rocm-cmake/) | [MIT](https://github.com/ROCm/rocm-cmake/blob/develop/LICENSE) |
| [ROCm Communication Collectives Library (RCCL)](https://github.com/ROCm/rccl/) | [Custom](https://github.com/ROCm/rccl/blob/develop/LICENSE.txt) |
| [ROCm-Core](https://github.com/ROCm/rocm-core) | [MIT](https://github.com/ROCm/rocm-core/blob/master/copyright) |

docs/compatibility/pytorch-compatibility.rst (new file, 916 lines)

@@ -0,0 +1,916 @@
.. meta::
   :description: PyTorch compatibility
   :keywords: GPU, PyTorch compatibility

********************************************************************************
PyTorch compatibility
********************************************************************************

`PyTorch <https://pytorch.org/>`_ is an open-source tensor library designed for
deep learning. PyTorch on ROCm provides mixed-precision and large-scale training
using the `MIOpen <https://github.com/ROCm/MIOpen>`_ and
`RCCL <https://github.com/ROCm/rccl>`_ libraries.

ROCm support for PyTorch is upstreamed into the official PyTorch repository. Due
to independent compatibility considerations, this results in two distinct
release cycles for PyTorch on ROCm:

- ROCm PyTorch release:

  - Provides the latest version of ROCm but doesn't immediately support the
    latest stable PyTorch version.

  - Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
    pre-installed.

  - ROCm PyTorch repository: `<https://github.com/rocm/pytorch>`__

  - See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.

- Official PyTorch release:

  - Provides the latest stable version of PyTorch but doesn't immediately
    support the latest ROCm version.

  - Official PyTorch repository: `<https://github.com/pytorch/pytorch>`__

  - See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
    or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.

Upstream PyTorch includes an automatic HIPification solution that generates HIP
source code from the CUDA backend. This approach allows PyTorch to support ROCm
without requiring manual code modifications.

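Because HIPification preserves PyTorch's CUDA front-end semantics,
device-agnostic user code runs unchanged on ROCm. A minimal sketch using only
standard PyTorch API (nothing here is ROCm-specific):

.. code-block:: python

   import torch

   # On ROCm builds, the "cuda" device type maps to the HIP backend,
   # so CUDA-style code targets AMD GPUs without modification.
   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

   x = torch.randn(1024, 1024, device=device)
   y = torch.randn(1024, 1024, device=device)
   z = x @ y  # matrix multiply dispatched to the ROCm BLAS stack

   print(torch.version.hip)  # a version string on ROCm builds, None on CUDA builds
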
ROCm's development is aligned with the stable release of PyTorch, while upstream
PyTorch testing uses the stable release of ROCm to maintain consistency.

.. _pytorch-docker-compat:

Docker image compatibility
================================================================================

AMD validates and publishes ready-made `PyTorch <https://hub.docker.com/r/rocm/pytorch>`_
images with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are validated for `ROCm 6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_.

.. list-table:: PyTorch Docker image components
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker
     - PyTorch
     - Ubuntu
     - Python
     - Apex
     - torchvision
     - TensorBoard
     - MAGMA
     - UCX
     - OMPI
     - OFED

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-98ddf20333bd01ff749b8092b1190ee369a75d3b8c71c2fac80ffdcb1a98d529?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 24.04
     - `3.12 <https://www.python.org/downloads/release/python-3128/>`_
     - `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
     - `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-402c9b4f1a6b5a81c634a1932b56cbe01abb699cfcc7463d226276997c6cf8ea?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 22.04
     - `3.10 <https://www.python.org/downloads/release/python-31016/>`_
     - `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
     - `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-e0608b55d408c3bfe5c19fdd57a4ced3e0eb3a495b74c309980b60b156c526dd?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 22.04
     - `3.9 <https://www.python.org/downloads/release/python-3918/>`_
     - `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
     - `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-652cf25263d05b1de548222970aeb76e60b12de101de66751264709c0d0ff9d8?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.3.0 <https://github.com/ROCm/pytorch/tree/release/2.3>`_
     - 22.04
     - `3.10 <https://www.python.org/downloads/release/python-31016/>`_
     - `1.3.0 <https://github.com/ROCm/apex/tree/release/1.3.0>`_
     - `0.18.0 <https://github.com/pytorch/vision/tree/v0.18.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
     - `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-051976f26beab8f9aa65d999e3ad546c027b39240a0cc3ee81b114a9024f2912?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
     - 22.04
     - `3.10 <https://www.python.org/downloads/release/python-31016/>`_
     - `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
     - `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
     - `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-88c839a364d109d3748c100385bfa100d28090d25118cc723fd0406390ab2f7e?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
     - 20.04
     - `3.9 <https://www.python.org/downloads/release/python-3921/>`_
     - `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
     - `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-994424ed07a63113f79dd9aa72159124c00f5fbfe18127151e6658f7d0b6f821?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
     - 22.04
     - `3.9 <https://www.python.org/downloads/release/python-3921/>`_
     - `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
     - `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
     - `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
     - `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-7b8139fe40a9aeb4bca3aecd15c22c1fa96e867d93479fa3a24fdeeeeafa1219?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
     - 20.04
     - `3.9 <https://www.python.org/downloads/release/python-3921/>`_
     - `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
     - `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
     - `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

Critical ROCm libraries for PyTorch
================================================================================

The functionality of PyTorch with ROCm is shaped by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers.

.. list-table::
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
     - Used in
   * - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
     - 1.1.0
     - Enables faster execution of core operations like matrix multiplication
       (GEMM), convolutions, and transformations.
     - Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
       ``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
       and ``torch.nn.MultiheadAttention``.
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - 2.3.0
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Supports operations like matrix multiplication, matrix-vector products,
       and tensor contractions. Utilized in both dense and batched linear
       algebra operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - 0.10.0
     - hipBLASLt is an extension of the hipBLAS library, providing additional
       features like epilogues fused into the matrix multiplication kernel or
       use of integer tensor cores.
     - Accelerates operations like ``torch.matmul``, ``torch.mm``, and the
       matrix multiplications used in convolutional and linear layers.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort and select.
     - Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
       and ``torch.topk``. Operations on sparse tensors or tensors with
       irregular shapes often involve scanning, sorting, and filtering, which
       hipCUB handles efficiently.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - 1.0.17
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
     - Used in functions like the ``torch.fft`` module.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - 2.11.0
     - Provides fast random number generation for GPUs.
     - ``torch.rand``, ``torch.randn`` and stochastic layers like
       ``torch.nn.Dropout``.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - 2.3.0
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
       singular value decompositions (SVD).
     - Supports functions like ``torch.linalg.solve``,
       ``torch.linalg.eig``, and ``torch.linalg.svd``.
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - 3.1.2
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse tensor operations (``torch.sparse``).
   * - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
     - 0.2.2
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse tensor operations (``torch.sparse``).
   * - `hipTensor <https://github.com/ROCm/hipTensor>`_
     - 1.4.0
     - Optimizes high-performance tensor operations, such as contractions.
     - Accelerates tensor algebra, especially in deep learning and scientific
       computing.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - 3.3.0
     - Optimizes deep learning primitives such as convolutions, pooling,
       normalization, and activation functions.
     - Speeds up convolutional neural networks (CNNs), recurrent neural
       networks (RNNs), and other layers. Used in operations like
       ``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
   * - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
     - 2.11.0
     - Adds graph-level optimizations, ONNX model and mixed-precision support,
       and enables Ahead-of-Time (AOT) compilation.
     - Speeds up inference models and executes ONNX models for
       compatibility with other frameworks.
   * - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
     - 3.1.0
     - Optimizes acceleration for computer vision and AI workloads like
       preprocessing, augmentation, and inferencing.
     - Faster data preprocessing and augmentation pipelines for datasets like
       ImageNet or COCO; easy to integrate into PyTorch's ``torch.utils.data``
       and ``torchvision`` workflows.
   * - `rocAL <https://github.com/ROCm/rocAL>`_
     - 2.1.0
     - Accelerates the data pipeline by offloading intensive preprocessing and
       augmentation tasks. rocAL is part of MIVisionX.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data load workloads.
   * - `RCCL <https://github.com/ROCm/rccl>`_
     - 2.21.5
     - Optimizes multi-GPU communication for operations like AllReduce and
       Broadcast.
     - Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
       Handles communication in multi-GPU setups.
   * - `rocDecode <https://github.com/ROCm/rocDecode>`_
     - 0.8.0
     - Provides hardware-accelerated data decoding capabilities, particularly
       for image, video, and other dataset formats.
     - Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
       and ``torch.distributed``.
   * - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
     - 0.6.0
     - Provides hardware-accelerated JPEG image decoding and encoding.
     - GPU-accelerated ``torchvision.io.decode_jpeg`` and
       ``torchvision.io.encode_jpeg``; can be integrated in
       ``torch.utils.data`` and ``torchvision``.
   * - `RPP <https://github.com/ROCm/RPP>`_
     - 1.9.1
     - Speeds up data augmentation, transformation, and other preprocessing
       steps.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data load workloads.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
     - Utilized in backend operations for tensor computations requiring
       parallel processing.
   * - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
     - 1.6.0
     - Accelerates warp-level matrix-multiply and matrix-accumulate to speed up
       matrix multiplication (GEMM) and accumulation operations with mixed
       precision support.
     - Linear layers (``torch.nn.Linear``), convolutional layers
       (``torch.nn.Conv2d``), attention layers, general tensor operations that
       involve matrix products, such as ``torch.matmul``, ``torch.bmm``, and
       more.

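To make the mapping concrete, here is a small sketch of common operations that
dispatch to the libraries listed above on a ROCm build of PyTorch (the exact
kernel chosen can vary by PyTorch version and GPU):

.. code-block:: python

   import torch

   x = torch.randn(4096, device="cuda")

   s = torch.cumsum(x, dim=0)   # scan: hipCUB / rocThrust
   v, i = torch.sort(x)         # sort: hipCUB / rocThrust
   t = torch.topk(x, k=10)      # selection: hipCUB

   a = torch.randn(512, 512, device="cuda")
   b = torch.randn(512, 512, device="cuda")
   c = a @ b                    # GEMM: hipBLAS / hipBLASLt
   f = torch.fft.fft(x)         # FFT: hipFFT
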
Supported and unsupported features
================================================================================

The following section maps GPU-accelerated PyTorch features to their supported
ROCm and PyTorch versions.

torch
--------------------------------------------------------------------------------

`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
PyTorch, providing data structures for multi-dimensional tensors and
implementing mathematical operations on them. It also includes utilities for
efficient serialization of tensors and arbitrary data types, along with various
other tools.

Tensor data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The data type of a tensor is specified using the ``dtype`` attribute or
argument, and PyTorch supports a wide range of data types for different use
cases.

The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_'s single data types:

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.float8_e4m3fn``
     - 8-bit floating point, e4m3
     - 2.3
     - 5.5
   * - ``torch.float8_e5m2``
     - 8-bit floating point, e5m2
     - 2.3
     - 5.5
   * - ``torch.float16`` or ``torch.half``
     - 16-bit floating point
     - 0.1.6
     - 2.0
   * - ``torch.bfloat16``
     - 16-bit floating point
     - 1.6
     - 2.6
   * - ``torch.float32`` or ``torch.float``
     - 32-bit floating point
     - 0.1.12_2
     - 2.0
   * - ``torch.float64`` or ``torch.double``
     - 64-bit floating point
     - 0.1.12_2
     - 2.0
   * - ``torch.complex32`` or ``torch.chalf``
     - PyTorch provides native support for 32-bit complex numbers
     - 1.6
     - 2.0
   * - ``torch.complex64`` or ``torch.cfloat``
     - PyTorch provides native support for 64-bit complex numbers
     - 1.6
     - 2.0
   * - ``torch.complex128`` or ``torch.cdouble``
     - PyTorch provides native support for 128-bit complex numbers
     - 1.6
     - 2.0
   * - ``torch.uint8``
     - 8-bit integer (unsigned)
     - 0.1.12_2
     - 2.0
   * - ``torch.uint16``
     - 16-bit integer (unsigned)
     - 2.3
     - Not natively supported
   * - ``torch.uint32``
     - 32-bit integer (unsigned)
     - 2.3
     - Not natively supported
   * - ``torch.uint64``
     - 64-bit integer (unsigned)
     - 2.3
     - Not natively supported
   * - ``torch.int8``
     - 8-bit integer (signed)
     - 1.12
     - 5.0
   * - ``torch.int16`` or ``torch.short``
     - 16-bit integer (signed)
     - 0.1.12_2
     - 2.0
   * - ``torch.int32`` or ``torch.int``
     - 32-bit integer (signed)
     - 0.1.12_2
     - 2.0
   * - ``torch.int64`` or ``torch.long``
     - 64-bit integer (signed)
     - 0.1.12_2
     - 2.0
   * - ``torch.bool``
     - Boolean
     - 1.2
     - 2.0
   * - ``torch.quint8``
     - Quantized 8-bit integer (unsigned)
     - 1.8
     - 5.0
   * - ``torch.qint8``
     - Quantized 8-bit integer (signed)
     - 1.8
     - 5.0
   * - ``torch.qint32``
     - Quantized 32-bit integer (signed)
     - 1.8
     - 5.0
   * - ``torch.quint4x2``
     - Quantized 4-bit integer (unsigned)
     - 1.8
     - 5.0

.. note::

   Unsigned types other than ``uint8`` currently have only limited support in
   eager mode (they primarily exist to assist usage with ``torch.compile``).

The :doc:`ROCm precision support page <rocm:reference/precision-support>`
collects the native hardware support for the different data types.

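For example, a small sketch of constructing and converting GPU tensors with the
dtypes above (per the table, ``bfloat16`` on GPU requires ROCm 2.6 or newer):

.. code-block:: python

   import torch

   device = torch.device("cuda")

   # Reduced-precision tensors commonly used in mixed-precision training.
   a = torch.ones(8, 8, dtype=torch.float16, device=device)
   b = torch.ones(8, 8, dtype=torch.bfloat16, device=device)

   print(a.dtype, b.dtype)   # torch.float16 torch.bfloat16
   c = a.to(torch.float32)   # upcast for numerically sensitive operations
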
torch.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.cuda`` in PyTorch is a module that provides utilities and functions for
managing and utilizing AMD and NVIDIA GPUs. It enables GPU-accelerated
computations, memory management, and efficient execution of tensor operations,
leveraging ROCm and CUDA as the underlying frameworks.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - Device management
     - Utilities for managing and interacting with GPUs.
     - 0.4.0
     - 3.8
   * - Tensor operations on GPU
     - Perform tensor operations such as addition and matrix multiplications on
       the GPU.
     - 0.4.0
     - 3.8
   * - Streams and events
     - Streams allow overlapping computation and communication for optimized
       performance; events enable synchronization.
     - 1.6.0
     - 3.8
   * - Memory management
     - Functions to manage and inspect memory usage like
       ``torch.cuda.memory_allocated()``, ``torch.cuda.max_memory_allocated()``,
       ``torch.cuda.memory_reserved()`` and ``torch.cuda.empty_cache()``.
     - 0.3.0
     - 1.9.2
   * - Running process lists
     - Return a human-readable printout of the running processes and their GPU
       memory use for a given device with functions like
       ``torch.cuda.memory_stats()`` and ``torch.cuda.memory_summary()``.
     - 1.8.0
     - 4.0
   * - Communication collectives
     - A set of APIs that enable efficient communication between multiple GPUs,
       allowing for distributed computing and data parallelism.
     - 1.9.0
     - 5.0
   * - ``torch.cuda.CUDAGraph``
     - Graphs capture sequences of GPU operations to minimize kernel launch
       overhead and improve performance.
     - 1.10.0
     - 5.3
   * - TunableOp
     - A mechanism that allows certain operations to be more flexible and
       optimized for performance. It enables automatic tuning of kernel
       configurations and other settings to achieve the best possible
       performance based on the specific hardware (GPU) and workload.
     - 2.0
     - 5.4
   * - NVIDIA Tools Extension (NVTX)
     - Integration with NVTX for profiling and debugging GPU performance using
       NVIDIA's Nsight tools.
     - 1.8.0
     - ❌
   * - Lazy loading NVRTC
     - Delays JIT compilation with NVRTC until the code is explicitly needed.
     - 1.13.0
     - ❌
   * - Jiterator (beta)
     - Jiterator enables just-in-time compilation of element-wise GPU kernels
       defined as Python strings.
     - 1.13.0
     - 5.2

.. Need to validate and extend.

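A short sketch of the device- and memory-management utilities from the table,
assuming a visible GPU:

.. code-block:: python

   import torch

   if torch.cuda.is_available():
       print(torch.cuda.device_count())      # number of visible GPUs
       print(torch.cuda.get_device_name(0))  # device name, e.g. an AMD Instinct GPU

       x = torch.empty(1 << 20, device="cuda")
       print(torch.cuda.memory_allocated())  # bytes currently allocated by tensors
       print(torch.cuda.memory_reserved())   # bytes held by the caching allocator

       del x
       torch.cuda.empty_cache()              # release cached blocks back to the OS
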
torch.backends.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.backends.cuda`` is a PyTorch module that provides configuration options
and flags to control the behavior of CUDA or ROCm operations. It is part of the
PyTorch backend configuration system, which allows users to fine-tune how
PyTorch interacts with the CUDA or ROCm environment.

.. list-table::
   :header-rows: 1

   * - Option
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``cufft_plan_cache``
     - Manages caching of GPU FFT plans to optimize repeated FFT computations.
     - 1.7.0
     - 5.0
   * - ``matmul.allow_tf32``
     - Enables or disables the use of TensorFloat-32 (TF32) precision for
       faster matrix multiplications on GPUs with Tensor Cores.
     - 1.10.0
     - ❌
   * - ``matmul.allow_fp16_reduced_precision_reduction``
     - Reduced precision reductions (e.g., with fp16 accumulation type) are
       allowed with fp16 GEMMs.
     - 2.0
     - ❌
   * - ``matmul.allow_bf16_reduced_precision_reduction``
     - Reduced precision reductions are allowed with bf16 GEMMs.
     - 2.0
     - ❌
   * - ``enable_cudnn_sdp``
     - Globally enables cuDNN SDPA's kernels within SDPA.
     - 2.0
     - ❌
   * - ``enable_flash_sdp``
     - Globally enables or disables FlashAttention for SDPA.
     - 2.1
     - ❌
   * - ``enable_mem_efficient_sdp``
     - Globally enables or disables Memory-Efficient Attention for SDPA.
     - 2.1
     - ❌
   * - ``enable_math_sdp``
     - Globally enables or disables the PyTorch C++ implementation within SDPA.
     - 2.1
     - ❌

.. Need to validate and extend.

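These options are plain module attributes, so they can be inspected or set at
runtime. A small sketch (per the table above, the TF32-related switches are not
supported on ROCm):

.. code-block:: python

   import torch

   # Read a backend flag; on ROCm this switch has no supported effect.
   print(torch.backends.cuda.matmul.allow_tf32)

   # FFT plan caching is supported; cap the per-device plan cache size.
   torch.backends.cuda.cufft_plan_cache.max_size = 32
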
torch.backends.cudnn
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Supported ``torch`` options:

.. list-table::
   :header-rows: 1

   * - Option
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``allow_tf32``
     - TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
       Ampere or newer GPUs.
     - 1.12.0
     - ❌
   * - ``deterministic``
     - A bool that, if True, causes cuDNN to only use deterministic
       convolution algorithms.
     - 1.12.0
     - 6.0

Automatic mixed precision: torch.amp
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.amp`` automates the process of using both 16-bit (half-precision,
float16) and 32-bit (single-precision, float32) floating-point types in model
training and inference.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - Autocasting
     - Instances of autocast serve as context managers or decorators that allow
       regions of your script to run in mixed precision.
     - 1.9
     - 2.5
   * - Gradient scaling
     - To prevent underflow, "gradient scaling" multiplies the network's
       loss(es) by a scale factor and invokes a backward pass on the scaled
       loss(es). Gradients flowing backward through the network are then
       scaled by the same factor. In other words, gradient values have a
       larger magnitude, so they don't flush to zero.
     - 1.9
     - 2.5
   * - CUDA op-specific behavior
     - These ops always go through autocasting whether they are invoked as part
       of a ``torch.nn.Module``, as a function, or as a ``torch.Tensor`` method. If
       functions are exposed in multiple namespaces, they go through
       autocasting regardless of the namespace.
     - 1.9
     - 2.5

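A typical mixed-precision training step combines the two features from the
table: autocast for the forward pass and a gradient scaler for the backward
pass. A minimal sketch:

.. code-block:: python

   import torch

   model = torch.nn.Linear(128, 10).cuda()
   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
   scaler = torch.cuda.amp.GradScaler()  # gradient scaling for fp16

   for _ in range(10):
       inputs = torch.randn(32, 128, device="cuda")
       targets = torch.randint(0, 10, (32,), device="cuda")

       optimizer.zero_grad()
       with torch.autocast(device_type="cuda"):  # mixed-precision region
           loss = torch.nn.functional.cross_entropy(model(inputs), targets)

       scaler.scale(loss).backward()  # scale the loss to avoid fp16 underflow
       scaler.step(optimizer)
       scaler.update()
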
Distributed library features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The PyTorch distributed library includes a collection of parallelism modules, a
communications layer, and infrastructure for launching and debugging large
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.

The distributed library in PyTorch provides tools and APIs for building
and running distributed machine learning workflows. It allows training models
across multiple processes, GPUs, or nodes in a cluster, enabling efficient use
of computational resources and scalability for large-scale tasks.

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since PyTorch
     - Since ROCm
   * - TensorPipe
     - TensorPipe is a point-to-point communication library integrated into
       PyTorch for distributed training. It is designed to handle tensor data
       transfers efficiently between different processes or devices, including
       those on separate machines.
     - 1.8
     - 5.4
   * - Gloo
     - Gloo is designed for multi-machine and multi-GPU setups, enabling
       efficient communication and synchronization between processes. Gloo is
       one of the default backends for PyTorch's Distributed Data Parallel
       (DDP) and RPC frameworks, alongside other backends like NCCL and MPI.
     - 1.0
     - 2.0

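A minimal sketch of a distributed data parallel setup; on ROCm, the ``"nccl"``
backend is provided by RCCL, which implements the NCCL API:

.. code-block:: python

   import os

   import torch
   import torch.distributed as dist
   from torch.nn.parallel import DistributedDataParallel as DDP

   # Intended to be launched with torchrun, which sets RANK, WORLD_SIZE
   # and LOCAL_RANK in the environment.
   dist.init_process_group(backend="nccl")  # RCCL on ROCm builds
   local_rank = int(os.environ["LOCAL_RANK"])
   torch.cuda.set_device(local_rank)

   model = torch.nn.Linear(128, 10).cuda()
   ddp_model = DDP(model, device_ids=[local_rank])

   # Gradients are all-reduced across ranks during backward().
   out = ddp_model(torch.randn(32, 128, device="cuda"))
   out.sum().backward()
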
torch.compiler
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.compiler`` (AOT Autograd)
     - Autograd captures not only the user-level code, but also backpropagation,
       which results in capturing the backwards pass "ahead-of-time". This
       enables acceleration of both the forwards and backwards pass using
       ``TorchInductor``.
     - 2.0
     - 5.3
   * - ``torch.compiler`` (TorchInductor)
     - The default ``torch.compile`` deep learning compiler that generates fast
       code for multiple accelerators and backends. You need to use a backend
       compiler to make speedups through ``torch.compile`` possible. For AMD,
       NVIDIA, and Intel GPUs, it leverages OpenAI Triton as the key building block.
     - 2.0
     - 5.3

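A minimal sketch of the TorchInductor path described above; ``torch.compile``
requires PyTorch 2.0 or newer:

.. code-block:: python

   import torch

   def fn(x):
       return torch.nn.functional.gelu(x) * x.sum(dim=-1, keepdim=True)

   # TorchInductor is the default backend; on AMD GPUs it generates Triton
   # kernels, the same path used for NVIDIA and Intel GPUs.
   compiled_fn = torch.compile(fn)

   x = torch.randn(64, 64, device="cuda")
   out = compiled_fn(x)  # the first call compiles; later calls reuse the kernels
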
torchaudio
--------------------------------------------------------------------------------

The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
utilities for processing audio data in PyTorch, such as audio loading,
transformations, and feature extraction.

To ensure GPU acceleration with ``torchaudio.transforms``, you need to move the
audio data (waveform tensor) explicitly to the GPU using ``.to('cuda')``.

The following ``torchaudio`` features are GPU-accelerated.

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since torchaudio version
     - Since ROCm
   * - ``torchaudio.transforms.Spectrogram``
     - Generates the spectrogram of an input waveform using STFT.
     - 0.6.0
     - 4.5
   * - ``torchaudio.transforms.MelSpectrogram``
     - Generates the mel-scale spectrogram of raw audio signals.
     - 0.9.0
     - 4.5
   * - ``torchaudio.transforms.MFCC``
     - Extracts MFCC features.
     - 0.9.0
     - 4.5
   * - ``torchaudio.transforms.Resample``
     - Resamples a signal from one frequency to another.
     - 0.9.0
     - 4.5

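For example, a small sketch that moves both the transform and the waveform to
the GPU (assuming a ROCm-enabled build of torchaudio):

.. code-block:: python

   import torch
   import torchaudio

   waveform = torch.randn(1, 16000)  # one second of synthetic audio at 16 kHz
   transform = torchaudio.transforms.MelSpectrogram(sample_rate=16000)

   # Move both the transform module and the waveform to the GPU
   # so the STFT runs there.
   transform = transform.to("cuda")
   mel = transform(waveform.to("cuda"))
   print(mel.shape, mel.device)
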
torchvision
--------------------------------------------------------------------------------

The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
provides datasets, model architectures, and common image transformations for
computer vision.

The following ``torchvision`` features are GPU-accelerated.

.. list-table::
   :header-rows: 1

   * - Features
     - Description
     - Since torchvision version
     - Since ROCm
   * - ``torchvision.transforms.functional``
     - Provides GPU-compatible transformations for image preprocessing like
       resize, normalize, rotate and crop.
     - 0.2.0
     - 4.0
   * - ``torchvision.ops``
     - GPU-accelerated operations for object detection and segmentation tasks:
       ``torchvision.ops.roi_align``, ``torchvision.ops.nms`` and
       ``box_convert``.
     - 0.6.0
     - 3.3
   * - ``torchvision.models`` with ``.to('cuda')``
     - ``torchvision`` provides several pre-trained models (ResNet, Faster
       R-CNN, Mask R-CNN, ...) that can run on CUDA for faster inference and
       training.
     - 0.1.6
     - 2.x
   * - ``torchvision.io``
     - Video decoding and frame extraction using GPU acceleration with NVIDIA's
       NVDEC and nvJPEG (rocJPEG on ROCm).
     - 0.4.0
     - 6.3

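A small inference sketch using a pre-trained model on the GPU (the first call
downloads the pre-trained weights):

.. code-block:: python

   import torch
   from torchvision import models

   model = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
   model = model.to("cuda").eval()

   batch = torch.randn(8, 3, 224, 224, device="cuda")
   with torch.inference_mode():
       logits = model(batch)
   print(logits.shape)  # torch.Size([8, 1000])
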
torchtext
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The `torchtext <https://pytorch.org/text/stable/index.html>`_ library provides
|
||||
utilities for processing and working with text data in PyTorch, including
|
||||
tokenization, vocabulary management, and text embeddings. torchtext supports
|
||||
preprocessing pipelines and integration with PyTorch models, simplifying the
|
||||
implementation of natural language processing (NLP) tasks.
|
||||
|
||||
To leverage GPU acceleration in torchtext, you need to move tensors
|
||||
explicitly to the GPU using ``.to('cuda')``.
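
For instance, here is a minimal sketch (with an arbitrary vocabulary size and
batch shape) of moving token ID tensors to the GPU before an embedding lookup:

.. code-block:: python

   import torch
   import torch.nn as nn

   embedding = nn.Embedding(num_embeddings=10000, embedding_dim=128).to("cuda")
   token_ids = torch.randint(0, 10000, (4, 32))  # e.g., produced by a torchtext pipeline
   token_ids = token_ids.to("cuda")              # explicit move to the GPU

   vectors = embedding(token_ids)                # lookup executes on the GPU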

* torchtext does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.

* Only the official release exists.

torchtune
--------------------------------------------------------------------------------

The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library is
used for authoring, fine-tuning, and experimenting with LLMs.

* Usage: It works out of the box, enabling developers to fine-tune ROCm PyTorch solutions.

* Only the official release exists.

torchserve
--------------------------------------------------------------------------------

`torchserve <https://pytorch.org/torchserve/>`_ is a flexible and easy-to-use
tool for serving and scaling PyTorch models in production.

* torchserve does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.

* Only the official release exists.

torchrec
--------------------------------------------------------------------------------

`torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
common sparsity and parallelism primitives needed for large-scale recommender
systems.

* torchrec does not implement its own kernels. ROCm support is enabled by linking against ROCm libraries.

* Only the official release exists.

Unsupported PyTorch features
----------------------------

The following are GPU-accelerated PyTorch features not currently supported by ROCm.

.. list-table::
   :widths: 30, 60, 10
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
   * - APEX batch norm
     - Use APEX batch norm instead of PyTorch batch norm.
     - 1.6.0
   * - ``torch.backends.cuda`` / ``matmul.allow_tf32``
     - A bool that controls whether TensorFloat-32 tensor cores may be used in
       matrix multiplications.
     - 1.7
   * - ``torch.cuda`` / NVIDIA Tools Extension (NVTX)
     - Integration with NVTX for profiling and debugging GPU performance using
       NVIDIA's Nsight tools.
     - 1.7.0
   * - ``torch.cuda`` / Lazy loading NVRTC
     - Delays JIT compilation with NVRTC until the code is explicitly needed.
     - 1.8.0
   * - ``torch-tensorrt``
     - Integrates the TensorRT library for optimizing and deploying PyTorch
       models. ROCm does not have an equivalent library for TensorRT.
     - 1.9.0
   * - ``torch.backends`` / ``cudnn.allow_tf32``
     - TensorFloat-32 tensor cores may be used in cuDNN convolutions.
     - 1.10.0
   * - ``torch.backends.cuda`` / ``matmul.allow_fp16_reduced_precision_reduction``
     - Reduced-precision reductions with an fp16 accumulation type are
       allowed with fp16 GEMMs.
     - 2.0
   * - ``torch.backends.cuda`` / ``matmul.allow_bf16_reduced_precision_reduction``
     - Reduced-precision reductions are allowed with bf16 GEMMs.
     - 2.0
   * - ``torch.nn.functional`` / ``scaled_dot_product_attention``
     - Flash attention backend for SDPA to accelerate attention computation in
       transformer-based models.
     - 2.0
   * - ``torch.backends.cuda`` / ``enable_cudnn_sdp``
     - Globally enables cuDNN SDPA kernels within SDPA.
     - 2.0
   * - ``torch.backends.cuda`` / ``enable_flash_sdp``
     - Globally enables or disables FlashAttention for SDPA.
     - 2.1
   * - ``torch.backends.cuda`` / ``enable_mem_efficient_sdp``
     - Globally enables or disables Memory-Efficient Attention for SDPA.
     - 2.1
   * - ``torch.backends.cuda`` / ``enable_math_sdp``
     - Globally enables or disables the PyTorch C++ implementation within SDPA.
     - 2.1
   * - Dynamic parallelism
     - PyTorch itself does not directly expose dynamic parallelism as a core
       feature. Dynamic parallelism allows GPU threads to launch additional
       threads, which can be achieved using custom operations via the
       ``torch.utils.cpp_extension`` module.
     - Not a core feature
   * - Unified memory support in PyTorch
     - Unified Memory is not directly exposed in PyTorch's core API, but it can
       be utilized effectively through custom CUDA extensions or advanced
       workflows.
     - Not a core feature

Use cases and recommendations
================================================================================

* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/train-a-model>` provides
  guidance on how to leverage the ROCm platform for training AI models. It covers the steps, tools, and best practices
  for optimizing training workflows on AMD GPUs using PyTorch features.

* :doc:`Single-GPU fine-tuning and inference </how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference>`
  describes and demonstrates how to use the ROCm platform for the fine-tuning and inference of
  machine learning models, particularly large language models (LLMs), on systems with a single AMD
  Instinct MI300X accelerator. This page provides a detailed guide for setting up, optimizing, and
  executing fine-tuning and inference workflows in such environments.

* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference>`
  describes and demonstrates the fine-tuning and inference of machine learning models on systems
  with multiple MI300X accelerators.

* The :doc:`Instinct MI300X workload optimization guide </how-to/tuning-guides/mi300x/workload>` provides detailed
  guidance on optimizing workloads for the AMD Instinct MI300X accelerator using ROCm. This guide is aimed at helping
  users achieve optimal performance for deep learning and other high-performance computing tasks on the MI300X
  accelerator.

* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
  describes how PyTorch integrates with ROCm for AI workloads. It outlines the use of PyTorch on the ROCm platform and
  focuses on how to efficiently leverage AMD GPU hardware for training and inference tasks in AI applications.

For more use cases and recommendations, see the `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.

@@ -16,9 +16,6 @@ Throughout the following topics, this guide discusses the goals and :ref:`challe
model <fine-tuning-llms-concept-challenge>` like Llama 2. In the
sections that follow, you'll find practical guides on libraries and tools to accelerate your fine-tuning.

The AI Developer Hub contains `AMD ROCm tutorials <https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/>`_ for
training, fine-tuning, and inference. It leverages popular machine learning frameworks on AMD GPUs.

- :doc:`Conceptual overview of fine-tuning LLMs <overview>`

- :doc:`Fine-tuning and inference <fine-tuning-and-inference>` using a

@@ -12,9 +12,6 @@ You can use ROCm to perform distributed training, which enables you to train mod

Overall, ROCm can be used to improve the performance and efficiency of your AI applications. With its training, fine-tuning, and inference support, ROCm provides a complete solution for optimizing AI workflows and achieving the optimum results possible on AMD GPUs.

The AI Developer Hub contains `AMD ROCm tutorials <https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/>`_ for
training, fine-tuning, and inference. It leverages popular machine learning frameworks on AMD GPUs.

In this guide, you'll learn how to use ROCm for AI:

- :doc:`Training <training/index>`

@@ -277,7 +277,7 @@ Installing FBGEMM_GPU
Installing FBGEMM_GPU consists of the following steps:

* Set up an isolated Miniconda environment
* Install ROCm using Docker or the :doc:`package manager <rocm-install-on-linux:install/install-methods/package-manager-index>`
* Install ROCm using Docker or the :doc:`package manager <rocm-install-on-linux:install/native-install/index>`
* Install the nightly `PyTorch <https://pytorch.org/>`_ build
* Complete the pre-build and build tasks

@@ -11,9 +11,6 @@ Understanding the ROCm™ software platform’s architecture and capabilities is

Throughout the following topics, this section provides a comprehensive guide to setting up and deploying AI inference on AMD GPUs. This includes instructions on how to install ROCm, how to use Hugging Face Transformers to manage pre-trained models for natural language processing (NLP) tasks, how to validate vLLM on AMD Instinct™ MI300X accelerators and illustrate how to deploy trained models in production environments.

The AI Developer Hub contains `AMD ROCm tutorials <https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/>`_ for
training, fine-tuning, and inference. It leverages popular machine learning frameworks on AMD GPUs.

- :doc:`Installing ROCm and machine learning frameworks <install>`

- :doc:`Running models from Hugging Face <hugging-face-models>`

@@ -26,7 +26,7 @@ If you’re using a Radeon GPU for graphics-accelerated applications, refer to t

ROCm supports multiple :doc:`installation methods <rocm-install-on-linux:install/install-overview>`:

* :doc:`Using your Linux distribution's package manager <rocm-install-on-linux:install/install-methods/package-manager-index>`
* :doc:`Using your Linux distribution's package manager <rocm-install-on-linux:install/native-install/index>`

* :doc:`Using the AMDGPU installer <rocm-install-on-linux:install/amdgpu-install>`

@@ -10,22 +10,49 @@ LLM inference performance validation on AMD Instinct MI300X
.. _vllm-benchmark-unified-docker:

The `ROCm vLLM Docker <https://hub.docker.com/r/rocm/vllm/tags>`_ image offers
a prebuilt, optimized environment for validating large language model (LLM)
inference performance on the AMD Instinct™ MI300X accelerator. This ROCm vLLM
Docker image integrates vLLM and PyTorch tailored specifically for the MI300X
accelerator and includes the following components:
a prebuilt, optimized environment designed for validating large language model
(LLM) inference performance on the AMD Instinct™ MI300X accelerator. This
ROCm vLLM Docker image integrates vLLM and PyTorch tailored specifically for the
MI300X accelerator and includes the following components:

* `ROCm 6.3.1 <https://github.com/ROCm/ROCm>`_
* `ROCm 6.2.1 <https://github.com/ROCm/ROCm>`_

* `vLLM 0.6.6 <https://docs.vllm.ai/en/latest>`_
* `vLLM 0.6.4 <https://docs.vllm.ai/en/latest>`_

* `PyTorch 2.7.0 (2.7.0a0+git3a58512) <https://github.com/pytorch/pytorch>`_
* `PyTorch 2.5.0 <https://github.com/pytorch/pytorch>`_

* Tuning files (in CSV format)

With this Docker image, you can quickly validate the expected inference
performance numbers for the MI300X accelerator. This topic also provides tips on
optimizing performance with popular AI models. For more information, see the lists of
:ref:`available models for MAD-integrated benchmarking <vllm-benchmark-mad-models>`
and :ref:`standalone benchmarking <vllm-benchmark-standalone-options>`.
performance numbers on the MI300X accelerator. This topic also provides tips on
optimizing performance with popular AI models.

.. hlist::
   :columns: 6

   * Llama 3.1 8B

   * Llama 3.1 70B

   * Llama 3.1 405B

   * Llama 2 7B

   * Llama 2 70B

   * Mixtral 8x7B

   * Mixtral 8x22B

   * Mistral 7B

   * Qwen2 7B

   * Qwen2 72B

   * JAIS 13B

   * JAIS 30B

.. _vllm-benchmark-vllm:

@@ -64,9 +91,9 @@ MI300X accelerator with the prebuilt vLLM Docker image.

.. code-block:: shell

   docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
   docker pull rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

Once the setup is complete, choose between two options to reproduce the
Once setup is complete, you can choose between two options to reproduce the
benchmark results:

- :ref:`MAD-integrated benchmarking <vllm-benchmark-mad>`
@@ -103,89 +130,45 @@ Although the following models are preconfigured to collect latency and
throughput performance data, you can also change the benchmarking parameters.
Refer to the :ref:`Standalone benchmarking <vllm-benchmark-standalone>` section.

.. _vllm-benchmark-mad-models:

Available models
----------------

.. list-table::
:header-rows: 1
:widths: 2, 3
.. hlist::
:columns: 3

* - Model name
- Tag
* ``pyt_vllm_llama-3.1-8b``

* - `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B>`_
- ``pyt_vllm_llama-3.1-8b``
* ``pyt_vllm_llama-3.1-70b``

* - `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
- ``pyt_vllm_llama-3.1-70b``
* ``pyt_vllm_llama-3.1-405b``

* - `Llama 3.1 405B <https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct>`_
- ``pyt_vllm_llama-3.1-405b``
* ``pyt_vllm_llama-2-7b``

* - `Llama 3.2 11B Vision <https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct>`_
- ``pyt_vllm_llama-3.2-11b-vision-instruct``
* ``pyt_vllm_llama-2-70b``

* - `Llama 2 7B <https://huggingface.co/meta-llama/Llama-2-7b-chat-hf>`_
- ``pyt_vllm_llama-2-7b``
* ``pyt_vllm_mixtral-8x7b``

* - `Llama 2 70B <https://huggingface.co/meta-llama/Llama-2-70b-chat-hf>`_
- ``pyt_vllm_llama-2-70b``
* ``pyt_vllm_mixtral-8x22b``

* - `Mixtral MoE 8x7B <https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1>`_
- ``pyt_vllm_mixtral-8x7b``
* ``pyt_vllm_mistral-7b``

* - `Mixtral MoE 8x22B <https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1>`_
- ``pyt_vllm_mixtral-8x22b``
* ``pyt_vllm_qwen2-7b``

* - `Mistral 7B <https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3>`_
- ``pyt_vllm_mistral-7b``
* ``pyt_vllm_qwen2-72b``

* - `Qwen2 7B <https://huggingface.co/Qwen/Qwen2-7B-Instruct>`_
- ``pyt_vllm_qwen2-7b``
* ``pyt_vllm_jais-13b``

* - `Qwen2 72B <https://huggingface.co/Qwen/Qwen2-72B-Instruct>`_
- ``pyt_vllm_qwen2-72b``
* ``pyt_vllm_jais-30b``

* - `JAIS 13B <https://huggingface.co/core42/jais-13b-chat>`_
- ``pyt_vllm_jais-13b``
* ``pyt_vllm_llama-3.1-8b_fp8``

* - `JAIS 30B <https://huggingface.co/core42/jais-30b-chat-v3>`_
- ``pyt_vllm_jais-30b``
* ``pyt_vllm_llama-3.1-70b_fp8``

* - `DBRX Instruct <https://huggingface.co/databricks/dbrx-instruct>`_
- ``pyt_vllm_dbrx-instruct``
* ``pyt_vllm_llama-3.1-405b_fp8``

* - `Gemma 2 27B <https://huggingface.co/google/gemma-2-27b>`_
- ``pyt_vllm_gemma-2-27b``
* ``pyt_vllm_mixtral-8x7b_fp8``

* - `C4AI Command R+ 08-2024 <https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024>`_
- ``pyt_vllm_c4ai-command-r-plus-08-2024``

* - `DeepSeek MoE 16B <https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat>`_
- ``pyt_vllm_deepseek-moe-16b-chat``

* - `Llama 3.1 70B FP8 <https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV>`_
- ``pyt_vllm_llama-3.1-70b_fp8``

* - `Llama 3.1 405B FP8 <https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV>`_
- ``pyt_vllm_llama-3.1-405b_fp8``

* - `Mixtral MoE 8x7B FP8 <https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV>`_
- ``pyt_vllm_mixtral-8x7b_fp8``

* - `Mixtral MoE 8x22B FP8 <https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV>`_
- ``pyt_vllm_mixtral-8x22b_fp8``

* - `Mistral 7B FP8 <https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV>`_
- ``pyt_vllm_mistral-7b_fp8``

* - `DBRX Instruct FP8 <https://huggingface.co/amd/dbrx-instruct-FP8-KV>`_
- ``pyt_vllm_dbrx_fp8``

* - `C4AI Command R+ 08-2024 FP8 <https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV>`_
- ``pyt_vllm_command-r-plus_fp8``
* ``pyt_vllm_mixtral-8x22b_fp8``

.. _vllm-benchmark-standalone:

@@ -198,8 +181,8 @@ snippet.

.. code-block::

   docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
   docker run -it --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 16G --security-opt seccomp=unconfined --security-opt apparmor=unconfined --cap-add=SYS_PTRACE -v $(pwd):/workspace --env HUGGINGFACE_HUB_CACHE=/workspace --name vllm_v0.6.6 rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
   docker pull rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
   docker run -it --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128G --security-opt seccomp=unconfined --security-opt apparmor=unconfined --cap-add=SYS_PTRACE -v $(pwd):/workspace --env HUGGINGFACE_HUB_CACHE=/workspace --name vllm_v0.6.4 rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

In the Docker container, clone the ROCm MAD repository and navigate to the
benchmark scripts directory at ``~/MAD/scripts/vllm``.
@@ -241,8 +224,8 @@ See the :ref:`examples <vllm-benchmark-run-benchmark>` for more information.

.. _vllm-benchmark-standalone-options:

Options and available models
----------------------------
Options
-------

.. list-table::
   :header-rows: 1
@@ -265,100 +248,72 @@ Options and available models
- Measure both throughput and latency

* - ``$model_repo``
- ``meta-llama/Llama-3.1-8B-Instruct``
- `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B>`_
- ``meta-llama/Meta-Llama-3.1-8B-Instruct``
- Llama 3.1 8B

* - (``float16``)
- ``meta-llama/Llama-3.1-70B-Instruct``
- `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
- ``meta-llama/Meta-Llama-3.1-70B-Instruct``
- Llama 3.1 70B

* -
- ``meta-llama/Llama-3.1-405B-Instruct``
- `Llama 3.1 405B <https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct>`_

* -
- ``meta-llama/Llama-3.2-11B-Vision-Instruct``
- `Llama 3.2 11B Vision <https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct>`_
- ``meta-llama/Meta-Llama-3.1-405B-Instruct``
- Llama 3.1 405B

* -
- ``meta-llama/Llama-2-7b-chat-hf``
- `Llama 2 7B <https://huggingface.co/meta-llama/Llama-2-7b-chat-hf>`_
- Llama 2 7B

* -
- ``meta-llama/Llama-2-70b-chat-hf``
- `Llama 2 70B <https://huggingface.co/meta-llama/Llama-2-70b-chat-hf>`_
- Llama 2 70B

* -
- ``mistralai/Mixtral-8x7B-Instruct-v0.1``
- `Mixtral MoE 8x7B <https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1>`_
- Mixtral 8x7B

* -
- ``mistralai/Mixtral-8x22B-Instruct-v0.1``
- `Mixtral MoE 8x22B <https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1>`_
- Mixtral 8x22B

* -
- ``mistralai/Mistral-7B-Instruct-v0.3``
- `Mistral 7B <https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3>`_
- Mistral 7B

* -
- ``Qwen/Qwen2-7B-Instruct``
- `Qwen2 7B <https://huggingface.co/Qwen/Qwen2-7B-Instruct>`_
- Qwen2 7B

* -
- ``Qwen/Qwen2-72B-Instruct``
- `Qwen2 72B <https://huggingface.co/Qwen/Qwen2-72B-Instruct>`_
- Qwen2 72B

* -
- ``core42/jais-13b-chat``
- `JAIS 13B <https://huggingface.co/core42/jais-13b-chat>`_
- JAIS 13B

* -
- ``core42/jais-30b-chat-v3``
- `JAIS 30B <https://huggingface.co/core42/jais-30b-chat-v3>`_

* -
- ``databricks/dbrx-instruct``
- `DBRX Instruct <https://huggingface.co/databricks/dbrx-instruct>`_

* -
- ``google/gemma-2-27b``
- `Gemma 2 27B <https://huggingface.co/google/gemma-2-27b>`_

* -
- ``CohereForAI/c4ai-command-r-plus-08-2024``
- `C4AI Command R+ 08-2024 <https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024>`_

* -
- ``deepseek-ai/deepseek-moe-16b-chat``
- `DeepSeek MoE 16B <https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat>`_
- JAIS 30B

* - ``$model_repo``
- ``amd/Llama-3.1-70B-Instruct-FP8-KV``
- `Llama 3.1 70B FP8 <https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV>`_
- ``amd/Meta-Llama-3.1-8B-Instruct-FP8-KV``
- Llama 3.1 8B

* - (``float8``)
- ``amd/Llama-3.1-405B-Instruct-FP8-KV``
- `Llama 3.1 405B FP8 <https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV>`_
- ``amd/Meta-Llama-3.1-70B-Instruct-FP8-KV``
- Llama 3.1 70B

* -
- ``amd/Meta-Llama-3.1-405B-Instruct-FP8-KV``
- Llama 3.1 405B

* -
- ``amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV``
- `Mixtral MoE 8x7B FP8 <https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV>`_
- Mixtral 8x7B

* -
- ``amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV``
- `Mixtral MoE 8x22B FP8 <https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV>`_

* -
- ``amd/Mistral-7B-v0.1-FP8-KV``
- `Mistral 7B FP8 <https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV>`_

* -
- ``amd/dbrx-instruct-FP8-KV``
- `DBRX Instruct FP8 <https://huggingface.co/amd/dbrx-instruct-FP8-KV>`_

* -
- ``amd/c4ai-command-r-plus-FP8-KV``
- `C4AI Command R+ 08-2024 FP8 <https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV>`_
- Mixtral 8x22B

* - ``$num_gpu``
- 1 or 8
@@ -380,34 +335,34 @@ options and their descriptions.
Example 1: latency benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to benchmark the latency of the Llama 3.1 70B model on eight GPUs with the ``float16`` and ``float8`` data types.
Use this command to benchmark the latency of the Llama 3.1 8B model on one GPU with the ``float16`` and ``float8`` data types.

.. code-block::

   ./vllm_benchmark_report.sh -s latency -m meta-llama/Llama-3.1-70B-Instruct -g 8 -d float16
   ./vllm_benchmark_report.sh -s latency -m amd/Llama-3.1-70B-Instruct-FP8-KV -g 8 -d float8
   ./vllm_benchmark_report.sh -s latency -m meta-llama/Meta-Llama-3.1-8B-Instruct -g 1 -d float16
   ./vllm_benchmark_report.sh -s latency -m amd/Meta-Llama-3.1-8B-Instruct-FP8-KV -g 1 -d float8

Find the latency reports at:

- ``./reports_float16/summary/Llama-3.1-70B-Instruct_latency_report.csv``
- ``./reports_float16/summary/Meta-Llama-3.1-8B-Instruct_latency_report.csv``

- ``./reports_float8/summary/Llama-3.1-70B-Instruct-FP8-KV_latency_report.csv``
- ``./reports_float8/summary/Meta-Llama-3.1-8B-Instruct-FP8-KV_latency_report.csv``

Example 2: throughput benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to benchmark the throughput of the Llama 3.1 70B model on eight GPUs with the ``float16`` and ``float8`` data types.
Use this command to benchmark the throughput of the Llama 3.1 8B model on one GPU with the ``float16`` and ``float8`` data types.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s throughput -m meta-llama/Llama-3.1-70B-Instruct -g 8 -d float16
   ./vllm_benchmark_report.sh -s throughput -m amd/Llama-3.1-70B-Instruct-FP8-KV -g 8 -d float8
   ./vllm_benchmark_report.sh -s throughput -m meta-llama/Meta-Llama-3.1-8B-Instruct -g 1 -d float16
   ./vllm_benchmark_report.sh -s throughput -m amd/Meta-Llama-3.1-8B-Instruct-FP8-KV -g 1 -d float8

Find the throughput reports at:

- ``./reports_float16/summary/Llama-3.1-70B-Instruct_throughput_report.csv``
- ``./reports_float16/summary/Meta-Llama-3.1-8B-Instruct_throughput_report.csv``

- ``./reports_float8/summary/Llama-3.1-70B-Instruct-FP8-KV_throughput_report.csv``
- ``./reports_float8/summary/Meta-Llama-3.1-8B-Instruct-FP8-KV_throughput_report.csv``

.. raw:: html

@@ -439,40 +394,17 @@ Further reading
MI300X accelerators, see :doc:`../../system-optimization/mi300x`.

- To learn how to run LLM models from Hugging Face or your own model, see
  :doc:`Running models from Hugging Face <hugging-face-models>`.
  :doc:`Using ROCm for AI <../index>`.

- To learn how to optimize inference on LLMs, see
  :doc:`Inference optimization <../inference-optimization/index>`.

- To learn how to fine-tune LLMs, see
  :doc:`Fine-tuning LLMs <../fine-tuning/index>`.

Previous versions
=================

- To compare with the previous version of the ROCm vLLM Docker image for performance validation, refer to
  `LLM inference performance validation on AMD Instinct MI300X (ROCm 6.2.0) <https://rocm.docs.amd.com/en/docs-6.2.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_.

This table lists previous versions of the ROCm vLLM Docker image for inference
performance validation. For detailed information about available models for
benchmarking, see the version-specific documentation.

.. list-table::
   :header-rows: 1
   :stub-columns: 1

   * - ROCm version
     - vLLM version
     - PyTorch version
     - Resources

   * - 6.2.1
     - 0.6.4
     - 2.5.0
     -
       * `Documentation <https://rocm.docs.amd.com/en/docs-6.3.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4/images/sha256-ccbb74cc9e7adecb8f7bdab9555f7ac6fc73adb580836c2a35ca96ff471890d8>`_

   * - 6.2.0
     - 0.4.3
     - 2.4.0
     -
       * `Documentation <https://rocm.docs.amd.com/en/docs-6.2.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.2_mi300_ubuntu22.04_py3.9_vllm_7c5fd50/images/sha256-9e4dd4788a794c3d346d7d0ba452ae5e92d39b8dfac438b2af8efdc7f15d22c0>`_

@@ -14,9 +14,6 @@ Training models on AMD GPUs with the ROCm™ software platform allows you to use

The ROCm software platform makes it easier to train models on AMD GPUs while maintaining compatibility with existing code and tools. The platform also provides features like multi-GPU support, allowing for scaling and parallelization of model training across multiple GPUs to enhance performance.

The AI Developer Hub contains `AMD ROCm tutorials <https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/>`_ for
training, fine-tuning, and inference. It leverages popular machine learning frameworks on AMD GPUs.

In this guide, you'll learn about:

- :doc:`Training a model <train-a-model>`

@@ -12,7 +12,7 @@ myst:
This chapter reviews system settings that are required to configure the system
for ROCm virtualization on RDNA2-based AMD Radeon™ PRO GPUs. Installing ROCm on
Bare Metal follows the routine ROCm
{doc}`installation procedure<rocm-install-on-linux:install/install-methods/package-manager-index>`.
{doc}`installation procedure<rocm-install-on-linux:install/native-install/index>`.

To enable ROCm virtualization on V620, you must set up Single Root I/O
Virtualization (SR-IOV) in the BIOS via a setting found in the following
@@ -166,4 +166,4 @@ First, assign GPU virtual function (VF) to VM using the following steps.
Then start the VM.

Finally, install ROCm on the virtual machine (VM). For detailed instructions,
refer to the {doc}`Linux install guide<rocm-install-on-linux:install/install-methods/package-manager-index>`.
refer to the {doc}`Linux install guide<rocm-install-on-linux:install/native-install/index>`.

@@ -38,7 +38,6 @@ ROCm documentation is organized into the following categories:
:class-body: rocm-card-banner rocm-hue-12

* [Use ROCm for AI](./how-to/rocm-for-ai/index.rst)
* [AI tutorials](https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/)
* [Use ROCm for HPC](./how-to/rocm-for-hpc/index.rst)
* [System optimization](./how-to/system-optimization/index.rst)
* [AMD Instinct MI300X performance validation and tuning](./how-to/tuning-guides/mi300x/index.rst)

@@ -21,8 +21,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Model
- Architecture
- LLVM target name
- Device Major version
- Device Minor version
- VRAM (GiB)
- Compute Units
- Wavefront Size
@@ -38,8 +36,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI325X
- CDNA3
- gfx942
- 9
- 4
- 256
- 304 (38 per XCD)
- 64
@@ -55,8 +51,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI300X
- CDNA3
- gfx942
- 9
- 4
- 192
- 304 (38 per XCD)
- 64
@@ -72,8 +66,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI300A
- CDNA3
- gfx942
- 9
- 4
- 128
- 228 (38 per XCD)
- 64
@@ -89,8 +81,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI250X
- CDNA2
- gfx90a
- 9
- 0
- 128
- 220 (110 per GCD)
- 64
@@ -106,8 +96,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI250
- CDNA2
- gfx90a
- 9
- 0
- 128
- 208 (104 per GCD)
- 64
@@ -123,8 +111,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI210
- CDNA2
- gfx90a
- 9
- 0
- 64
- 104
- 64
@@ -140,8 +126,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI100
- CDNA
- gfx908
- 9
- 0
- 32
- 120
- 64
@@ -157,8 +141,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI60
- GCN5.1
- gfx906
- 9
- 0
- 32
- 64
- 64
@@ -174,8 +156,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI50 (32GB)
- GCN5.1
- gfx906
- 9
- 0
- 32
- 60
- 64
@@ -191,8 +171,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI50 (16GB)
- GCN5.1
- gfx906
- 9
- 0
- 16
- 60
- 64
@@ -208,8 +186,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI25
- GCN5.0
- gfx900
- 9
- 0
- 16
- 64
- 64
@@ -225,8 +201,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI8
- GCN3.0
- gfx803
- 8
- 0
- 4
- 64
- 64
@@ -242,8 +216,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- MI6
- GCN4.0
- gfx803
- 8
- 0
- 16
- 36
- 64
@@ -266,8 +238,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Model
- Architecture
- LLVM target name
- Device Major version
- Device Minor version
- VRAM (GiB)
- Compute Units
- Wavefront Size
@@ -284,8 +254,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO V710
- RDNA3
- gfx1101
- 11
- 0
- 28
- 54
- 32
@@ -302,8 +270,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO W7900 Dual Slot
- RDNA3
- gfx1100
- 11
- 0
- 48
- 96
- 32
@@ -320,8 +286,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO W7900
- RDNA3
- gfx1100
- 11
- 0
- 48
- 96
- 32
@@ -338,8 +302,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO W7800
- RDNA3
- gfx1100
- 11
- 0
- 32
- 70
- 32
@@ -356,8 +318,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO W7700
- RDNA3
- gfx1101
- 11
- 0
- 16
- 48
- 32
@@ -374,8 +334,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO W6800
- RDNA2
- gfx1030
- 10
- 3
- 32
- 60
- 32
@@ -392,8 +350,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO W6600
- RDNA2
- gfx1032
- 10
- 3
- 8
- 28
- 32
@@ -410,8 +366,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon PRO V620
- RDNA2
- gfx1030
- 10
- 3
- 32
- 72
- 32
@@ -428,8 +382,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon Pro W5500
- RDNA
- gfx1012
- 10
- 1
- 8
- 22
- 32
@@ -446,8 +398,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon Pro VII
- GCN5.1
- gfx906
- 9
- 0
- 16
- 60
- 64
@@ -471,8 +421,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Model
- Architecture
- LLVM target name
- Device Major version
- Device Minor version
- VRAM (GiB)
- Compute Units
- Wavefront Size
@@ -489,8 +437,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 7900 XTX
- RDNA3
- gfx1100
- 11
- 0
- 24
- 96
- 32
@@ -507,8 +453,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 7900 XT
- RDNA3
- gfx1100
- 11
- 0
- 20
- 84
- 32
@@ -525,8 +469,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 7900 GRE
- RDNA3
- gfx1100
- 11
- 0
- 16
- 80
- 32
@@ -543,8 +485,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 7800 XT
- RDNA3
- gfx1101
- 11
- 0
- 16
- 60
- 32
@@ -561,8 +501,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 7700 XT
- RDNA3
- gfx1101
- 11
- 0
- 12
- 54
- 32
@@ -579,8 +517,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 7600
- RDNA3
- gfx1102
- 11
- 0
- 8
- 32
- 32
@@ -597,8 +533,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6950 XT
- RDNA2
- gfx1030
- 10
- 3
- 16
- 80
- 32
@@ -615,8 +549,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6900 XT
- RDNA2
- gfx1030
- 10
- 3
- 16
- 80
- 32
@@ -633,8 +565,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6800 XT
- RDNA2
- gfx1030
- 10
- 3
- 16
- 72
- 32
@@ -651,8 +581,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6800
- RDNA2
- gfx1030
- 10
- 3
- 16
- 60
- 32
@@ -669,8 +597,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6750 XT
- RDNA2
- gfx1031
- 10
- 3
- 12
- 40
- 32
@@ -687,8 +613,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6700 XT
- RDNA2
- gfx1031
- 10
- 3
- 12
- 40
- 32
@@ -706,8 +630,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- RDNA2
- gfx1031
- 10
- 3
- 10
- 36
- 32
- 128
@@ -723,8 +645,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6650 XT
- RDNA2
- gfx1032
- 10
- 3
- 8
- 32
- 32
@@ -741,8 +661,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6600 XT
- RDNA2
- gfx1032
- 10
- 3
- 8
- 32
- 32
@@ -759,8 +677,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon RX 6600
- RDNA2
- gfx1032
- 10
- 3
- 8
- 28
- 32
@@ -777,8 +693,6 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil
- Radeon VII
- GCN5.1
- gfx906
- 9
- 0
- 16
- 60
- 64
@@ -796,7 +710,7 @@ Glossary
========

For more information about the terms used, see the
:ref:`specific documents and guides <gpu-arch-documentation>`, or
:ref:`specific documents and guides <gpu-arch-documentation>`, or
:doc:`Understanding the HIP programming model<hip:understand/programming_model>`.

**LLVM target name**
@@ -804,18 +718,6 @@ For more information about the terms used, see the
Argument to pass to clang in ``--offload-arch`` to compile code for the given
architecture.

**Device major version**

Indicates the core instruction set of the GPU architecture. For example, a value
of 11 would correspond to Navi III (RDNA3).

**Device minor version**

Indicates a particular configuration, feature set, or variation within the group
represented by the device compute version. For example, different models within
the same major version might have varying levels of support for certain features
or optimizations.

**VRAM**

Amount of memory available on the GPU.

@@ -1,23 +1,19 @@
.. meta::
   :description: Supported data types of AMD GPUs and libraries in ROCm.
   :keywords: precision, data types, HIP types, int8, float8, float8 (E4M3),
              float8 (E5M2), bfloat8, float16, half, bfloat16, tensorfloat32,
              float, float32, float64, double, AMD data types, HIP data types,
              ROCm precision, ROCm data types
   :description: Supported data types in ROCm
   :keywords: int8, float8, float8 (E4M3), float8 (E5M2), bfloat8, float16, half, bfloat16, tensorfloat32, float,
              float32, float64, double, AMD, ROCm, AMDGPU

*************************************************************
Data types and precision support
Precision support
*************************************************************

This topic lists the supported data types of AMD GPUs and ROCm libraries.
Corresponding :doc:`HIP <hip:index>` data types are also noted.
Use the following sections to identify data types and HIP types ROCm™ supports.

Integral types
==========================================

The signed and unsigned integral types supported by ROCm are listed in
the following table, along with their corresponding HIP type and a short
description.
The signed and unsigned integral types that are supported by ROCm are listed in the following table,
together with their corresponding HIP type and a short description.


.. list-table::
@@ -50,8 +46,8 @@ description.
Floating-point types
==========================================

The floating-point types supported by ROCm are listed in the following
table, along with their corresponding HIP type and a short description.
The floating-point types that are supported by ROCm are listed in the following table, together with
their corresponding HIP type and a short description.

.. image:: ../data/about/compatibility/floating-point-data-types.png
   :alt: Supported floating-point types
@@ -67,62 +63,43 @@ table, along with their corresponding HIP type and a short description.
*
- float8 (E4M3)
- ``-``
- An 8-bit floating-point number that mostly follows IEEE-754 conventions
and **S1E4M3** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ ,
with expanded range and no infinity or signed zero. NaN is
represented as negative zero.
- An 8-bit floating-point number that mostly follows IEEE-754 conventions and **S1E4M3** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ , with expanded range and with no infinity or signed zero. NaN is represented as negative zero.
*
- float8 (E5M2)
- ``-``
- An 8-bit floating-point number mostly following IEEE-754 conventions and
**S1E5M2** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ ,
with expanded range and no infinity or signed zero. NaN is
represented as negative zero.
- An 8-bit floating-point number mostly following IEEE-754 conventions and **S1E5M2** bit layout, as described in `8-bit Numerical Formats for Deep Neural Networks <https://arxiv.org/abs/2206.02915>`_ , with expanded range and with no infinity or signed zero. NaN is represented as negative zero.
*
- float16
- ``half``
- A 16-bit floating-point number that conforms to the IEEE 754-2008
half-precision storage format.
- A 16-bit floating-point number that conforms to the IEEE 754-2008 half-precision storage format.
*
- bfloat16
- ``bfloat16``
- A shortened 16-bit version of the IEEE 754 single-precision storage
format.
- A shortened 16-bit version of the IEEE 754 single-precision storage format.
*
- tensorfloat32
- ``-``
- A floating-point number that occupies 32 bits or less of storage,
providing improved range compared to half (16-bit) format, at
(potentially) greater throughput than single-precision (32-bit) formats.
- A floating-point number that occupies 32 bits or less of storage, providing improved range compared to half (16-bit) format, at (potentially) greater throughput than single-precision (32-bit) formats.
*
- float32
- ``float``
- A 32-bit floating-point number that conforms to the IEEE 754
single-precision storage format.
- A 32-bit floating-point number that conforms to the IEEE 754 single-precision storage format.
*
- float64
- ``double``
- A 64-bit floating-point number that conforms to the IEEE 754
double-precision storage format.
- A 64-bit floating-point number that conforms to the IEEE 754 double-precision storage format.

.. note::

* The float8 and tensorfloat32 types are internal types used in calculations
in Matrix Cores and can be stored in any type of the same size.

* The encodings for FP8 (E5M2) and FP8 (E4M3) that the
MI300 series natively supports differ from the FP8 (E5M2) and FP8 (E4M3)
encodings used in NVIDIA H100
(`FP8 Formats for Deep Learning <https://arxiv.org/abs/2209.05433>`_).

* The float8 and tensorfloat32 types are internal types used in calculations in Matrix Cores and can be stored in any type of the same size.
* The encodings for FP8 (E5M2) and FP8 (E4M3) that are natively supported by MI300 differ from the FP8 (E5M2) and FP8 (E4M3) encodings used in H100 (`FP8 Formats for Deep Learning <https://arxiv.org/abs/2209.05433>`_).
* In some AMD documents and articles, float8 (E5M2) is referred to as bfloat8.
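
As a quick, illustrative sketch, these reduced-precision types map to PyTorch
dtypes as shown below; the float8 dtypes require a recent PyTorch build, so
treat their availability as an assumption to verify against your installation.

.. code-block:: python

   import torch

   x = torch.randn(4, 4, device="cuda")  # float32 by default
   print(x.half().dtype)                 # torch.float16 (IEEE 754-2008 half)
   print(x.bfloat16().dtype)             # torch.bfloat16 (truncated float32)

   # float8 storage dtypes (PyTorch 2.1+); layouts correspond to S1E4M3 / S1E5M2 above.
   print(torch.float8_e4m3fn)
   print(torch.float8_e5m2)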

ROCm support icons
==========================================

In the following sections, icons represent the level of support. These
icons, described in the following table, are also used in the library data type
support pages.
In the following sections, we use icons to represent the level of support. These icons, described in the
following table, are also used on the library data type support pages.

.. list-table::
:header-rows: 1
@@ -144,27 +121,14 @@ support pages.

.. note::

* Full support means that the type is supported natively or with hardware
emulation.
* Full support means that the type is supported natively or with hardware emulation.
* Native support means that the operations for that type are implemented in hardware. Types that are not natively supported are emulated with the available hardware. The performance of non-natively supported types can differ from the full instruction throughput rate. For example, 16-bit integer operations can be performed on the 32-bit integer ALUs at full rate; however, 64-bit integer operations might need several instructions on the 32-bit integer ALUs.
* Any type can be emulated by software, but this page does not cover such cases.

* Native support means that the operations for that type are implemented in
hardware. Types that are not natively supported are emulated with the
available hardware. The performance of non-natively supported types can
differ from the full instruction throughput rate. For example, 16-bit
integer operations can be performed on the 32-bit integer ALUs at full rate;
however, 64-bit integer operations might need several instructions on the
32-bit integer ALUs.

* Any type can be emulated by software, but this page does not cover such
cases.

Hardware data type support
Hardware type support
==========================================

The following tables provide information about AMD Instinct accelerators support
for various data types. The MI200 series GPUs, which include MI210, MI250, and
MI250X, are based on the CDNA2 architecture. The MI300 series GPUs, consisting
of MI300A, MI300X, and MI325X, are built on the CDNA3 architecture.
AMD GPU hardware support for data types is listed in the following tables.

Compute units support
-------------------------------------------------------------------------------
@@ -411,23 +375,21 @@ The following table lists data type support for atomic operations.

.. note::

You can emulate atomic operations using software for cases that are not
natively supported. Software-emulated atomic operations have a high negative
performance impact when they frequently access the same memory address.
For cases that are not natively supported, you can emulate atomic operations using software.
Software-emulated atomic operations have high negative performance impact when they frequently
access the same memory address.

Data type support in ROCm libraries
Data Type support in ROCm Libraries
==========================================

ROCm library support for int8, float8 (E4M3), float8 (E5M2), int16, float16,
bfloat16, int32, tensorfloat32, float32, int64, and float64 is listed in the
following tables.
ROCm library support for int8, float8 (E4M3), float8 (E5M2), int16, float16, bfloat16, int32,
tensorfloat32, float32, int64, and float64 is listed in the following tables.

Libraries input/output type support
-------------------------------------------------------------------------------

The following tables list ROCm library support for specific input and output
data types. Refer to the corresponding library data type support page for a
detailed description.
The following tables list ROCm library support for specific input and output data types. For a detailed
description, refer to the corresponding library data type support page.

.. tab-set::

@@ -554,9 +516,8 @@ detailed description.
Libraries internal calculations type support
-------------------------------------------------------------------------------

The following tables list ROCm library support for specific internal data types.
Refer to the corresponding library data type support page for a detailed
description.
The following tables list ROCm library support for specific internal data types. For a detailed
description, refer to the corresponding library data type support page.

.. tab-set::

@@ -89,10 +89,7 @@ subtrees:
title: Profile and debug
- file: how-to/rocm-for-ai/inference-optimization/workload.rst
title: Workload tuning

- url: https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/
title: AI tutorials


- file: how-to/rocm-for-hpc/index.rst
title: Use ROCm for HPC
- file: how-to/system-optimization/index.rst
@@ -129,7 +126,6 @@ subtrees:
- url: https://github.com/amd/rocm-examples
title: ROCm examples


- caption: Conceptual
entries:
- file: conceptual/gpu-arch.md

@@ -1,3 +1,3 @@
rocm-docs-core==1.15.0
rocm-docs-core==1.13.0
sphinx-reredirects
sphinx-sitemap

@@ -8,13 +8,6 @@ accessible-pygments==0.0.5
# via pydata-sphinx-theme
alabaster==1.0.0
# via sphinx
asttokens==3.0.0
# via stack-data
attrs==25.1.0
# via
# jsonschema
# jupyter-cache
# referencing
babel==2.16.0
# via
# pydata-sphinx-theme
@@ -32,17 +25,9 @@ cffi==1.17.1
charset-normalizer==3.4.0
# via requests
click==8.1.7
# via
# jupyter-cache
# sphinx-external-toc
comm==0.2.2
# via ipykernel
# via sphinx-external-toc
cryptography==43.0.3
# via pyjwt
debugpy==1.8.12
# via ipykernel
decorator==5.1.1
# via ipython
deprecated==1.2.15
# via pygithub
docutils==0.21.2
@@ -51,103 +36,34 @@ docutils==0.21.2
# myst-parser
# pydata-sphinx-theme
# sphinx
exceptiongroup==1.2.2
# via ipython
executing==2.2.0
# via stack-data
fastjsonschema==2.20.0
# via
# nbformat
# rocm-docs-core
# via rocm-docs-core
gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via rocm-docs-core
greenlet==3.1.1
# via sqlalchemy
idna==3.10
# via requests
imagesize==1.4.1
# via sphinx
importlib-metadata==8.6.1
# via
# jupyter-cache
# myst-nb
ipykernel==6.29.5
# via myst-nb
ipython==8.31.0
# via
# ipykernel
# myst-nb
jedi==0.19.2
# via ipython
jinja2==3.1.5
# via
# myst-parser
# sphinx
jsonschema==4.23.0
# via nbformat
jsonschema-specifications==2024.10.1
# via jsonschema
jupyter-cache==1.0.1
# via myst-nb
jupyter-client==8.6.3
# via
# ipykernel
# nbclient
jupyter-core==5.7.2
# via
# ipykernel
# jupyter-client
# nbclient
# nbformat
markdown-it-py==3.0.0
# via
# mdit-py-plugins
# myst-parser
markupsafe==3.0.2
# via jinja2
matplotlib-inline==0.1.7
# via
# ipykernel
# ipython
mdit-py-plugins==0.4.2
# via myst-parser
mdurl==0.1.2
# via markdown-it-py
myst-nb==1.1.2
# via rocm-docs-core
myst-parser==4.0.0
# via myst-nb
nbclient==0.10.2
# via
# jupyter-cache
# myst-nb
nbformat==5.10.4
# via
# jupyter-cache
# myst-nb
# nbclient
nest-asyncio==1.6.0
# via ipykernel
# via rocm-docs-core
packaging==24.2
# via
# ipykernel
# sphinx
parso==0.8.4
# via jedi
pexpect==4.9.0
# via ipython
platformdirs==4.3.6
# via jupyter-core
prompt-toolkit==3.0.50
# via ipython
psutil==6.1.1
# via ipykernel
ptyprocess==0.7.0
# via pexpect
pure-eval==0.2.3
# via stack-data
# via sphinx
pycparser==2.22
# via cffi
pydata-sphinx-theme==0.16.0
@@ -159,42 +75,23 @@ pygithub==2.5.0
pygments==2.18.0
# via
# accessible-pygments
# ipython
# pydata-sphinx-theme
# sphinx
pyjwt[crypto]==2.10.0
# via pygithub
pynacl==1.5.0
# via pygithub
python-dateutil==2.9.0.post0
# via jupyter-client
pyyaml==6.0.2
# via
# jupyter-cache
# myst-nb
# myst-parser
# rocm-docs-core
# sphinx-external-toc
pyzmq==26.2.0
# via
# ipykernel
# jupyter-client
referencing==0.36.2
# via
# jsonschema
# jsonschema-specifications
requests==2.32.3
# via
# pygithub
# sphinx
rocm-docs-core==1.15.0
rocm-docs-core==1.13.0
# via -r requirements.in
rpds-py==0.22.3
# via
# jsonschema
# referencing
six==1.17.0
# via python-dateutil
smmap==5.0.1
# via gitdb
snowballstemmer==2.2.0
@@ -204,7 +101,6 @@ soupsieve==2.6
sphinx==8.1.3
# via
# breathe
# myst-nb
# myst-parser
# pydata-sphinx-theme
# rocm-docs-core
@@ -241,43 +137,15 @@ sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
sqlalchemy==2.0.37
# via jupyter-cache
stack-data==0.6.3
# via ipython
tabulate==0.9.0
# via jupyter-cache
tomli==2.1.0
# via sphinx
tornado==6.4.2
# via
# ipykernel
# jupyter-client
traitlets==5.14.3
# via
# comm
# ipykernel
# ipython
# jupyter-client
# jupyter-core
# matplotlib-inline
# nbclient
# nbformat
typing-extensions==4.12.2
# via
# ipython
# myst-nb
# pydata-sphinx-theme
# pygithub
# referencing
# sqlalchemy
urllib3==2.2.3
# via
# pygithub
# requests
wcwidth==0.2.13
# via prompt-toolkit
wrapt==1.17.0
# via deprecated
zipp==3.21.0
# via importlib-metadata

@@ -68,85 +68,6 @@ set_address_sanitizer_off() {
    export LDFLAGS=""
}

build_miopen_ckProf() {
    ENABLE_ADDRESS_SANITIZER=false
    echo "Start Building Composable Kernel Profiler"
    if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
        set_asan_env_vars
        set_address_sanitizer_on
    else
        unset_asan_env_vars
        set_address_sanitizer_off
    fi

    cd $COMPONENT_SRC
    cd "$BUILD_DIR"
    rm -rf *

    architectures='gfx10 gfx11 gfx90 gfx94'
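    # If GPU_ARCHS is set, collapse each ';'-separated target to its
    # five-character family prefix and de-duplicate, since the profiler is
    # built once per architecture family; e.g. GPU_ARCHS="gfx90a;gfx942;gfx1100"
    # yields the set gfx90 gfx94 gfx11 (awk's iteration order is unspecified).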
    if [ -n "$GPU_ARCHS" ]; then
        architectures=$(echo ${GPU_ARCHS} | awk -F';' '{for(i=1;i<=NF;i++) a[substr($i,1,5)]} END{for(i in a) printf i" "}')
    fi

    for arch in ${architectures}
    do
        if [ "${ASAN_CMAKE_PARAMS}" == "true" ] ; then
            cmake -DBUILD_DEV=OFF \
                -DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}/lib/cmake;${ROCM_PATH%-*}/$ASAN_LIBDIR;${ROCM_PATH%-*}/llvm;${ROCM_PATH%-*}" \
                -DCMAKE_BUILD_TYPE=${BUILD_TYPE:-'RelWithDebInfo'} \
                -DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_LIB_RPATH" \
                -DCMAKE_EXE_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_EXE_RPATH" \
                -DCMAKE_VERBOSE_MAKEFILE=1 \
                -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
                -DCMAKE_INSTALL_PREFIX="${ROCM_PATH}" \
                -DCMAKE_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
                -DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
                -DROCM_SYMLINK_LIBS=OFF \
                -DCPACK_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
                -DROCM_DISABLE_LDCONFIG=ON \
                -DROCM_PATH="${ROCM_PATH}" \
                -DCPACK_GENERATOR="${PKGTYPE^^}" \
                -DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
                -DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
                ${LAUNCHER_FLAGS} \
                -DPROFILER_ONLY=ON \
                -DENABLE_ASAN_PACKAGING=true \
                -DGPU_ARCH="${arch}" \
                "$COMPONENT_SRC"
        else
            cmake -DBUILD_DEV=OFF \
                -DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}" \
                -DCMAKE_BUILD_TYPE=Release \
                -DCMAKE_SHARED_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN' \
                -DCMAKE_EXE_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN/../lib' \
                -DCMAKE_VERBOSE_MAKEFILE=1 \
                -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
                -DCMAKE_INSTALL_PREFIX="${ROCM_PATH}" \
                -DCMAKE_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
                -DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
                -DROCM_SYMLINK_LIBS=OFF \
                -DCPACK_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
                -DROCM_DISABLE_LDCONFIG=ON \
                -DROCM_PATH="${ROCM_PATH}" \
                -DCPACK_GENERATOR="${PKGTYPE^^}" \
                -DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
                -DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
                ${LAUNCHER_FLAGS} \
                -DPROFILER_ONLY=ON \
                -DGPU_ARCH="${arch}" \
                "$COMPONENT_SRC"
        fi

        cmake --build . -- -j${PROC} package
        cp ./*ckprofiler*.${PKGTYPE} $PACKAGE_DIR
        rm -rf *
    done
    rm -rf _CPack_Packages/ && find -name '*.o' -delete

    echo "Finished building Composable Kernel Profiler"
    show_build_cache_stats
}

clean_miopen_ck() {
    echo "Cleaning MIOpen-CK build directory: ${BUILD_DIR} ${PACKAGE_DIR}"
    rm -rf "$BUILD_DIR" "$PACKAGE_DIR"

@@ -42,7 +42,6 @@ DEB_PATH="$(getDebPath $PROJ_NAME)"
RPM_PATH="$(getRpmPath $PROJ_NAME)"
INSTALL_PATH="${ROCM_INSTALL_PATH}/lib/llvm"
LLVM_ROOT_LCL="${LLVM_ROOT}"
ROCM_WHEEL_DIR="${BUILD_PATH}/_wheel"

TARGET="all"
MAKEOPTS="$DASH_JAY"
@@ -150,7 +149,6 @@ ENABLE_RUNTIMES="$ENABLE_RUNTIMES;libcxx;libcxxabi"
BOOTSTRAPPING_BUILD_LIBCXX=1

clean_lightning() {
    rm -rf "$ROCM_WHEEL_DIR"
    rm -rf "$BUILD_PATH"
    rm -rf "$DEB_PATH"
    rm -rf "$RPM_PATH"
@@ -332,15 +330,6 @@ build_lightning() {
    echo "End Workaround for race condition"
    cmake --build . -- $MAKEOPTS

    case "$DISTRO_ID" in
    (rhel*|centos*)
        RHEL_BUILD=1
        ;;
    (*)
        RHEL_BUILD=0
        ;;
    esac

    if [ $SKIP_LIT_TESTS -eq 0 ]; then
        if [ $RHEL_BUILD -eq 1 ]; then
            cmake --build . -- $MAKEOPTS check-lld check-mlir
@@ -1158,9 +1147,4 @@ case $TARGET in
    (*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
    echo "Wheel Package build started !!!!"
    create_wheel_package
fi

echo "Operation complete"

@@ -1,171 +0,0 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"

printUsage() {
    echo
    echo "Usage: ${BASH_SOURCE##*/} [options ...]"
    echo
    echo "Options:"
    echo "  -c,  --clean              Clean output and delete all intermediate work"
    echo "  -s,  --static             Build a static lib (.a) instead of a dynamic/shared lib (.so)"
    echo "  -p,  --package <type>     Specify packaging format"
    echo "  -r,  --release            Make a release build instead of a debug build"
    echo "  -a,  --address_sanitizer  Enable address sanitizer"
    echo "  -o,  --outdir <pkg_type>  Print path of output directory containing packages of
    type referred to by pkg_type"
    echo "  -w,  --wheel              Create a Python wheel package of omniperf.
    Must be used along with the -r option"
    echo "  -h,  --help               Prints this help"
    echo
    echo "Possible values for <type>:"
    echo "  deb -> Debian format (default)"
    echo "  rpm -> RPM format"
    echo

    return 0
}

API_NAME="omniperf"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
ROCM_WHEEL_DIR="${BUILD_DIR}/_wheel"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY -C $BUILD_DIR"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"
WHEEL_PACKAGE=false


# Parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,static,address_sanitizer,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"

while true ;
do
    case "$1" in
        -h | --help)
            printUsage ; exit 0;;
        -c | --clean)
            TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
        -r | --release)
            BUILD_TYPE="Release" ; shift ;;
        -a | --address_sanitizer)
            set_asan_env_vars
            set_address_sanitizer_on ; shift ;;
        -s | --static)
            SHARED_LIBS="OFF" ; shift ;;
        -o | --outdir)
            TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
        -p | --package)
            MAKETARGET="$2" ; shift 2 ;;
        -w | --wheel)
            WHEEL_PACKAGE=true ; shift ;;
        --) shift; break;; # end delimiter
        *)
            echo "This should never happen, but just in case: UNEXPECTED ERROR Param: [$1]" >&2 ; exit 20;;
    esac

done

RET_CONFLICT=1
check_conflicting_options "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
if [ $RET_CONFLICT -ge 30 ]; then
    print_vars "$API_NAME" "$TARGET" "$BUILD_TYPE" "$SHARED_LIBS" "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
    exit $RET_CONFLICT
fi

clean() {
    echo "Cleaning $PROJ_NAME"
    rm -rf "$ROCM_WHEEL_DIR"
    rm -rf "$BUILD_DIR"
    rm -rf "$PACKAGE_DEB"
    rm -rf "$PACKAGE_RPM"
    rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
    rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}

build() {
    echo "Building $PROJ_NAME"
    if [ "$DISTRO_ID" = centos-7 ]; then
        echo "Skipping build and package upload for Omniperf on the CentOS 7 distro due to its Python dependency"
        exit 0
    fi

    if [ ! -d "$BUILD_DIR" ]; then
        mkdir -p "$BUILD_DIR"
        pushd "$BUILD_DIR" || exit

        echo "ROCm CMake Params: $(rocm_cmake_params)"
        echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"

        print_lib_type $SHARED_LIBS
        cmake \
            $(rocm_cmake_params) \
            $(rocm_common_cmake_params) \
            -DCHECK_PYTHON_DEPS=NO \
            -DPYTHON_DEPS=${BUILD_DIR}/python-libs \
            -DMOD_INSTALL_PATH=${BUILD_DIR}/modulefiles \
            "$OMNIPERF_ROOT"
    fi
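
    # MAKE_OPTS carries both the parallel-jobs flag ($DASH_JAY) and
    # -C $BUILD_DIR, so these targets run against the build tree regardless
    # of the current directory.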
    make $MAKE_OPTS
    make $MAKE_OPTS install
    make $MAKE_OPTS package

    copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
    copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}

create_wheel_package() {
    echo "Creating Omniperf wheel package"

    # Copy the setup.py generator to the build folder
    mkdir -p "$ROCM_WHEEL_DIR"
    cp -f "$SCRIPT_ROOT"/generate_setup_py.py "$ROCM_WHEEL_DIR"
    cp -f "$SCRIPT_ROOT"/repackage_wheel.sh "$ROCM_WHEEL_DIR"
    cd "$ROCM_WHEEL_DIR" || exit

    # Currently only supports python3.6
    ./repackage_wheel.sh "$BUILD_DIR"/*.rpm python3.6

    # Copy the created wheel to the RPM folder, which will be uploaded to Artifactory
    copy_if WHL "WHL" "$PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
}

print_output_directory() {
    case ${PKGTYPE} in
        ("deb")
            echo "${PACKAGE_DEB}";;
        ("rpm")
            echo "${PACKAGE_RPM}";;
        (*)
            echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
    esac
    exit
}

verifyEnvSetup

case "$TARGET" in
    (clean) clean ;;
    (build) build ;;
    (outdir) print_output_directory ;;
    (*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
    echo "Wheel Package build started !!!!"
    create_wheel_package
fi

echo "Operation complete"

@@ -1,191 +0,0 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"

printUsage() {
    echo
    echo "Usage: ${BASH_SOURCE##*/} [options ...]"
    echo
    echo "Options:"
    echo "  -c,  --clean              Clean output and delete all intermediate work"
    echo "  -s,  --static             Build a static lib (.a) instead of a dynamic/shared lib (.so)"
    echo "  -p,  --package <type>     Specify packaging format"
    echo "  -r,  --release            Make a release build instead of a debug build"
    echo "  -a,  --address_sanitizer  Enable address sanitizer"
    echo "  -o,  --outdir <pkg_type>  Print path of output directory containing packages of
    type referred to by pkg_type"
    echo "  -w,  --wheel              Create a Python wheel package of omnitrace.
    Must be used along with the -r option"
    echo "  -h,  --help               Prints this help"
    echo
    echo "Possible values for <type>:"
    echo "  deb -> Debian format (default)"
    echo "  rpm -> RPM format"
    echo

    return 0
}

API_NAME="omnitrace"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
BUILD_TYPE="Debug"
MAKE_OPTS="-j 8"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0
MAKETARGET="deb"
PKGTYPE="deb"
ASAN=0

# Parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,address_sanitizer,static,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"

while true; do
    case "$1" in
        -h | --help)
            printUsage
            exit 0
            ;;
        -c | --clean)
            TARGET="clean"
            ((CLEAN_OR_OUT |= 1))
            shift
            ;;
        -r | --release)
            BUILD_TYPE="RelWithDebInfo"
            shift
            ;;
        -a | --address_sanitizer)
            ack_and_ignore_asan
            ASAN=1
            shift
            ;;
        -s | --static)
            SHARED_LIBS="OFF"
            shift
            ;;
        -o | --outdir)
            TARGET="outdir"
            PKGTYPE=$2
            ((CLEAN_OR_OUT |= 2))
            shift 2
            ;;
        -p | --package)
            MAKETARGET="$2"
            shift 2
            ;;
        -w | --wheel)
            echo "omnitrace: wheel build option accepted and ignored"
            shift
            ;;
        --)
            shift
            break
            ;;
        *)
            echo "This should never happen, but just in case: UNEXPECTED ERROR Param: [$1]" >&2
            exit 20
            ;;
    esac

done

RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
    print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
    exit $RET_CONFLICT
fi

clean() {
    echo "Cleaning $PROJ_NAME"
    rm -rf "$BUILD_DIR"
    rm -rf "$PACKAGE_DEB"
    rm -rf "$PACKAGE_RPM"
    rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
    rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}

build_omnitrace() {
    echo "Building $PROJ_NAME"
    if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "ubuntu-24.04" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ]; then
        echo "Skipping build and package upload for Omnitrace on the \"${DISTRO_ID}\" distro"
        exit 0
    fi

    if [ $ASAN == 1 ]; then
        echo "Skipping build and package upload for Omnitrace for ASAN builds"
        exit 0
    fi
    if [ ! -d "$BUILD_DIR" ]; then
        mkdir -p "$BUILD_DIR"
        echo "Created build directory: $BUILD_DIR"
    fi

    echo "Build directory: $BUILD_DIR"
    pushd "$BUILD_DIR" || exit
    print_lib_type $SHARED_LIBS

    echo "ROCm CMake Params: $(rocm_cmake_params)"
    echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"

    if [ $ASAN == 1 ]; then
        echo "Address Sanitizer path"
    else
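        # Bash brace expansion turns each -D...{A,B}=ON into one flag per
        # name, e.g. -DOMNITRACE_BUILD_LIBUNWIND=ON -DOMNITRACE_BUILD_DYNINST=ON.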
        cmake \
            $(rocm_cmake_params) \
            $(rocm_common_cmake_params) \
            -DOMNITRACE_BUILD_{LIBUNWIND,DYNINST}=ON \
            -DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
            "$OMNITRACE_ROOT"
    fi

    popd || exit

    echo "Make Options: $MAKE_OPTS"
    cmake --build "$BUILD_DIR" --target all -- $MAKE_OPTS
    cmake --build "$BUILD_DIR" --target install -- $MAKE_OPTS
    cmake --build "$BUILD_DIR" --target package -- $MAKE_OPTS

    copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
    copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}

print_output_directory() {
    case ${PKGTYPE} in
        "deb")
            echo "${PACKAGE_DEB}"
            ;;
        "rpm")
            echo "${PACKAGE_RPM}"
            ;;
        *)
            echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2
            exit 1
            ;;
    esac
    exit
}

verifyEnvSetup

case "$TARGET" in
    clean) clean ;;
    build) build_omnitrace ;;
    outdir) print_output_directory ;;
    *) die "Invalid target $TARGET" ;;
esac

echo "Operation complete"

@@ -1,141 +0,0 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
PROJ_NAME=OpenCL-ICD-Loader
TARGET="build"
MAKEOPTS="$DASH_JAY"
BUILD_TYPE="Debug"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_DEB="$PACKAGE_ROOT/deb/${PROJ_NAME,,}"
PACKAGE_RPM="$PACKAGE_ROOT/rpm/${PROJ_NAME,,}"
CLEAN_OR_OUT=0;
PKGTYPE="deb"
MAKETARGET="deb"
API_NAME="rocm-opencl-icd-loader"

printUsage() {
    echo
    echo "Usage: $(basename "${BASH_SOURCE}") [options ...]"
    echo
    echo "Options:"
    echo "  -c,  --clean             Clean output and delete all intermediate work"
    echo "  -p,  --package <type>    Specify packaging format"
    echo "  -r,  --release           Make a release build instead of a debug build"
    echo "  -h,  --help              Prints this help"
    echo "  -o,  --outdir            Print path of output directory containing packages"
    echo "  -s,  --static            Static builds are not supported; this option is accepted and ignored"
    echo
    echo "Possible values for <type>:"
    echo "  deb -> Debian format (default)"
    echo "  rpm -> RPM format"
    echo
    return 0
}

RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
    print_vars $TARGET $BUILD_TYPE $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
    exit $RET_CONFLICT
fi

clean_opencl_icd_loader() {
    echo "Cleaning $PROJ_NAME"
    rm -rf "$PACKAGE_DEB"
    rm -rf "$PACKAGE_RPM"
    rm -rf "$PACKAGE_ROOT/${PROJ_NAME,,}"
}

copy_pkg_files_to_rocm() {
    local comp_folder=$1
    local comp_pkg_name=$2
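
    # Unpack the downloaded package into ./pkg and copy its rocm* payload
    # into ${ROCM_PATH}: .deb via dpkg-deb -x, .rpm via rpm2cpio | cpio.
    # For -dev packages an "el" suffix is appended so the Debian-style
    # "-dev" name matches the RPM-style "-devel" package file.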
    cd "${OUT_DIR}/${PKGTYPE}/${comp_folder}" || exit 2
    if [ "${PKGTYPE}" = 'deb' ]; then
        dpkg-deb -x ${comp_pkg_name}_*.deb pkg/
    else
        mkdir pkg && pushd pkg/ || exit 2
        if [[ "${comp_pkg_name}" != *-dev* ]]; then
            rpm2cpio ../${comp_pkg_name}-*.rpm | cpio -idmv
        else
            rpm2cpio ../${comp_pkg_name}el-*.rpm | cpio -idmv
        fi
        popd || exit 2
    fi
    ls ./pkg -alt
    cp -r ./pkg/*/rocm*/* "${ROCM_PATH}" || exit 2
    rm -rf pkg/
}

build_opencl_icd_loader() {
    echo "Downloading $PROJ_NAME package"
    if [ "$DISTRO_NAME" = ubuntu ]; then
        mkdir -p "$PACKAGE_DEB"
        local rocm_ver=${ROCM_VERSION}
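        # Strip a trailing ".0" (e.g. 6.3.0 -> 6.3) so the version matches
        # the directory layout of the repo.radeon.com URL built below.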
        if [ ${ROCM_VERSION##*.} = 0 ]; then
            rocm_ver=${ROCM_VERSION%.*}
        fi
        local url="https://repo.radeon.com/rocm/apt/${rocm_ver}/pool/main/r/${API_NAME}/"
        local package
        package=$(curl -s "$url" | grep -Po 'href="\K[^"]*' | grep "${DISTRO_RELEASE}" | head -n 1)

        if [ -z "$package" ]; then
            echo "No package found for Ubuntu version $DISTRO_RELEASE"
            exit 1
        fi

        wget -t3 -P "$PACKAGE_DEB" "${url}${package}"
        copy_pkg_files_to_rocm ${PROJ_NAME,,} ${API_NAME}
    else
        echo "$DISTRO_ID is not supported..."
        exit 2
    fi

    echo "Installing $PROJ_NAME package"
}

print_output_directory() {
    case ${PKGTYPE} in
        ("deb")
            echo ${PACKAGE_DEB};;
        ("rpm")
            echo ${PACKAGE_RPM};;
        (*)
            echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
    esac
    exit
}

VALID_STR=$(getopt -o hcraswlo:p: --long help,clean,release,outdir:,package: -- "$@")
eval set -- "$VALID_STR"
while true ;
do
    case "$1" in
        (-c | --clean )
            TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
        (-r | --release )
            BUILD_TYPE="RelWithDebInfo" ; shift ;;
        (-h | --help )
            printUsage ; exit 0 ;;
        (-a | --address_sanitizer)
            ack_and_ignore_asan ; shift ;;
        (-o | --outdir)
            TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
        (-p | --package)
            MAKETARGET="$2" ; shift 2;;
        (-s | --static)
            echo "-s parameter accepted but ignored" ; shift ;;
        --) shift; break;;
        (*)
            echo "This should never happen, but just in case: UNEXPECTED ERROR Param: [$1]" >&2 ; exit 20;;
    esac
done

case $TARGET in
    (clean) clean_opencl_icd_loader ;;
    (build) build_opencl_icd_loader ;;
    (outdir) print_output_directory ;;
    (*) die "Invalid target $TARGET" ;;
esac

echo "Operation complete"

@@ -32,7 +32,6 @@ ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
ROCM_CMAKE_PACKAGE_DEB="$(getPackageRoot)/deb/rocm-cmake"
ROCM_CMAKE_PACKAGE_RPM="$(getPackageRoot)/rpm/rocm-cmake"
ROCM_WHEEL_DIR="${ROCM_CMAKE_BUILD_DIR}/_wheel"
ROCM_CMAKE_BUILD_TYPE="debug"
BUILD_TYPE="Debug"
SHARED_LIBS="ON"
@@ -56,8 +55,6 @@ do
        ack_and_ignore_asan ; shift ;;
    (-s | --static)
        SHARED_LIBS="OFF" ; shift ;;
    (-w | --wheel)
        WHEEL_PACKAGE=true ; shift ;;
    (-o | --outdir)
        TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
    (-p | --package)
@@ -78,7 +75,6 @@ fi


clean_rocm_cmake() {
    rm -rf "$ROCM_WHEEL_DIR"
    rm -rf $ROCM_CMAKE_BUILD_DIR
    rm -rf $ROCM_CMAKE_PACKAGE_DEB
    rm -rf $ROCM_CMAKE_PACKAGE_RPM
@@ -106,19 +102,6 @@ build_rocm_cmake() {
    copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$ROCM_CMAKE_PACKAGE_RPM" $ROCM_CMAKE_BUILD_DIR/rocm-cmake*.rpm
}

create_wheel_package() {
    echo "Creating rocm-cmake wheel package"
    # Copy the setup.py generator to the build folder
    mkdir -p $ROCM_WHEEL_DIR
    cp -f $SCRIPT_ROOT/generate_setup_py.py $ROCM_WHEEL_DIR
    cp -f $SCRIPT_ROOT/repackage_wheel.sh $ROCM_WHEEL_DIR
    cd $ROCM_WHEEL_DIR
    # Currently only supports python3.6
    ./repackage_wheel.sh $ROCM_CMAKE_BUILD_DIR/rocm-cmake*.rpm python3.6
    # Copy the created wheel to the RPM folder, which will be uploaded to Artifactory
    copy_if WHL "WHL" "$ROCM_CMAKE_PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
}

print_output_directory() {
    case ${PKGTYPE} in
    ("deb")
@@ -138,9 +121,4 @@ case $TARGET in
    (*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
    echo "Wheel Package build started !!!!"
    create_wheel_package
fi

echo "Operation complete"

@@ -7,7 +7,6 @@ bison
bridge-utils
build-essential
bzip2
ccache
check
chrpath
cifs-utils
@@ -121,11 +120,9 @@ python3-yaml
python3.8-dev
re2c
redis-tools
# Eventually we should be able to remove rpm for debian builds.
rpm
rsync
ssh
# This makes life more pleasant inside the container
strace
sudo
systemtap-sdt-dev

@@ -1,285 +0,0 @@
#! /usr/bin/bash

set -x

apt-get -y update
DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install --no-install-recommends -y $(sed 's/#.*//' /tmp/packages)
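# The sed above strips comments from the /tmp/packages list before the
# package names are passed to apt-get.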
apt-get clean
rm -rf /var/cache/apt/ /var/lib/apt/lists/* /etc/apt/apt.conf.d/01proxy

# Install git 2.17.1 from source: with 2.25 we are seeing issues where adding
# git submodules fails if the parent git directory is owned by a different user
curl -o git.tar.gz https://cdn.kernel.org/pub/software/scm/git/git-2.17.1.tar.gz
tar -zxf git.tar.gz
cd git-*
make prefix=/usr/local all
make prefix=/usr/local install
git --version

# Install argparse and CppHeaderParser python modules for roctracer and rocprofiler
# Install rocm-docs-core for the docs-as-code project. Only needed on one OS
# CppHeader needs setuptools. setuptools needs wheel.
# Looks like I need them as separate commands
# Sigh, install both the python2 and python3 versions
pip3 install --no-cache-dir setuptools wheel tox
pip3 install --no-cache-dir CppHeaderParser argparse requests lxml barectf recommonmark jinja2==3.0.0 websockets matplotlib numpy scipy minimal msgpack pytest sphinx joblib PyYAML rocm-docs-core cmake==3.25.2 pandas myst-parser

# Allow sudo for every user
echo 'ALL ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/everyone

# Install OCaml packages to build LLVM's OCaml bindings, used in the lightning compiler test pipeline
wget -nv https://sourceforge.net/projects/opam.mirror/files/2.1.4/opam-2.1.4-x86_64-linux -O /usr/local/bin/opam
chmod +x /usr/local/bin/opam
opam init --yes --disable-sandboxing
opam install ctypes --yes

# Install and modify git-repo (#!/usr/bin/env python -> #!/usr/bin/env python3)
curl https://storage.googleapis.com/git-repo-downloads/repo > /usr/bin/repo
chmod a+x /usr/bin/repo

# Build ccache from source
cd /tmp
git clone https://github.com/ccache/ccache -b v4.7.5
cd ccache
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
make install
cd /tmp
rm -rf ccache

# Install sharp from MLNX_OFED_LINUX as a dependency for rccl-rdma-sharp-plugins
cd /var/tmp
mkdir mlnx
wget -O mlnx/tar.tgz https://content.mellanox.com/ofed/MLNX_OFED-24.01-0.3.3.1/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.tgz
tar -xz -C mlnx -f mlnx/tar.tgz
apt-key add mlnx/*/RPM-GPG-KEY-Mellanox
echo "deb [arch=amd64] file:$(echo $PWD/mlnx/*/DEBS) ./" > /etc/apt/sources.list.d/sharp.list
apt update
apt install -y sharp
apt clean
rm -rf /var/cache/apt/ /var/lib/apt/lists/* mlnx /etc/apt/sources.list.d/sharp.list

apt update
apt -y install libunwind-dev
apt -y install libgoogle-glog-dev

# Install python3.8 from source
curl -LO https://www.python.org/ftp/python/3.8.13/Python-3.8.13.tar.xz
tar -xvf Python-3.8.13.tar.xz
pwd
ls /var/tmp/
ls Python-3.8.13
mv Python-3.8.13 /opt/
apt install build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libsqlite3-dev libreadline-dev libffi-dev curl libbz2-dev pkg-config make -y
cd /opt/Python-3.8.13/
./configure --enable-optimizations --enable-shared
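# --enable-shared produces libpython3.8.so; "make altinstall" below installs
# the interpreter as python3.8 without touching the distro's default python3,
# and the ldconfig call registers the new shared library.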
make
make -j 6
make altinstall
ldconfig /opt/Python-3.8.13
python3.8 --version

# roctracer and rocprofiler need this python3.8
python3.8 -m pip install setuptools wheel
python3.8 -m pip install CppHeaderParser argparse requests lxml PyYAML joblib

# Install an older version of the hwloc-devel package for rocrtst
curl -lO https://download.open-mpi.org/release/hwloc/v1.11/hwloc-1.11.13.tar.bz2
tar -xvf hwloc-1.11.13.tar.bz2
cd hwloc-1.11.13
./configure
make
make install
cp /usr/local/lib/libhwloc.so.5 /usr/lib
hwloc-info --version

# Install gtest
mkdir -p /tmp/gtest
cd /tmp/gtest
wget https://github.com/google/googletest/archive/refs/tags/v1.14.0.zip -O googletest.zip
unzip googletest.zip
cd googletest-1.14.0/
mkdir build
cd build
cmake ..
make -j$(nproc)
make install
rm -rf /tmp/gtest

## Install gRPC from source
## RDC Pre-requisites
GRPC_ARCHIVE=grpc-1.61.0.tar.gz
mkdir /tmp/grpc
mkdir /usr/grpc
cd /tmp
git clone --recurse-submodules -b v1.61.0 https://github.com/grpc/grpc
cd grpc
mkdir -p build
cd build
cmake -DgRPC_INSTALL=ON -DBUILD_SHARED_LIBS=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX=/usr/grpc -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=14 -DCMAKE_SHARED_LINKER_FLAGS_INIT=-Wl,--enable-new-dtags,--build-id=sha1,--rpath,'$ORIGIN' ..
make -j $(nproc) install
rm -rf /tmp/grpc

## rocBLAS Pre-requisites
## Download prebuilt AMD multithreaded blis (2.0)
## Reference : https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/install.sh#L403
mkdir -p /tmp/blis
cd /tmp/blis
wget -O - https://github.com/amd/blis/releases/download/2.0/aocl-blis-mt-ubuntu-2.0.tar.gz | tar xfz -
mv amd-blis-mt /usr/blis
cd /
rm -rf /tmp/blis

## rocBLAS Pre-requisites (SWDEV-404612)
## Download aocl-linux-gcc-4.2.0_1_amd64.deb
mkdir -p /tmp/aocl
cd /tmp/aocl
wget -nv https://download.amd.com/developer/eula/aocl/aocl-4-2/aocl-linux-gcc-4.2.0_1_amd64.deb
apt install ./aocl-linux-gcc-4.2.0_1_amd64.deb
rm -rf /tmp/aocl

## hipBLAS Pre-requisites
## lapack (v3.9.1)
## Reference https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/install.sh#L174
lapack_version=3.9.1
lapack_srcdir=lapack-$lapack_version
lapack_blddir=lapack-$lapack_version-bld
mkdir -p /tmp/lapack
cd /tmp/lapack
rm -rf "$lapack_srcdir" "$lapack_blddir"
wget -O - https://github.com/Reference-LAPACK/lapack/archive/refs/tags/v3.9.1.tar.gz | tar xzf -
cmake -H$lapack_srcdir -B$lapack_blddir -DCMAKE_BUILD_TYPE=Release -DCMAKE_Fortran_FLAGS=-fno-optimize-sibling-calls -DBUILD_TESTING=OFF -DCBLAS=ON -DLAPACKE=OFF
make -j$(nproc) -C "$lapack_blddir"
make -C "$lapack_blddir" install
cd $lapack_blddir
cp -r ./include/* /usr/local/include/
cp -r ./lib/* /usr/local/lib
cd /
rm -rf /tmp/lapack

## rocSOLVER Pre-requisites
## FMT (v7.1.3)
## Reference https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/install.sh#L152
fmt_version=7.1.3
fmt_srcdir=fmt-$fmt_version
fmt_blddir=fmt-$fmt_version-bld
mkdir -p /tmp/fmt
cd /tmp/fmt
rm -rf "$fmt_srcdir" "$fmt_blddir"
wget -O - https://github.com/fmtlib/fmt/archive/refs/tags/7.1.3.tar.gz | tar xzf -
cmake -H$fmt_srcdir -B$fmt_blddir -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_CXX_EXTENSIONS=OFF -DCMAKE_CXX_STANDARD_REQUIRED=ON -DFMT_DOC=OFF -DFMT_TEST=OFF
make -j$(nproc) -C "$fmt_blddir"
make -C "$fmt_blddir" install

# Build and install libjpeg-turbo
mkdir -p /tmp/libjpeg-turbo
cd /tmp/libjpeg-turbo
wget -nv https://github.com/rrawther/libjpeg-turbo/archive/refs/heads/2.0.6.2.zip -O libjpeg-turbo-2.0.6.2.zip
unzip libjpeg-turbo-2.0.6.2.zip
cd libjpeg-turbo-2.0.6.2
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RELEASE -DENABLE_STATIC=FALSE -DCMAKE_INSTALL_DEFAULT_LIBDIR=lib ..
make -j$(nproc) install
rm -rf /tmp/libjpeg-turbo

# Get released ninja from source
mkdir -p /tmp/ninja
cd /tmp/ninja
wget -nv https://codeload.github.com/Kitware/ninja/zip/refs/tags/v1.11.1.g95dee.kitware.jobserver-1 -O ninja.zip
unzip ninja.zip
cd ninja-1.11.1.g95dee.kitware.jobserver-1
./configure.py --bootstrap
cp ninja /usr/local/bin/
rm -rf /tmp/ninja

# Install FFmpeg and dependencies
# Build NASM
mkdir -p /tmp/nasm-2.15.05
cd /tmp
wget -qO- "https://distfiles.macports.org/nasm/nasm-2.15.05.tar.bz2" | tar -xvj
cd nasm-2.15.05
./autogen.sh
./configure --prefix="/usr/local"
make -j$(nproc) install
rm -rf /tmp/nasm-2.15.05

# Build YASM
mkdir -p /tmp/yasm-1.3.0
cd /tmp
wget -qO- "http://www.tortall.net/projects/yasm/releases/yasm-1.3.0.tar.gz" | tar -xvz
cd yasm-1.3.0
./configure --prefix="/usr/local"
make -j$(nproc) install
rm -rf /tmp/yasm-1.3.0

# Build x264
mkdir -p /tmp/x264-snapshot-20191217-2245-stable
cd /tmp
wget -qO- "https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-20191217-2245-stable.tar.bz2" | tar -xvj
cd /tmp/x264-snapshot-20191217-2245-stable
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" ./configure --prefix="/usr/local" --enable-shared
make -j$(nproc) install
rm -rf /tmp/x264-snapshot-20191217-2245-stable

# Build x265
mkdir -p /tmp/x265_2.7
cd /tmp
wget -qO- "https://get.videolan.org/x265/x265_2.7.tar.gz" | tar -xvz
cd /tmp/x265_2.7/build/linux
cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="/usr/local" -DENABLE_SHARED:bool=on ../../source
make -j$(nproc) install
rm -rf /tmp/x265_2.7

# Build fdk-aac
mkdir -p /tmp/fdk-aac-2.0.2
cd /tmp
wget -qO- "https://sourceforge.net/projects/opencore-amr/files/fdk-aac/fdk-aac-2.0.2.tar.gz" | tar -xvz
cd /tmp/fdk-aac-2.0.2
autoreconf -fiv
./configure --prefix="/usr/local" --enable-shared --disable-static
make -j$(nproc) install
rm -rf /tmp/fdk-aac-2.0.2

# Build FFmpeg (PKG_CONFIG_PATH must prefix the configure invocation so the
# locally built codec .pc files are found)
cd /tmp
git clone -b release/4.4 https://git.ffmpeg.org/ffmpeg.git ffmpeg
cd ffmpeg
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" \
./configure --prefix="/usr/local" --extra-cflags="-I/usr/local/include" --extra-ldflags="-L/usr/local/lib" --extra-libs=-lpthread --extra-libs=-lm --enable-shared --disable-static --enable-libx264 --enable-libx265 --enable-libfdk-aac --enable-gpl --enable-nonfree
make -j$(nproc) install
rm -rf /tmp/ffmpeg

cp /tmp/local-pin-600 /etc/apt/preferences.d
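
# Use the parallel lbzip2 as the generic "compressor" helper when available,
# falling back to plain bzip2; the boost tarball extraction below uses it
# via --use-compress-program.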
command -v lbzip2
ln -sf $(command -v lbzip2) /usr/local/bin/compressor || ln -sf $(command -v bzip2) /usr/local/bin/compressor

# Install Google Benchmark
mkdir -p /tmp/Gbenchmark
cd /tmp/Gbenchmark
wget -qO- https://github.com/google/benchmark/archive/refs/tags/v1.6.1.tar.gz | tar xz
cmake -Sbenchmark-1.6.1 -Bbuild -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DBENCHMARK_ENABLE_TESTING=OFF -DCMAKE_CXX_STANDARD=14
make -j -C build
cd /tmp/Gbenchmark/build
make install

# Build boost-1.85.0 from source for RPP
# Installing in a non-standard location since the test packages of hipFFT and rocFFT pick up the version of
# the installed Boost library and declare a package dependency on that specific version of Boost.
# For example, if this was installed in the standard location it would declare a dependency on libboost-dev(el)1.85.0,
# which is not available as a package in any distro.
# Once this is fixed, we can remove the Boost package from the requirements list and install this
# in the standard location
mkdir -p /tmp/boost-1.85.0
cd /tmp/boost-1.85.0
wget -nv https://sourceforge.net/projects/boost/files/boost/1.85.0/boost_1_85_0.tar.bz2 -O ./boost_1_85_0.tar.bz2
tar -xf boost_1_85_0.tar.bz2 --use-compress-program="/usr/local/bin/compressor"
cd boost_1_85_0
./bootstrap.sh --prefix=${RPP_DEPS_LOCATION} --with-python=python3
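# Two b2 passes: build and install the shared system and filesystem
# libraries first, then rebuild with -fpic and install the static archives.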
./b2 stage -j$(nproc) threading=multi link=shared cxxflags="-std=c++11"
./b2 install threading=multi link=shared --with-system --with-filesystem
./b2 stage -j$(nproc) threading=multi link=static cxxflags="-std=c++11 -fpic" cflags="-fpic"
./b2 install threading=multi link=static --with-system --with-filesystem
rm -rf /tmp/boost-1.85.0
@@ -7,7 +7,6 @@ bison
bridge-utils
build-essential
bzip2
ccache
check
chrpath
cifs-utils