diff --git a/.azuredevops/components/ROCR-Runtime.yml b/.azuredevops/components/ROCR-Runtime.yml index 0358dd335..fea5ae3d0 100644 --- a/.azuredevops/components/ROCR-Runtime.yml +++ b/.azuredevops/components/ROCR-Runtime.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocr-runtime - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -45,6 +64,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ROCR_Runtime_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: vmImage: 'ubuntu-22.04' ${{ if eq(job.os, 'almalinux8') }}: @@ -65,14 +88,18 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -82,105 +109,112 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml # parameters: # aptPackages: ${{ parameters.aptPackages }} -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ROCR_Runtime_test_${{ job.os }}_${{ job.target }} - dependsOn: ROCR_Runtime_build_${{ job.os }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH 
}}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - parameters: - runRocminfo: false - - task: Bash@3 - displayName: Build kfdtest - inputs: - targetType: 'inline' - workingDirectory: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest - script: | - if [ -e /opt/rh/gcc-toolset-14/enable ]; then - source /opt/rh/gcc-toolset-14/enable - fi - mkdir build && cd build - cmake -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm .. - make - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: kfdtest - testExecutable: BIN_DIR=$(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/build ./run_kfdtest.sh - testParameters: '-p core --gtest_output=xml:./test_output.xml --gtest_color=yes' - testDir: $(Build.SourcesDirectory)/libhsakmt/tests/kfdtest/scripts - os: ${{ job.os }} - - task: Bash@3 - displayName: Build rocrtst - inputs: - targetType: 'inline' - workingDirectory: $(Build.SourcesDirectory)/rocrtst/suites/test_common - script: | - echo $(Build.SourcesDirectory)/rocrtst/thirdparty/lib | sudo tee -a /etc/ld.so.conf.d/rocm-ci.conf - sudo cat /etc/ld.so.conf.d/rocm-ci.conf - sudo ldconfig -v - ldconfig -p - if [ -e /opt/rh/gcc-toolset-14/enable ]; then - source /opt/rh/gcc-toolset-14/enable - fi - BASE_CLANG_DIR=$(Agent.BuildDirectory)/rocm/llvm/lib/clang - export NEWEST_CLANG_VER=$(ls -1 $BASE_CLANG_DIR | sort -V | tail -n 1) - mkdir build && cd build - cmake .. 
\ - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm \ - -DTARGET_DEVICES=${{ job.target }} \ - -DROCM_DIR=$(Agent.BuildDirectory)/rocm \ - -DLLVM_DIR=$(Agent.BuildDirectory)/rocm/llvm/bin \ - -DOPENCL_INC_DIR=$BASE_CLANG_DIR/$NEWEST_CLANG_VER/include - make - make rocrtst_kernels - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocrtst - testExecutable: ./rocrtst64 - testParameters: '--gtest_filter="-rocrtstNeg.Memory_Negative_Tests:rocrtstFunc.Memory_Max_Mem" --gtest_output=xml:./test_output.xml --gtest_color=yes' - testDir: $(Build.SourcesDirectory)/rocrtst/suites/test_common/build/${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - environment: test - gpuTarget: ${{ job.target }} - # docker image will be missing libhwloc5 +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ROCR_Runtime_test_${{ job.os }}_${{ job.target }} + dependsOn: ROCR_Runtime_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + parameters: + runRocminfo: false + - task: Bash@3 + displayName: Build kfdtest + inputs: + targetType: 'inline' + workingDirectory: $(Agent.BuildDirectory)/s/libhsakmt/tests/kfdtest + script: | + if [ -e /opt/rh/gcc-toolset-14/enable ]; then + source /opt/rh/gcc-toolset-14/enable + fi + mkdir build && cd build + cmake -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm .. 
+ make + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: kfdtest + testExecutable: BIN_DIR=$(Agent.BuildDirectory)/s/libhsakmt/tests/kfdtest/build ./run_kfdtest.sh + testParameters: '-p core --gtest_output=xml:./test_output.xml --gtest_color=yes' + testDir: $(Agent.BuildDirectory)/s/libhsakmt/tests/kfdtest/scripts + os: ${{ job.os }} + - task: Bash@3 + displayName: Build rocrtst + inputs: + targetType: 'inline' + workingDirectory: $(Agent.BuildDirectory)/s/rocrtst/suites/test_common + script: | + echo $(Agent.BuildDirectory)/s/rocrtst/thirdparty/lib | sudo tee -a /etc/ld.so.conf.d/rocm-ci.conf + sudo cat /etc/ld.so.conf.d/rocm-ci.conf + sudo ldconfig -v + ldconfig -p + if [ -e /opt/rh/gcc-toolset-14/enable ]; then + source /opt/rh/gcc-toolset-14/enable + fi + BASE_CLANG_DIR=$(Agent.BuildDirectory)/rocm/llvm/lib/clang + export NEWEST_CLANG_VER=$(ls -1 $BASE_CLANG_DIR | sort -V | tail -n 1) + mkdir build && cd build + cmake .. \ + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm \ + -DTARGET_DEVICES=${{ job.target }} \ + -DROCM_DIR=$(Agent.BuildDirectory)/rocm \ + -DLLVM_DIR=$(Agent.BuildDirectory)/rocm/llvm/bin \ + -DOPENCL_INC_DIR=$BASE_CLANG_DIR/$NEWEST_CLANG_VER/include + make + make rocrtst_kernels + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: rocrtst + testExecutable: ./rocrtst64 + testParameters: '--gtest_filter="-rocrtstNeg.Memory_Negative_Tests:rocrtstFunc.Memory_Max_Mem" --gtest_output=xml:./test_output.xml --gtest_color=yes' + testDir: $(Agent.BuildDirectory)/s/rocrtst/suites/test_common/build/${{ job.target }} + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + environment: test + gpuTarget: ${{ job.target }} + # docker image will be missing libhwloc5 diff --git a/.azuredevops/components/hip-tests.yml b/.azuredevops/components/hip-tests.yml index c88465a6d..388ac4170 100644 --- a/.azuredevops/components/hip-tests.yml +++ b/.azuredevops/components/hip-tests.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: hip-tests - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -60,6 +79,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: hip_tests_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -76,15 +99,18 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if
parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} # compile hip-tests - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: - componentName: hip-tests + componentName: ${{ parameters.componentName }} cmakeSourceDir: '../catch' customBuildTarget: build_tests extraBuildFlags: >- @@ -96,9 +122,12 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml @@ -108,52 +137,56 @@ jobs: extraEnvVars: - HIP_ROCCLR_HOME:::/home/user/workspace/rocm -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: hip_tests_test_${{ job.target }} - timeoutInMinutes: 240 - dependsOn: hip_tests_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - - task: Bash@3 - displayName: Symlink rocm_agent_enumerator - inputs: - targetType: inline - script: | - # Assuming that /opt is no longer persistent across runs, test environments are fully ephemeral - sudo mkdir -p /opt/rocm/bin - sudo ln -s $(Agent.BuildDirectory)/rocm/bin/rocm_agent_enumerator /opt/rocm/bin/rocm_agent_enumerator - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: hip_tests - testDir: $(Agent.BuildDirectory)/rocm/share/hip - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - environment: test - gpuTarget: ${{ job.target }} - optSymLink: true +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: hip_tests_test_${{ job.target }} + timeoutInMinutes: 240 + dependsOn: hip_tests_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: 
/.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + gpuTarget: ${{ job.target }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Symlink rocm_agent_enumerator + inputs: + targetType: inline + script: | + # Assuming that /opt is no longer persistent across runs, test environments are fully ephemeral + sudo mkdir -p /opt/rocm/bin + sudo ln -s $(Agent.BuildDirectory)/rocm/bin/rocm_agent_enumerator /opt/rocm/bin/rocm_agent_enumerator + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: $(Agent.BuildDirectory)/rocm/share/hip + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + environment: test + gpuTarget: ${{ job.target }} + optSymLink: true diff --git a/.azuredevops/components/hipBLASLt.yml b/.azuredevops/components/hipBLASLt.yml index d8bfbd0bb..6364380a5 100644 --- a/.azuredevops/components/hipBLASLt.yml +++ b/.azuredevops/components/hipBLASLt.yml @@ -35,6 +35,8 @@ parameters: - ccache - gfortran - git + - libboost-filesystem-dev + - libboost-program-options-dev - libdrm-dev - liblapack-dev - libmsgpack-dev @@ -176,7 +178,7 @@ jobs: mkdir -p $(Agent.BuildDirectory)/temp-deps cd $(Agent.BuildDirectory)/temp-deps # position-independent LAPACK is required for almalinux8 builds - cmake -DBUILD_GTEST=OFF -DBUILD_LAPACK=ON -DCMAKE_POSITION_INDEPENDENT_CODE=ON $(Agent.BuildDirectory)/s/deps + cmake -DBUILD_GTEST=OFF -DBUILD_LAPACK=ON -DCMAKE_POSITION_INDEPENDENT_CODE=ON $(Agent.BuildDirectory)/sparse/projects/hipblaslt/deps make -j sudo make install - script: | @@ -195,6 +197,8 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} + cmakeSourceDir: $(Agent.BuildDirectory)/sparse/projects/hipblaslt + cmakeBuildDir: $(Agent.BuildDirectory)/sparse/projects/hipblaslt/build extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/vendor -DCMAKE_INCLUDE_PATH=$(Agent.BuildDirectory)/rocm/llvm/include diff --git a/.azuredevops/components/hipSPARSELt.yml b/.azuredevops/components/hipSPARSELt.yml index 104e0ee6c..02e258f78 100644 --- a/.azuredevops/components/hipSPARSELt.yml +++ b/.azuredevops/components/hipSPARSELt.yml @@ -44,6 +44,7 @@ parameters: type: object default: - joblib + - msgpack - name: rocmDependencies type: object default: diff --git a/.azuredevops/components/origami.yml b/.azuredevops/components/origami.yml new file mode 100644 index 000000000..b55cd67aa --- /dev/null +++ b/.azuredevops/components/origami.yml @@ -0,0 +1,236 @@ +parameters: +- name: componentName + type: string + default: origami +- 
name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false +# set to true if doing full build of ROCm stack +# and dependencies are pulled from same pipeline +- name: aggregatePipeline + type: boolean + default: false +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + - wget + - python3 + - python3-dev + - python3-pip +- name: pipModules + type: object + default: + - nanobind>=2.0.0 +- name: rocmDependencies + type: object + default: + - clr + - llvm-project + - rocm-cmake + - rocminfo + - ROCR-Runtime + - rocprofiler-register +- name: rocmTestDependencies + type: object + default: + - clr + - llvm-project + - rocm-cmake + - rocminfo + - ROCR-Runtime + - rocprofiler-register + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt } + - { os: almalinux8, packageManager: dnf } + testJobs: + - { os: ubuntu2204, packageManager: apt, target: gfx942 } + - { os: ubuntu2204, packageManager: apt, target: gfx90a } +- name: downstreamComponentMatrix + type: object + default: + - hipBLASLt: + name: hipBLASLt + sparseCheckoutDir: projects/hipblaslt + skipUnifiedBuild: 'false' + buildDependsOn: + - origami_build + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: origami_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: ROCM_PATH + value: $(Agent.BuildDirectory)/rocm + pool: + vmImage: ${{ variables.BASE_BUILD_POOL }} + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-cmake-latest.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmDependencies }} + os: ${{ job.os }} + aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + extraBuildFlags: >- + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DCMAKE_CXX_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang++ + -DORIGAMI_BUILD_SHARED_LIBS=ON + -DORIGAMI_ENABLE_PYTHON=ON + -DORIGAMI_BUILD_TESTING=ON + -GNinja + - ${{ if ne(job.os, 'almalinux8') }}: + - task: PublishPipelineArtifact@1 + displayName: 'Publish 
Build Directory Artifact' + inputs: + targetPath: '$(Agent.BuildDirectory)/s/build' + artifact: '${{ parameters.componentName }}_${{ job.os }}_build_dir' + publishLocation: 'pipeline' + - task: PublishPipelineArtifact@1 + displayName: 'Publish Python Source Artifact' + inputs: + targetPath: '$(Agent.BuildDirectory)/s/python' + artifact: '${{ parameters.componentName }}_${{ job.os }}_python_src' + publishLocation: 'pipeline' + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml + parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} + componentName: ${{ parameters.componentName }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml + +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: origami_test_${{ job.os }}_${{ job.target }} + timeoutInMinutes: 120 + dependsOn: origami_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + preTargetFilter: ${{ parameters.componentName }} + os: ${{ job.os }} + - task: DownloadPipelineArtifact@2 + displayName: 'Download Build Directory Artifact' + inputs: + artifact: '${{ parameters.componentName }}_${{ job.os }}_build_dir' + path: '$(Agent.BuildDirectory)/s/build' + - task: DownloadPipelineArtifact@2 + displayName: 'Download Python Source Artifact' + inputs: + artifact: '${{ parameters.componentName }}_${{ job.os }}_python_src' + path: '$(Agent.BuildDirectory)/s/python' + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + os: ${{ job.os }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - script: | + export PYTHONPATH=$(Agent.BuildDirectory)/s/build/python:$PYTHONPATH + + echo "--- Running origami_test.py ---" + python3 $(Agent.BuildDirectory)/s/python/origami_test.py + + echo "--- Running origami_grid_test.py ---" + python3 $(Agent.BuildDirectory)/s/python/origami_grid_test.py + displayName: 'Run Python Binding Tests' + condition: succeeded() + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + 
environment: test + gpuTarget: ${{ job.target }} + +- ${{ if parameters.triggerDownstreamJobs }}: + - ${{ each component in parameters.downstreamComponentMatrix }}: + - ${{ if not(and(parameters.unifiedBuild, eq(component.skipUnifiedBuild, 'true'))) }}: + - template: /.azuredevops/components/${{ component.name }}.yml@pipelines_repo + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ component.sparseCheckoutDir }} + buildDependsOn: ${{ component.buildDependsOn }} + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }}+${{ parameters.componentName }} + triggerDownstreamJobs: true + unifiedBuild: ${{ parameters.unifiedBuild }} diff --git a/.azuredevops/components/rocBLAS.yml b/.azuredevops/components/rocBLAS.yml index 6aab7ebb3..ca3577b5b 100644 --- a/.azuredevops/components/rocBLAS.yml +++ b/.azuredevops/components/rocBLAS.yml @@ -179,6 +179,8 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} + cmakeSourceDir: $(Agent.BuildDirectory)/sparse/projects/rocblas + cmakeBuildDir: $(Agent.BuildDirectory)/sparse/projects/rocblas/build extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm/llvm;$(Agent.BuildDirectory)/rocm;$(Agent.BuildDirectory)/vendor -DCMAKE_BUILD_TYPE=Release diff --git a/.azuredevops/components/rocDecode.yml b/.azuredevops/components/rocDecode.yml index ee1d5ccfc..3b4bc6a71 100644 --- a/.azuredevops/components/rocDecode.yml +++ b/.azuredevops/components/rocDecode.yml @@ -8,6 +8,25 @@ parameters: - name: checkoutRef type: string default: '' +- name: rocPyDecodeRepo + type: string + default: rocpydecode_repo +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -56,10 +75,23 @@ parameters: testJobs: - { os: ubuntu2204, packageManager: apt, target: gfx942 } - { os: ubuntu2204, packageManager: apt, target: gfx90a } +- name: downstreamComponentMatrix + type: object + default: + - rocPyDecode: + name: rocPyDecode + sparseCheckoutDir: '' + skipUnifiedBuild: 'false' + buildDependsOn: + - rocDecode_build jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ${{ parameters.componentName }}_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -83,12 +115,15 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} os: ${{ job.os }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: os: ${{ job.os }} @@ -169,3 +204,15 @@ jobs: registerROCmPackages: true environment: test gpuTarget: ${{ job.target 
}} + +- ${{ if parameters.triggerDownstreamJobs }}: + - ${{ each component in parameters.downstreamComponentMatrix }}: + - ${{ if not(and(parameters.unifiedBuild, eq(component.skipUnifiedBuild, 'true'))) }}: + - template: /.azuredevops/components/${{ component.name }}.yml@pipelines_repo + parameters: + checkoutRepo: ${{ parameters.rocPyDecodeRepo }} + sparseCheckoutDir: ${{ component.sparseCheckoutDir }} + buildDependsOn: ${{ component.buildDependsOn }} + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }}+${{ parameters.componentName }} + triggerDownstreamJobs: true + unifiedBuild: ${{ parameters.unifiedBuild }} diff --git a/.azuredevops/components/rocPyDecode.yml b/.azuredevops/components/rocPyDecode.yml index 6e85a43ef..615148a49 100644 --- a/.azuredevops/components/rocPyDecode.yml +++ b/.azuredevops/components/rocPyDecode.yml @@ -5,6 +5,22 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -47,19 +63,19 @@ parameters: type: object default: buildJobs: - - gfx942: - target: gfx942 - - gfx90a: - target: gfx90a + - { os: ubuntu2204, packageManager: apt, target: gfx942 } + - { os: ubuntu2204, packageManager: apt, target: gfx90a } testJobs: - - gfx942: - target: gfx942 - - gfx90a: - target: gfx90a + - { os: ubuntu2204, packageManager: apt, target: gfx942 } + - { os: ubuntu2204, packageManager: apt, target: gfx90a } jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocPyDecode_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -74,16 +90,20 @@ jobs: parameters: aptPackages: ${{ parameters.aptPackages }} pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} gpuTarget: ${{ job.target }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: 'Save Python Package Paths' inputs: diff --git a/.azuredevops/components/rocm-core.yml b/.azuredevops/components/rocm-core.yml index f36252320..714518781 100644 --- a/.azuredevops/components/rocm-core.yml +++ b/.azuredevops/components/rocm-core.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocm-core - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: 
downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -27,6 +46,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocm_core_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: ${{ if eq(job.os, 'ubuntu2404') }}: vmImage: 'ubuntu-24.04' @@ -50,8 +73,10 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -65,9 +90,12 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml diff --git a/.azuredevops/components/rocm_smi_lib.yml b/.azuredevops/components/rocm_smi_lib.yml index 31459a868..138bc559e 100644 --- a/.azuredevops/components/rocm_smi_lib.yml +++ b/.azuredevops/components/rocm_smi_lib.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocm-smi-lib - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -32,6 +51,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocm_smi_lib_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: ${{ if eq(job.os, 'ubuntu2404') }}: vmImage: 'ubuntu-24.04' @@ -55,8 +78,10 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -65,51 +90,56 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH 
}}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml # parameters: # aptPackages: ${{ parameters.aptPackages }} -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocm_smi_lib_test_${{ job.os }}_${{ job.target }} - dependsOn: rocm_smi_lib_build_${{ job.os }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - parameters: - runRocminfo: false - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocm_smi_lib - testDir: '$(Agent.BuildDirectory)' - testExecutable: 'sudo ./rocm/share/rocm_smi/rsmitst_tests/rsmitst' - testParameters: '--gtest_output=xml:./test_output.xml --gtest_color=yes' - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: rocm_smi_lib_test_${{ job.os }}_${{ job.target }} + dependsOn: rocm_smi_lib_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + parameters: + runRocminfo: false + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: '$(Agent.BuildDirectory)' + testExecutable: 'sudo ./rocm/share/rocm_smi/rsmitst_tests/rsmitst' + testParameters: '--gtest_output=xml:./test_output.xml --gtest_color=yes' + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + environment: test + gpuTarget: ${{ job.target }} \ No newline at end of file diff --git a/.azuredevops/components/rocminfo.yml b/.azuredevops/components/rocminfo.yml index aada773ca..f3e87bf57 100644 --- a/.azuredevops/components/rocminfo.yml +++ 
b/.azuredevops/components/rocminfo.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocminfo - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -40,7 +59,11 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - - job: rocminfo_build_${{ job.os }} + - job: ${{ parameters.componentName }}_build_${{ job.os }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }} pool: vmImage: 'ubuntu-22.04' ${{ if eq(job.os, 'almalinux8') }}: @@ -62,14 +85,18 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} dependencyList: ${{ parameters.rocmDependencies }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} useAmdclang: false extraBuildFlags: >- @@ -78,65 +105,71 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocminfo_test_${{ job.target }} - dependsOn: rocminfo_build_${{ job.os }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - packageManager: ${{ job.packageManager }} - registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - 
parameters: - runRocminfo: false - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocminfo - testDir: '$(Agent.BuildDirectory)' - testExecutable: './rocm/bin/rocminfo' - testParameters: '' - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocm_agent_enumerator - testDir: '$(Agent.BuildDirectory)' - testExecutable: './rocm/bin/rocm_agent_enumerator' - testParameters: '' - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - registerROCmPackages: true - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: rocminfo_test_${{ job.target }} + dependsOn: rocminfo_build_${{ job.os }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + parameters: + runRocminfo: false + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: '$(Agent.BuildDirectory)' + testExecutable: './rocm/bin/rocminfo' + testParameters: '' + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: rocm_agent_enumerator + testDir: '$(Agent.BuildDirectory)' + testExecutable: './rocm/bin/rocm_agent_enumerator' + testParameters: '' + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + registerROCmPackages: true + environment: test + gpuTarget: ${{ job.target }} diff --git a/.azuredevops/components/rocprofiler-compute.yml b/.azuredevops/components/rocprofiler-compute.yml index ed83b277a..bccb51f67 100644 --- a/.azuredevops/components/rocprofiler-compute.yml +++ b/.azuredevops/components/rocprofiler-compute.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocprofiler-compute - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: 
triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -36,6 +55,7 @@ parameters: - pymongo - pyyaml - setuptools + - sqlalchemy - tabulate - textual - textual_plotext @@ -78,6 +98,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocprofiler_compute_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -94,15 +118,19 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: extraBuildFlags: >- -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml @@ -111,78 +139,83 @@ jobs: # pipModules: ${{ parameters.pipModules }} # gpuTarget: ${{ job.target }} -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocprofiler_compute_test_${{ job.target }} - timeoutInMinutes: 120 - dependsOn: rocprofiler_compute_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - - name: PYTHON_VERSION - value: 3.10 - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - - task: Bash@3 - displayName: Add en_US.UTF-8 locale - inputs: - targetType: inline - script: | - sudo locale-gen en_US.UTF-8 - sudo update-locale - locale -a - - task: Bash@3 - displayName: Add ROCm binaries to PATH - inputs: - targetType: inline - script: | - echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" 
- echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml - parameters: - extraBuildFlags: >- - -DCMAKE_HIP_ARCHITECTURES=${{ job.target }} - -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang - -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm - -DROCM_PATH=$(Agent.BuildDirectory)/rocm - -DCMAKE_BUILD_TYPE=Release - -DENABLE_TESTS=ON - -DINSTALL_TESTS=ON - -GNinja - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocprofiler-compute - testDir: $(Build.BinariesDirectory)/libexec/rocprofiler-compute - testExecutable: ROCM_PATH=$(Agent.BuildDirectory)/rocm ctest - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: rocprofiler_compute_test_${{ job.target }} + timeoutInMinutes: 120 + dependsOn: rocprofiler_compute_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: PYTHON_VERSION + value: 3.10 + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + preTargetFilter: ${{ parameters.componentName }} + gpuTarget: ${{ job.target }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Add en_US.UTF-8 locale + inputs: + targetType: inline + script: | + sudo locale-gen en_US.UTF-8 + sudo update-locale + locale -a + - task: Bash@3 + displayName: Add ROCm binaries to PATH + inputs: + targetType: inline + script: | + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + extraBuildFlags: >- + -DCMAKE_HIP_ARCHITECTURES=${{ job.target }} + -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang + -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + 
-DROCM_PATH=$(Agent.BuildDirectory)/rocm + -DCMAKE_BUILD_TYPE=Release + -DENABLE_TESTS=ON + -DINSTALL_TESTS=ON + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testDir: $(Build.BinariesDirectory)/libexec/rocprofiler-compute + testExecutable: ROCM_PATH=$(Agent.BuildDirectory)/rocm ctest + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} diff --git a/.azuredevops/components/rocprofiler-sdk.yml b/.azuredevops/components/rocprofiler-sdk.yml index 7dea99f0e..3f1656040 100644 --- a/.azuredevops/components/rocprofiler-sdk.yml +++ b/.azuredevops/components/rocprofiler-sdk.yml @@ -1,10 +1,29 @@ parameters: +- name: componentName + type: string + default: rocprofiler-sdk - name: checkoutRepo type: string default: 'self' - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -73,6 +92,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: rocprofiler_sdk_build_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -89,6 +112,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: @@ -96,6 +120,8 @@ jobs: dependencyList: ${{ parameters.rocmDependencies }} gpuTarget: ${{ job.target }} aggregatePipeline: ${{ parameters.aggregatePipeline }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} - task: Bash@3 displayName: Add Python site-packages binaries to path inputs: @@ -105,6 +131,7 @@ jobs: echo "##vso[task.prependpath]$USER_BASE/bin" - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: + componentName: ${{ parameters.componentName }} extraBuildFlags: >- -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm -DROCPROFILER_BUILD_TESTS=ON @@ -114,9 +141,12 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml # - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml @@ -126,62 +156,67 @@ jobs: # gpuTarget: ${{ job.target }} # 
registerROCmPackages: true -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: rocprofiler_sdk_test_${{ job.target }} - dependsOn: rocprofiler_sdk_build_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml - parameters: - checkoutRepo: ${{ parameters.checkoutRepo }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmDependencies }} - gpuTarget: ${{ job.target }} - - task: Bash@3 - displayName: Add Python and ROCm binaries to path - inputs: - targetType: inline - script: | - USER_BASE=$(python3 -m site --user-base) - echo "##vso[task.prependpath]$USER_BASE/bin" - echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml - parameters: - extraBuildFlags: >- - -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm - -DROCPROFILER_BUILD_TESTS=ON - -DROCPROFILER_BUILD_SAMPLES=ON - -DROCPROFILER_BUILD_RELEASE=ON - -DGPU_TARGETS=${{ job.target }} - -GNinja - - template: ${{ variables.CI_TEMPLATE_PATH}}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: rocprofiler-sdk - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} - registerROCmPackages: true +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: rocprofiler_sdk_test_${{ job.target }} + dependsOn: rocprofiler_sdk_build_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} + checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH 
}}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmDependencies }} + gpuTarget: ${{ job.target }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - task: Bash@3 + displayName: Add Python and ROCm binaries to path + inputs: + targetType: inline + script: | + USER_BASE=$(python3 -m site --user-base) + echo "##vso[task.prependpath]$USER_BASE/bin" + echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + componentName: ${{ parameters.componentName }} + extraBuildFlags: >- + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DROCPROFILER_BUILD_TESTS=ON + -DROCPROFILER_BUILD_SAMPLES=ON + -DROCPROFILER_BUILD_RELEASE=ON + -DGPU_TARGETS=${{ job.target }} + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH}}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} + registerROCmPackages: true diff --git a/.azuredevops/components/roctracer.yml b/.azuredevops/components/roctracer.yml index d00c03ecc..503cd18bd 100644 --- a/.azuredevops/components/roctracer.yml +++ b/.azuredevops/components/roctracer.yml @@ -8,6 +8,22 @@ parameters: - name: checkoutRef type: string default: '' +# monorepo related parameters +- name: sparseCheckoutDir + type: string + default: '' +- name: triggerDownstreamJobs + type: boolean + default: false +- name: downstreamAggregateNames + type: string + default: '' +- name: buildDependsOn + type: object + default: null +- name: unifiedBuild + type: boolean + default: false # set to true if doing full build of ROCm stack # and dependencies are pulled from same pipeline - name: aggregatePipeline @@ -65,6 +81,10 @@ parameters: jobs: - ${{ each job in parameters.jobMatrix.buildJobs }}: - job: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} + ${{ if parameters.buildDependsOn }}: + dependsOn: + - ${{ each build in parameters.buildDependsOn }}: + - ${{ build }}_${{ job.os }}_${{ job.target }} variables: - group: common - template: /.azuredevops/variables-global.yml @@ -87,6 +107,7 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: checkoutRef: ${{ parameters.checkoutRef }} @@ -94,6 +115,8 @@ jobs: gpuTarget: ${{ job.target }} aggregatePipeline: ${{ parameters.aggregatePipeline }} os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} # the linker flags will not affect ubuntu2204 builds as the paths do not exist - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml parameters: @@ -109,10 +132,13 @@ jobs: -GNinja - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/manifest.yml parameters: + componentName: ${{ parameters.componentName }} + sparseCheckoutDir: ${{ parameters.sparseCheckoutDir }} os: ${{ job.os }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH 
}}/steps/artifact-upload.yml parameters: + componentName: ${{ parameters.componentName }} os: ${{ job.os }} gpuTarget: ${{ job.target }} - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-links.yml @@ -123,53 +149,57 @@ jobs: # gpuTarget: ${{ job.target }} # registerROCmPackages: true -- ${{ each job in parameters.jobMatrix.testJobs }}: - - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} - dependsOn: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} - condition: - and(succeeded(), - eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), - not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), variables['Build.DefinitionName'])), - eq(${{ parameters.aggregatePipeline }}, False) - ) - variables: - - group: common - - template: /.azuredevops/variables-global.yml - pool: ${{ job.target }}_test_pool - workspace: - clean: all - steps: - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - packageManager: ${{ job.packageManager }} - registerROCmPackages: true - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml - parameters: - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml - parameters: - checkoutRef: ${{ parameters.checkoutRef }} - dependencyList: ${{ parameters.rocmTestDependencies }} - gpuTarget: ${{ job.target }} - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml - parameters: - componentName: roctracer - testExecutable: $(Agent.BuildDirectory)/rocm/share/roctracer/run_tests.sh - testParameters: '' - testDir: $(Agent.BuildDirectory) - testPublishResults: false - os: ${{ job.os }} - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml - parameters: - aptPackages: ${{ parameters.aptPackages }} - pipModules: ${{ parameters.pipModules }} - environment: test - gpuTarget: ${{ job.target }} - registerROCmPackages: true +- ${{ if eq(parameters.unifiedBuild, False) }}: + - ${{ each job in parameters.jobMatrix.testJobs }}: + - job: ${{ parameters.componentName }}_test_${{ job.os }}_${{ job.target }} + dependsOn: ${{ parameters.componentName }}_build_${{ job.os }}_${{ job.target }} + condition: + and(succeeded(), + eq(variables['ENABLE_${{ upper(job.target) }}_TESTS'], 'true'), + not(containsValue(split(variables['DISABLED_${{ upper(job.target) }}_TESTS'], ','), '${{ parameters.componentName }}')), + eq(${{ parameters.aggregatePipeline }}, False) + ) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: ${{ job.target }}_test_pool + workspace: + clean: all + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + packageManager: ${{ job.packageManager }} + registerROCmPackages: true + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + parameters: + preTargetFilter: ${{ parameters.componentName }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + - 
template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + checkoutRef: ${{ parameters.checkoutRef }} + dependencyList: ${{ parameters.rocmTestDependencies }} + gpuTarget: ${{ job.target }} + os: ${{ job.os }} + ${{ if parameters.triggerDownstreamJobs }}: + downstreamAggregateNames: ${{ parameters.downstreamAggregateNames }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: ${{ parameters.componentName }} + testExecutable: $(Agent.BuildDirectory)/rocm/share/roctracer/run_tests.sh + testParameters: '' + testDir: $(Agent.BuildDirectory) + testPublishResults: false + os: ${{ job.os }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/docker-container.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + environment: test + gpuTarget: ${{ job.target }} + registerROCmPackages: true diff --git a/.azuredevops/dependencies/catch2.yml b/.azuredevops/dependencies/catch2.yml new file mode 100644 index 000000000..aaf1d41be --- /dev/null +++ b/.azuredevops/dependencies/catch2.yml @@ -0,0 +1,63 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: catch2Version + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: catch2_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone catch2 ${{ parameters.catch2Version }} + inputs: + targetType: inline + script: git clone https://github.com/catchorg/Catch2.git -b ${{ parameters.catch2Version }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/Catch2/build + cmakeSourceDir: $(Agent.BuildDirectory)/Catch2 + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/dependencies/fmtlib.yml b/.azuredevops/dependencies/fmtlib.yml new file mode 100644 index 000000000..c1ee707c4 --- /dev/null +++ b/.azuredevops/dependencies/fmtlib.yml @@ -0,0 +1,67 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: fmtlibVersion + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + - libfmt-dev + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + 
- { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: fmtlib_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone fmtlib ${{ parameters.fmtlibVersion }} + inputs: + targetType: inline + script: git clone https://github.com/fmtlib/fmt.git -b ${{ parameters.fmtlibVersion }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/fmt/build + cmakeSourceDir: $(Agent.BuildDirectory)/fmt + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -DFMT_SYSTEM_HEADERS=ON + -DFMT_INSTALL=ON + -DFMT_TEST=OFF + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/dependencies/libdivide.yml b/.azuredevops/dependencies/libdivide.yml new file mode 100644 index 000000000..e20a1ccea --- /dev/null +++ b/.azuredevops/dependencies/libdivide.yml @@ -0,0 +1,64 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: libdivideVersion + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: libdivide_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - task: Bash@3 + displayName: Clone libdivide ${{ parameters.libdivideVersion }} + inputs: + targetType: inline + script: git clone https://github.com/ridiculousfish/libdivide.git -b ${{ parameters.libdivideVersion }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/libdivide/build + cmakeSourceDir: $(Agent.BuildDirectory)/libdivide + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_BUILD_TYPE=Release + -DLIBDIVIDE_BUILD_TESTS=OFF + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/dependencies/spdlog.yml b/.azuredevops/dependencies/spdlog.yml new file mode 100644 index 000000000..f561f8a52 --- /dev/null +++ b/.azuredevops/dependencies/spdlog.yml @@ -0,0 +1,71 
@@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: spdlogVersion + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - git + - ninja-build + +- name: jobMatrix + type: object + default: + buildJobs: + - { os: ubuntu2204, packageManager: apt} + - { os: almalinux8, packageManager: dnf} + +jobs: +- ${{ each job in parameters.jobMatrix.buildJobs }}: + - job: spdlog_${{ job.os }} + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: 'ubuntu-22.04' + ${{ if eq(job.os, 'almalinux8') }}: + container: + image: rocmexternalcicd.azurecr.io/manylinux228:latest + endpoint: ContainerService3 + workspace: + clean: all + steps: + - checkout: none + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + packageManager: ${{ job.packageManager }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-vendor.yml + parameters: + dependencyList: + - fmtlib + - task: Bash@3 + displayName: Clone spdlog ${{ parameters.spdlogVersion }} + inputs: + targetType: inline + script: git clone https://github.com/gabime/spdlog.git -b ${{ parameters.spdlogVersion }} + workingDirectory: $(Agent.BuildDirectory) + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + os: ${{ job.os }} + cmakeBuildDir: $(Agent.BuildDirectory)/spdlog/build + cmakeSourceDir: $(Agent.BuildDirectory)/spdlog + useAmdclang: false + extraBuildFlags: >- + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/vendor + -DCMAKE_BUILD_TYPE=Release + -DSPDLOG_USE_STD_FORMAT=OFF + -DSPDLOG_FMT_EXTERNAL_HO=ON + -DSPDLOG_INSTALL=ON + -GNinja + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + os: ${{ job.os }} diff --git a/.azuredevops/tag-builds/catch2.yml b/.azuredevops/tag-builds/catch2.yml new file mode 100644 index 000000000..ded20ab86 --- /dev/null +++ b/.azuredevops/tag-builds/catch2.yml @@ -0,0 +1,23 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: catch2Version + type: string + default: "v3.7.0" + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/catch2.yml + parameters: + catch2Version: ${{ parameters.catch2Version }} diff --git a/.azuredevops/tag-builds/fmtlib.yml b/.azuredevops/tag-builds/fmtlib.yml new file mode 100644 index 000000000..37d807b67 --- /dev/null +++ b/.azuredevops/tag-builds/fmtlib.yml @@ -0,0 +1,23 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: fmtlibVersion + type: string + default: "11.1.3" + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/fmtlib.yml + parameters: + fmtlibVersion: ${{ parameters.fmtlibVersion }} diff --git a/.azuredevops/tag-builds/libdivide.yml b/.azuredevops/tag-builds/libdivide.yml new file mode 100644 index 000000000..7ae199743 --- /dev/null +++ b/.azuredevops/tag-builds/libdivide.yml @@ -0,0 +1,23 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: libdivideVersion + type: string + 
default: master + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/libdivide.yml + parameters: + libdivideVersion: ${{ parameters.libdivideVersion }} diff --git a/.azuredevops/tag-builds/spdlog.yml b/.azuredevops/tag-builds/spdlog.yml new file mode 100644 index 000000000..300079340 --- /dev/null +++ b/.azuredevops/tag-builds/spdlog.yml @@ -0,0 +1,23 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: spdlogVersion + type: string + default: "v1.15.1" + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_DEPENDENCIES_PATH }}/spdlog.yml + parameters: + spdlogVersion: ${{ parameters.spdlogVersion }} diff --git a/.azuredevops/templates/steps/checkout.yml b/.azuredevops/templates/steps/checkout.yml index f021cbc40..4c5d58f56 100644 --- a/.azuredevops/templates/steps/checkout.yml +++ b/.azuredevops/templates/steps/checkout.yml @@ -20,7 +20,7 @@ steps: retryCountOnTaskFailure: 3 fetchFilter: blob:none ${{ if ne(parameters.sparseCheckoutDir, '') }}: - sparseCheckoutDirectories: ${{ parameters.sparseCheckoutDir }} + sparseCheckoutDirectories: ${{ parameters.sparseCheckoutDir }} shared path: sparse - ${{ if ne(parameters.sparseCheckoutDir, '') }}: - task: Bash@3 diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index 86d1b58e9..5fbb57bb5 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -63,8 +63,8 @@ parameters: developBranch: develop hasGpuTarget: false hip-tests: - pipelineId: 233 - developBranch: amd-staging + pipelineId: 362 + developBranch: develop hasGpuTarget: false hipBLAS: pipelineId: 317 @@ -171,16 +171,16 @@ parameters: developBranch: develop hasGpuTarget: false rocm-core: - pipelineId: 103 - developBranch: master + pipelineId: 349 + developBranch: develop hasGpuTarget: false rocm-examples: pipelineId: 216 developBranch: amd-staging hasGpuTarget: true rocminfo: - pipelineId: 91 - developBranch: amd-staging + pipelineId: 356 + developBranch: develop hasGpuTarget: false rocMLIR: pipelineId: 229 @@ -195,8 +195,8 @@ parameters: developBranch: master hasGpuTarget: false rocm_smi_lib: - pipelineId: 96 - developBranch: amd-staging + pipelineId: 358 + developBranch: develop hasGpuTarget: false rocPRIM: pipelineId: 273 @@ -207,7 +207,7 @@ parameters: developBranch: develop hasGpuTarget: true rocprofiler-compute: - pipelineId: 257 + pipelineId: 344 developBranch: develop hasGpuTarget: true rocprofiler-register: @@ -215,8 +215,8 @@ parameters: developBranch: develop hasGpuTarget: false rocprofiler-sdk: - pipelineId: 246 - developBranch: amd-staging + pipelineId: 347 + developBranch: develop hasGpuTarget: true rocprofiler-systems: pipelineId: 255 @@ -227,8 +227,8 @@ parameters: developBranch: develop hasGpuTarget: true ROCR-Runtime: - pipelineId: 10 - developBranch: amd-staging + pipelineId: 354 + developBranch: develop hasGpuTarget: false rocRAND: pipelineId: 274 @@ -251,8 +251,8 @@ parameters: developBranch: develop hasGpuTarget: true roctracer: - pipelineId: 141 - developBranch: amd-staging + pipelineId: 331 + developBranch: develop hasGpuTarget: true rocWMMA: pipelineId: 109 diff --git a/.azuredevops/templates/steps/dependencies-vendor.yml 
b/.azuredevops/templates/steps/dependencies-vendor.yml index 8d885b553..10086e38e 100644 --- a/.azuredevops/templates/steps/dependencies-vendor.yml +++ b/.azuredevops/templates/steps/dependencies-vendor.yml @@ -8,10 +8,14 @@ parameters: type: object default: boost: 250 + catch2: 343 + fmtlib: 341 grpc: 72 gtest: 73 half560: 68 lapack: 69 + libdivide: 342 + spdlog: 340 steps: - ${{ each dependency in parameters.dependencyList }}: @@ -29,7 +33,7 @@ steps: inputs: archiveFilePatterns: '$(Pipeline.Workspace)/d/**/*.tar.gz' destinationFolder: $(Agent.BuildDirectory)/vendor - cleanDestinationFolder: true + cleanDestinationFolder: false overwriteExistingFiles: true - task: DeleteFiles@1 displayName: Clean up ${{ dependency }} diff --git a/.wordlist.txt b/.wordlist.txt index b89c32061..cf9f990d4 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -294,6 +294,7 @@ Multicore Multithreaded MyEnvironment MyST +NANOO NBIO NBIOs NCCL @@ -672,6 +673,7 @@ github globals gnupg grayscale +gx gzip heterogenous hipBLAS @@ -744,6 +746,7 @@ logits lossy macOS matchers +maxtext megatron microarchitecture migraphx @@ -781,6 +784,7 @@ parallelizing param parameterization passthrough +pe perfcounter performant perl @@ -810,6 +814,7 @@ profiler profilers protobuf pseudorandom +px py pytorch recommender @@ -920,6 +925,7 @@ toolchain toolchains toolset toolsets +torchtitan torchvision tqdm tracebacks diff --git a/RELEASE.md b/RELEASE.md index be1527030..9d8835de8 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -57,9 +57,8 @@ ROCm documentation continues to be updated to provide clearer and more comprehen For more information about the changes, see [Changelog for the AI Developer Hub](https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/changelog.html). -* ROCm provides a comprehensive ecosystem for deep learning development. For more details, see [Deep learning frameworks for ROCm](https://rocm.docs.amd.com/en/docs-6.4.3/how-to/deep-learning-rocm.html). AMD ROCm adds support for the following deep learning frameworks: +* ROCm provides a comprehensive ecosystem for deep learning development. For more details, see [Deep learning frameworks for ROCm](https://rocm.docs.amd.com/en/docs-6.4.3/how-to/deep-learning-rocm.html). AMD ROCm adds support for the following deep learning framework: - * Taichi is an open-source, imperative, and parallel programming language designed for high-performance numerical computation. Embedded in Python, it leverages just-in-time (JIT) compilation frameworks such as LLVM to accelerate compute-intensive Python code by compiling it to native GPU or CPU instructions. It is currently supported on ROCm 6.3.2. For more information, see [Taichi compatibility](https://rocm.docs.amd.com/en/docs-6.4.3/compatibility/ml-compatibility/taichi-compatibility.html). * Megablocks is a light-weight library for mixture-of-experts (MoE) training. The core of the system is efficient "dropless-MoE" and standard MoE layers. Megablocks is integrated with Megatron-LM, where data and pipeline parallel training of MoEs is supported. It is currently supported on ROCm 6.3.0. For more information, see [Megablocks compatibility](https://rocm.docs.amd.com/en/docs-6.4.3/compatibility/ml-compatibility/megablocks-compatibility.html). * The [Data types and precision support](https://rocm.docs.amd.com/en/latest/reference/precision-support.html) topic now includes new hardware and library support information. 
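The vendor-dependency changes above (new catch2, fmtlib, libdivide, and spdlog build pipelines, their pipeline IDs registered in dependencies-vendor.yml, and archive extraction that no longer cleans the shared vendor folder) are meant to be consumed together from component templates. The following is a minimal sketch of that usage pattern, modeled on the spdlog job above; the dependency list and build flags are illustrative and are not part of this patch:

```yaml
steps:
# Stage several vendor archives into the same $(Agent.BuildDirectory)/vendor
# prefix; with cleanDestinationFolder: false, each extraction accumulates
# instead of wiping the previous one.
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-vendor.yml
  parameters:
    dependencyList:
      - fmtlib
      - spdlog
      - catch2
# Point CMake at the shared vendor prefix when configuring the component.
- template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml
  parameters:
    os: ${{ job.os }}
    useAmdclang: false
    extraBuildFlags: >-
      -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/vendor
      -DCMAKE_BUILD_TYPE=Release
      -GNinja
```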
diff --git a/docs/conf.py b/docs/conf.py index cf158903b..f852b6697 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -126,11 +126,15 @@ article_pages = [ {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.5", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/primus-megatron", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/pytorch-training", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.3", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.4", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4", "os": ["linux"]}, diff --git a/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml new file mode 100644 index 000000000..418415319 --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml @@ -0,0 +1,91 @@ +vllm_benchmark: + unified_docker: + latest: + pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812 + docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.0_20250812/images/sha256-4c277ad39af3a8c9feac9b30bf78d439c74d9b4728e788a419d3f1d0c30cacaa + rocm_version: 6.4.1 + vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641) + pytorch_version: 2.7.0+gitf717b2a + hipblaslt_version: 0.15 + model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.1 8B + mad_tag: pyt_vllm_llama-3.1-8b + model_repo: meta-llama/Llama-3.1-8B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: float16 + - model: Llama 3.1 70B + mad_tag: pyt_vllm_llama-3.1-70b + model_repo: meta-llama/Llama-3.1-70B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: float16 + - model: Llama 3.1 405B + mad_tag: pyt_vllm_llama-3.1-405b + model_repo: meta-llama/Llama-3.1-405B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct + precision: float16 + - model: Llama 2 70B + mad_tag: pyt_vllm_llama-2-70b + model_repo: meta-llama/Llama-2-70b-chat-hf + url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf + precision: float16 + - model: Llama 3.1 8B FP8 + mad_tag: pyt_vllm_llama-3.1-8b_fp8 + model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV + url: 
https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV + precision: float8 + - model: Llama 3.1 70B FP8 + mad_tag: pyt_vllm_llama-3.1-70b_fp8 + model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV + precision: float8 + - model: Llama 3.1 405B FP8 + mad_tag: pyt_vllm_llama-3.1-405b_fp8 + model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV + precision: float8 + - group: Mistral AI + tag: mistral + models: + - model: Mixtral MoE 8x7B + mad_tag: pyt_vllm_mixtral-8x7b + model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + precision: float16 + - model: Mixtral MoE 8x22B + mad_tag: pyt_vllm_mixtral-8x22b + model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 + precision: float16 + - model: Mixtral MoE 8x7B FP8 + mad_tag: pyt_vllm_mixtral-8x7b_fp8 + model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + precision: float8 + - model: Mixtral MoE 8x22B FP8 + mad_tag: pyt_vllm_mixtral-8x22b_fp8 + model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + precision: float8 + - group: Qwen + tag: qwen + models: + - model: QwQ-32B + mad_tag: pyt_vllm_qwq-32b + model_repo: Qwen/QwQ-32B + url: https://huggingface.co/Qwen/QwQ-32B + precision: float16 + - model: Qwen3 30B A3B + mad_tag: pyt_vllm_qwen3-30b-a3b + model_repo: Qwen/Qwen3-30B-A3B + url: https://huggingface.co/Qwen/Qwen3-30B-A3B + precision: float16 + - group: Microsoft Phi + tag: phi + models: + - model: Phi-4 + mad_tag: pyt_vllm_phi-4 + model_repo: microsoft/phi-4 + url: https://huggingface.co/microsoft/phi-4 diff --git a/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml b/docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml similarity index 100% rename from docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml rename to docs/data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml diff --git a/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml index cc832dffb..8f80424d3 100644 --- a/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml @@ -1,17 +1,16 @@ -sglang_benchmark: - unified_docker: - latest: - pull_tag: lmsysorg/sglang:v0.4.5-rocm630 - docker_hub_url: https://hub.docker.com/layers/lmsysorg/sglang/v0.4.5-rocm630/images/sha256-63d2cb760a237125daf6612464cfe2f395c0784e21e8b0ea37d551cd10d3c951 - rocm_version: 6.3.0 - sglang_version: 0.4.5 (0.4.5-rocm) - pytorch_version: 2.6.0a0+git8d4926e - model_groups: - - group: DeepSeek - tag: deepseek - models: - - model: DeepSeek-R1-Distill-Qwen-32B - mad_tag: pyt_sglang_deepseek-r1-distill-qwen-32b - model_repo: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B - url: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B - precision: bfloat16 +dockers: + - pull_tag: lmsysorg/sglang:v0.4.5-rocm630 + docker_hub_url: https://hub.docker.com/layers/lmsysorg/sglang/v0.4.5-rocm630/images/sha256-63d2cb760a237125daf6612464cfe2f395c0784e21e8b0ea37d551cd10d3c951 + components: + ROCm: 6.3.0 + 
SGLang: 0.4.5 (0.4.5-rocm) + PyTorch: 2.6.0a0+git8d4926e +model_groups: + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek-R1-Distill-Qwen-32B + mad_tag: pyt_sglang_deepseek-r1-distill-qwen-32b + model_repo: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B + url: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B + precision: bfloat16 diff --git a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml index 714534ef1..99d9b773b 100644 --- a/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml @@ -1,88 +1,188 @@ -vllm_benchmark: - unified_docker: - latest: - # TODO: update me - pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812 - docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.0_20250812/images/sha256-4c277ad39af3a8c9feac9b30bf78d439c74d9b4728e788a419d3f1d0c30cacaa - rocm_version: 6.4.1 - vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641) - pytorch_version: 2.7.0+gitf717b2a (2.7.0+gitf717b2a) - hipblaslt_version: 0.15 - model_groups: - - group: Meta Llama - tag: llama - models: - - model: Llama 3.1 8B - mad_tag: pyt_vllm_llama-3.1-8b - model_repo: meta-llama/Llama-3.1-8B-Instruct - url: https://huggingface.co/meta-llama/Llama-3.1-8B - precision: float16 - - model: Llama 3.1 70B - mad_tag: pyt_vllm_llama-3.1-70b - model_repo: meta-llama/Llama-3.1-70B-Instruct - url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct - precision: float16 - - model: Llama 3.1 405B - mad_tag: pyt_vllm_llama-3.1-405b - model_repo: meta-llama/Llama-3.1-405B-Instruct - url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct - precision: float16 - - model: Llama 2 70B - mad_tag: pyt_vllm_llama-2-70b - model_repo: meta-llama/Llama-2-70b-chat-hf - url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf - precision: float16 - - model: Llama 3.1 8B FP8 - mad_tag: pyt_vllm_llama-3.1-8b_fp8 - model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV - url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV - precision: float8 - - model: Llama 3.1 70B FP8 - mad_tag: pyt_vllm_llama-3.1-70b_fp8 - model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV - url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV - precision: float8 - - model: Llama 3.1 405B FP8 - mad_tag: pyt_vllm_llama-3.1-405b_fp8 - model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV - url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV - precision: float8 - - group: Mistral AI - tag: mistral - models: - - model: Mixtral MoE 8x7B - mad_tag: pyt_vllm_mixtral-8x7b - model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 - url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 - precision: float16 - - model: Mixtral MoE 8x22B - mad_tag: pyt_vllm_mixtral-8x22b - model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 - url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 - precision: float16 - - model: Mixtral MoE 8x7B FP8 - mad_tag: pyt_vllm_mixtral-8x7b_fp8 - model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV - url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV - precision: float8 - - model: Mixtral MoE 8x22B FP8 - mad_tag: pyt_vllm_mixtral-8x22b_fp8 - model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV - url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV - precision: float8 - - group: Qwen - tag: qwen - models: - - model: QwQ-32B - mad_tag: pyt_vllm_qwq-32b - model_repo: Qwen/QwQ-32B - url: 
https://huggingface.co/Qwen/QwQ-32B - precision: float16 - tunableop: true - - group: Microsoft Phi - tag: phi - models: - - model: Phi-4 - mad_tag: pyt_vllm_phi-4 - model_repo: microsoft/phi-4 - url: https://huggingface.co/microsoft/phi-4 +dockers: + - pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.1_20250909 + docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.1_20250909/images/sha256-1113268572e26d59b205792047bea0e61e018e79aeadceba118b7bf23cb3715c + components: + ROCm: 6.4.1 + vLLM: 0.10.1 (0.10.1rc2.dev409+g0b6bf6691.rocm641) + PyTorch: 2.7.0+gitf717b2a + hipBLASLt: 0.15 +model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.1 8B + mad_tag: pyt_vllm_llama-3.1-8b + model_repo: meta-llama/Llama-3.1-8B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: float16 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 70B + mad_tag: pyt_vllm_llama-3.1-70b + model_repo: meta-llama/Llama-3.1-70B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 405B + mad_tag: pyt_vllm_llama-3.1-405b + model_repo: meta-llama/Llama-3.1-405B-Instruct + url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 2 70B + mad_tag: pyt_vllm_llama-2-70b + model_repo: meta-llama/Llama-2-70b-chat-hf + url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 4096 + max_num_batched_tokens: 4096 + max_model_len: 4096 + - model: Llama 3.1 8B FP8 + mad_tag: pyt_vllm_llama-3.1-8b_fp8 + model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV + precision: float8 + config: + tp: 1 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 70B FP8 + mad_tag: pyt_vllm_llama-3.1-70b_fp8 + model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Llama 3.1 405B FP8 + mad_tag: pyt_vllm_llama-3.1-405b_fp8 + model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV + url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - group: Mistral AI + tag: mistral + models: + - model: Mixtral MoE 8x7B + mad_tag: pyt_vllm_mixtral-8x7b + model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1 + url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 32768 + max_num_batched_tokens: 32768 + max_model_len: 8192 + - model: Mixtral MoE 8x22B + mad_tag: pyt_vllm_mixtral-8x22b + model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1 + url: 
https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1 + precision: float16 + config: + tp: 8 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 65536 + max_num_batched_tokens: 65536 + max_model_len: 8192 + - model: Mixtral MoE 8x7B FP8 + mad_tag: pyt_vllm_mixtral-8x7b_fp8 + model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 32768 + max_num_batched_tokens: 32768 + max_model_len: 8192 + - model: Mixtral MoE 8x22B FP8 + mad_tag: pyt_vllm_mixtral-8x22b_fp8 + model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV + precision: float8 + config: + tp: 8 + dtype: auto + kv_cache_dtype: fp8 + max_seq_len_to_capture: 65536 + max_num_batched_tokens: 65536 + max_model_len: 8192 + - group: Qwen + tag: qwen + models: + - model: QwQ-32B + mad_tag: pyt_vllm_qwq-32b + model_repo: Qwen/QwQ-32B + url: https://huggingface.co/Qwen/QwQ-32B + precision: float16 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 131072 + max_num_batched_tokens: 131072 + max_model_len: 8192 + - model: Qwen3 30B A3B + mad_tag: pyt_vllm_qwen3-30b-a3b + model_repo: Qwen/Qwen3-30B-A3B + url: https://huggingface.co/Qwen/Qwen3-30B-A3B + precision: float16 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 32768 + max_num_batched_tokens: 32768 + max_model_len: 8192 + - group: Microsoft Phi + tag: phi + models: + - model: Phi-4 + mad_tag: pyt_vllm_phi-4 + model_repo: microsoft/phi-4 + url: https://huggingface.co/microsoft/phi-4 + config: + tp: 1 + dtype: auto + kv_cache_dtype: auto + max_seq_len_to_capture: 16384 + max_num_batched_tokens: 16384 + max_model_len: 8192 diff --git a/docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml new file mode 100644 index 000000000..5ca21898c --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml @@ -0,0 +1,72 @@ +dockers: + - pull_tag: rocm/jax-training:maxtext-v25.7 + docker_hub_url: https://hub.docker.com/layers/rocm/jax-training/maxtext-v25.7/images/sha256-45f4c727d4019a63fc47313d3a5f5a5105569539294ddfd2d742218212ae9025 + components: + ROCm: 6.4.1 + JAX: 0.5.0 + Python: 3.10.12 + Transformer Engine: 2.1.0+90d703dd + hipBLASLt: 1.x.x + - pull_tag: rocm/jax-training:maxtext-v25.7-jax060 + docker_hub_url: https://hub.docker.com/layers/rocm/jax-training/maxtext-v25.7/images/sha256-45f4c727d4019a63fc47313d3a5f5a5105569539294ddfd2d742218212ae9025 + components: + ROCm: 6.4.1 + JAX: 0.6.0 + Python: 3.10.12 + Transformer Engine: 2.1.0+90d703dd + hipBLASLt: 1.1.0-499ece1c21 +model_groups: + - group: Meta Llama + tag: llama + models: + - model: Llama 3.3 70B + mad_tag: jax_maxtext_train_llama-3.3-70b + model_repo: Llama-3.3-70B + precision: bf16 + doc_options: ["single-node"] + - model: Llama 3.1 8B + mad_tag: jax_maxtext_train_llama-3.1-8b + model_repo: Llama-3.1-8B + precision: bf16 + doc_options: ["single-node"] + - model: Llama 3.1 70B + mad_tag: jax_maxtext_train_llama-3.1-70b + model_repo: Llama-3.1-70B + precision: bf16 + doc_options: ["single-node"] + - model: Llama 3 8B + mad_tag: jax_maxtext_train_llama-3-8b + multinode_training_script: llama3_8b_multinode.sh + doc_options: ["multi-node"] + - model: Llama 3 70B + mad_tag: jax_maxtext_train_llama-3-70b + 
multinode_training_script: llama3_70b_multinode.sh + doc_options: ["multi-node"] + - model: Llama 2 7B + mad_tag: jax_maxtext_train_llama-2-7b + model_repo: Llama-2-7B + precision: bf16 + multinode_training_script: llama2_7b_multinode.sh + doc_options: ["single-node", "multi-node"] + - model: Llama 2 70B + mad_tag: jax_maxtext_train_llama-2-70b + model_repo: Llama-2-70B + precision: bf16 + multinode_training_script: llama2_70b_multinode.sh + doc_options: ["single-node", "multi-node"] + - group: DeepSeek + tag: deepseek + models: + - model: DeepSeek-V2-Lite (16B) + mad_tag: jax_maxtext_train_deepseek-v2-lite-16b + model_repo: DeepSeek-V2-lite + precision: bf16 + doc_options: ["single-node"] + - group: Mistral AI + tag: mistral + models: + - model: Mixtral 8x7B + mad_tag: jax_maxtext_train_mixtral-8x7b + model_repo: Mixtral-8x7B + precision: bf16 + doc_options: ["single-node"] diff --git a/docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml new file mode 100644 index 000000000..df0a198d5 --- /dev/null +++ b/docs/data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml @@ -0,0 +1,120 @@ +unified_docker: + latest: + pull_tag: rocm/pytorch-training:v25.6 + docker_hub_url: https://hub.docker.com/r/rocm/pytorch-training/tags + rocm_version: 6.4.1 + pytorch_version: 2.8.0a0+git7d205b2 + python_version: 3.10.17 + transformer_engine_version: 1.14.0+2f85f5f2 + flash_attention_version: 3.0.0.post1 + hipblaslt_version: 0.15.0-8c6919d + triton_version: 3.3.0 +model_groups: + - group: Pre-training + tag: pre-training + models: + - model: Llama 3.1 8B + mad_tag: pyt_train_llama-3.1-8b + model_repo: Llama-3.1-8B + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: BF16 + training_modes: [pretrain] + - model: Llama 3.1 70B + mad_tag: pyt_train_llama-3.1-70b + model_repo: Llama-3.1-70B + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct + precision: BF16 + training_modes: [pretrain] + - model: FLUX.1-dev + mad_tag: pyt_train_flux + model_repo: Flux + url: https://huggingface.co/black-forest-labs/FLUX.1-dev + precision: BF16 + training_modes: [pretrain] + - group: Fine-tuning + tag: fine-tuning + models: + - model: Llama 4 Scout 17B-16E + mad_tag: pyt_train_llama-4-scout-17b-16e + model_repo: Llama-4-17B_16E + url: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.3 70B + mad_tag: pyt_train_llama-3.3-70b + model_repo: Llama-3.3-70B + url: https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct + precision: BF16 + training_modes: [finetune_fw, finetune_lora, finetune_qlora] + - model: Llama 3.2 1B + mad_tag: pyt_train_llama-3.2-1b + model_repo: Llama-3.2-1B + url: https://huggingface.co/meta-llama/Llama-3.2-1B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.2 3B + mad_tag: pyt_train_llama-3.2-3b + model_repo: Llama-3.2-3B + url: https://huggingface.co/meta-llama/Llama-3.2-3B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.2 Vision 11B + mad_tag: pyt_train_llama-3.2-vision-11b + model_repo: Llama-3.2-Vision-11B + url: https://huggingface.co/meta-llama/Llama-3.2-11B-Vision + precision: BF16 + training_modes: [finetune_fw] + - model: Llama 3.2 Vision 90B + mad_tag: pyt_train_llama-3.2-vision-90b + model_repo: Llama-3.2-Vision-90B + url: 
https://huggingface.co/meta-llama/Llama-3.2-90B-Vision + precision: BF16 + training_modes: [finetune_fw] + - model: Llama 3.1 8B + mad_tag: pyt_train_llama-3.1-8b + model_repo: Llama-3.1-8B + url: https://huggingface.co/meta-llama/Llama-3.1-8B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3.1 70B + mad_tag: pyt_train_llama-3.1-70b + model_repo: Llama-3.1-70B + url: https://huggingface.co/meta-llama/Llama-3.1-70B + precision: BF16 + training_modes: [finetune_fw, finetune_lora, finetune_qlora] + - model: Llama 3.1 405B + mad_tag: pyt_train_llama-3.1-405b + model_repo: Llama-3.1-405B + url: https://huggingface.co/meta-llama/Llama-3.1-405B + precision: BF16 + training_modes: [finetune_qlora, HF_finetune_lora] + - model: Llama 3 8B + mad_tag: pyt_train_llama-3-8b + model_repo: Llama-3-8B + url: https://huggingface.co/meta-llama/Meta-Llama-3-8B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 3 70B + mad_tag: pyt_train_llama-3-70b + model_repo: Llama-3-70B + url: https://huggingface.co/meta-llama/Meta-Llama-3-70B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 2 7B + mad_tag: pyt_train_llama-2-7b + model_repo: Llama-2-7B + url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 + precision: BF16 + training_modes: [finetune_fw, finetune_lora, finetune_qlora] + - model: Llama 2 13B + mad_tag: pyt_train_llama-2-13b + model_repo: Llama-2-13B + url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Llama 2 70B + mad_tag: pyt_train_llama-2-70b + model_repo: Llama-2-70B + url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 + precision: BF16 + training_modes: [finetune_lora, finetune_qlora, HF_finetune_lora] diff --git a/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml b/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml index df0a198d5..dc19843be 100644 --- a/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml +++ b/docs/data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml @@ -1,38 +1,17 @@ -unified_docker: - latest: - pull_tag: rocm/pytorch-training:v25.6 - docker_hub_url: https://hub.docker.com/r/rocm/pytorch-training/tags - rocm_version: 6.4.1 - pytorch_version: 2.8.0a0+git7d205b2 - python_version: 3.10.17 - transformer_engine_version: 1.14.0+2f85f5f2 - flash_attention_version: 3.0.0.post1 - hipblaslt_version: 0.15.0-8c6919d - triton_version: 3.3.0 +dockers: + - pull_tag: rocm/pytorch-training:v25.7 + docker_hub_url: https://hub.docker.com/layers/rocm/pytorch-training/v25.7/images/sha256-cc6fd840ab89cb81d926fc29eca6d075aee9875a55a522675a4b9231c9a0a712 + components: + ROCm: 6.4.2 + PyTorch: 2.8.0a0+gitd06a406 + Python: 3.10.18 + Transformer Engine: 2.2.0.dev0+94e53dd8 + Flash Attention: 3.0.0.post1 + hipBLASLt: 1.1.0-4b9a52edfc + Triton: 3.3.0 model_groups: - - group: Pre-training - tag: pre-training - models: - - model: Llama 3.1 8B - mad_tag: pyt_train_llama-3.1-8b - model_repo: Llama-3.1-8B - url: https://huggingface.co/meta-llama/Llama-3.1-8B - precision: BF16 - training_modes: [pretrain] - - model: Llama 3.1 70B - mad_tag: pyt_train_llama-3.1-70b - model_repo: Llama-3.1-70B - url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct - precision: BF16 - training_modes: [pretrain] - - model: FLUX.1-dev - mad_tag: pyt_train_flux - model_repo: Flux - url: 
https://huggingface.co/black-forest-labs/FLUX.1-dev - precision: BF16 - training_modes: [pretrain] - - group: Fine-tuning - tag: fine-tuning + - group: Meta Llama + tag: llama models: - model: Llama 4 Scout 17B-16E mad_tag: pyt_train_llama-4-scout-17b-16e @@ -75,19 +54,19 @@ model_groups: model_repo: Llama-3.1-8B url: https://huggingface.co/meta-llama/Llama-3.1-8B precision: BF16 - training_modes: [finetune_fw, finetune_lora] + training_modes: [pretrain, finetune_fw, finetune_lora, HF_pretrain] - model: Llama 3.1 70B mad_tag: pyt_train_llama-3.1-70b model_repo: Llama-3.1-70B - url: https://huggingface.co/meta-llama/Llama-3.1-70B + url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct precision: BF16 - training_modes: [finetune_fw, finetune_lora, finetune_qlora] + training_modes: [pretrain, finetune_fw, finetune_lora] - model: Llama 3.1 405B mad_tag: pyt_train_llama-3.1-405b model_repo: Llama-3.1-405B url: https://huggingface.co/meta-llama/Llama-3.1-405B precision: BF16 - training_modes: [finetune_qlora, HF_finetune_lora] + training_modes: [finetune_qlora] - model: Llama 3 8B mad_tag: pyt_train_llama-3-8b model_repo: Llama-3-8B @@ -117,4 +96,67 @@ model_groups: model_repo: Llama-2-70B url: https://github.com/meta-llama/llama-models/tree/main/models/llama2 precision: BF16 - training_modes: [finetune_lora, finetune_qlora, HF_finetune_lora] + training_modes: [finetune_lora, finetune_qlora] + - group: OpenAI + tag: openai + models: + - model: GPT OSS 20B + mad_tag: pyt_train_gpt_oss_20b + model_repo: GPT-OSS-20B + url: https://huggingface.co/openai/gpt-oss-20b + precision: BF16 + training_modes: [HF_finetune_lora] + - model: GPT OSS 120B + mad_tag: pyt_train_gpt_oss_120b + model_repo: GPT-OSS-120B + url: https://huggingface.co/openai/gpt-oss-120b + precision: BF16 + training_modes: [HF_finetune_lora] + - group: Qwen + tag: qwen + models: + - model: Qwen 3 8B + mad_tag: pyt_train_qwen3-8b + model_repo: Qwen3-8B + url: https://huggingface.co/Qwen/Qwen3-8B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Qwen 3 32B + mad_tag: pyt_train_qwen3-32b + model_repo: Qwen3-32 + url: https://huggingface.co/Qwen/Qwen3-32B + precision: BF16 + training_modes: [finetune_lora] + - model: Qwen 2.5 32B + mad_tag: pyt_train_qwen2.5-32b + model_repo: Qwen2.5-32B + url: https://huggingface.co/Qwen/Qwen2.5-32B + precision: BF16 + training_modes: [finetune_lora] + - model: Qwen 2.5 72B + mad_tag: pyt_train_qwen2.5-72b + model_repo: Qwen2.5-72B + url: https://huggingface.co/Qwen/Qwen2.5-72B + precision: BF16 + training_modes: [finetune_lora] + - model: Qwen 2 1.5B + mad_tag: pyt_train_qwen2-1.5b + model_repo: Qwen2-1.5B + url: https://huggingface.co/Qwen/Qwen2-1.5B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - model: Qwen 2 7B + mad_tag: pyt_train_qwen2-7b + model_repo: Qwen2-7B + url: https://huggingface.co/Qwen/Qwen2-7B + precision: BF16 + training_modes: [finetune_fw, finetune_lora] + - group: Flux + tag: flux + models: + - model: FLUX.1-dev + mad_tag: pyt_train_flux + model_repo: Flux + url: https://huggingface.co/black-forest-labs/FLUX.1-dev + precision: BF16 + training_modes: [pretrain] diff --git a/docs/how-to/deep-learning-rocm.rst b/docs/how-to/deep-learning-rocm.rst index e7448a074..a242e8520 100644 --- a/docs/how-to/deep-learning-rocm.rst +++ b/docs/how-to/deep-learning-rocm.rst @@ -23,93 +23,114 @@ The table below summarizes information about ROCm-enabled deep learning framewor - Installation options - GitHub - * - `PyTorch `_ + * - `PyTorch `__ - .. 
raw:: html - + - - - `Docker image `_ - - `Wheels package `_ - - `ROCm Base Docker image `_ - - `Upstream Docker file `_ + - `Docker image `__ + - `Wheels package `__ + - `ROCm Base Docker image `__ + - `Upstream Docker file `__ - .. raw:: html - + - - * - `TensorFlow `_ + + * - `TensorFlow `__ - .. raw:: html - + - - - `Docker image `_ - - `Wheels package `_ + - `Docker image `__ + - `Wheels package `__ - .. raw:: html - + - * - `JAX `_ + * - `JAX `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - - * - `verl `_ + + * - `verl `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - * - `Stanford Megatron-LM `_ + * - `Stanford Megatron-LM `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - - * - `DGL `_ + + * - `DGL `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - * - `Megablocks `_ + * - `Megablocks `__ - .. raw:: html - + - - - `Docker image `_ + - `Docker image `__ - .. raw:: html - + - - * - `Taichi `_ + + * - `Taichi `__ - .. raw:: html - + - - - `Docker image `_ - - `Wheels package `_ + - `Docker image `__ + - `Wheels package `__ - .. raw:: html - - + + + * - `Ray `__ + - .. raw:: html + + + - + - `Docker image `__ + - `Wheels package `__ + - `ROCm Base Docker image `__ + - .. raw:: html + + + + * - `llama.cpp `__ + - .. raw:: html + + + - + - `Docker image `__ + - .. raw:: html + + * - `Ray `__ - .. raw:: html @@ -146,10 +167,3 @@ through the following guides. * :doc:`Use ROCm for AI inference optimization ` - - - - - - - diff --git a/docs/how-to/rocm-for-ai/inference-optimization/workload.rst b/docs/how-to/rocm-for-ai/inference-optimization/workload.rst index 0580e7434..bc9463f58 100644 --- a/docs/how-to/rocm-for-ai/inference-optimization/workload.rst +++ b/docs/how-to/rocm-for-ai/inference-optimization/workload.rst @@ -939,7 +939,7 @@ hipBLASLt benchmarking The GEMM library `hipBLASLt `_ provides a benchmark tool for its supported operations. Refer to the -`documentation `_ +`documentation `_ for details. * Example 1: Benchmark mix fp8 GEMM diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst new file mode 100644 index 000000000..68d7f66e7 --- /dev/null +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812.rst @@ -0,0 +1,445 @@ +:orphan: + +.. meta:: + :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and the + ROCm vLLM Docker image. + :keywords: model, MAD, automation, dashboarding, validate + +********************************** +vLLM inference performance testing +********************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm vLLM + inference performance documentation. See :doc:`../vllm` for the latest version. + +.. _vllm-benchmark-unified-docker-812: + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + The `ROCm vLLM Docker <{{ unified_docker.docker_hub_url }}>`_ image offers + a prebuilt, optimized environment for validating large language model (LLM) + inference performance on AMD Instinct™ MI300X series accelerators. 
This ROCm vLLM
+    Docker image integrates vLLM and PyTorch tailored specifically for MI300X series
+    accelerators and includes the following components:
+
+    .. list-table::
+        :header-rows: 1
+
+        * - Software component
+          - Version
+
+        * - `ROCm `__
+          - {{ unified_docker.rocm_version }}
+
+        * - `vLLM `__
+          - {{ unified_docker.vllm_version }}
+
+        * - `PyTorch `__
+          - {{ unified_docker.pytorch_version }}
+
+        * - `hipBLASLt `__
+          - {{ unified_docker.hipblaslt_version }}
+
+With this Docker image, you can quickly test the :ref:`expected
+inference performance numbers ` for
+MI300X series accelerators.
+
+What's new
+==========
+
+The following is a summary of notable changes since the :doc:`previous ROCm/vLLM Docker release `.
+
+* Upgraded to vLLM v0.10.
+
+* FP8 KV cache support via AITER.
+
+* Full graph capture support via AITER.
+
+Supported models
+================
+
+.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml
+
+    {% set unified_docker = data.vllm_benchmark.unified_docker.latest %}
+    {% set model_groups = data.vllm_benchmark.model_groups %}
+
+    .. _vllm-benchmark-available-models-812:
+
+    The following models are supported for inference performance benchmarking
+    with vLLM and ROCm. Some instructions, commands, and recommendations in this
+    documentation might vary by model -- select one to get started.
+
+    .. raw:: html
+
+        [raw HTML model selector: a "Model group" dropdown listing
+        {{ model_group.group }} for each entry in model_groups, and a "Model"
+        dropdown listing {{ model.model }} for each model in each group
+        (markup tags omitted)]
+
+ + .. _vllm-benchmark-vllm-812: + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{model.mad_tag}} + + .. note:: + + See the `{{ model.model }} model card on Hugging Face <{{ model.url }}>`_ to learn more about your selected model. + Some models require access authorization prior to use via an external license agreement through a third party. + + {% endfor %} + {% endfor %} + +.. note:: + + vLLM is a toolkit and library for LLM inference and serving. AMD implements + high-performance custom kernels and modules in vLLM to enhance performance. + See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for + more information. + +.. _vllm-benchmark-performance-measurements-812: + +Performance measurements +======================== + +To evaluate performance, the +`Performance results with AMD ROCm software `_ +page provides reference throughput and serving measurements for inferencing popular AI models. + +.. important:: + + The performance data presented in + `Performance results with AMD ROCm software `_ + only reflects the latest version of this inference benchmarking environment. + The listed measurements should not be interpreted as the peak performance achievable by AMD Instinct MI325X and MI300X accelerators or ROCm software. + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. + +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.10.0_20250812-benchmark-models.yaml + + {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} + {% set model_groups = data.vllm_benchmark.model_groups %} + + Pull the Docker image + ===================== + + Download the `ROCm vLLM Docker image <{{ unified_docker.docker_hub_url }}>`_. + Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + + Benchmarking + ============ + + Once the setup is complete, choose between two options to reproduce the + benchmark results: + + .. _vllm-benchmark-mad-812: + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{model.mad_tag}} + + .. tab-set:: + + .. tab-item:: MAD-integrated benchmarking + + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + 2. Use this command to run the performance benchmark test on the `{{model.model}} <{{ model.url }}>`_ model + using one GPU with the :literal:`{{model.precision}}` data type on the host machine. + + .. 
code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{model.mad_tag}} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{model.mad_tag}}``. The throughput and serving reports of the + model are collected in the following paths: ``{{ model.mad_tag }}_throughput.csv`` + and ``{{ model.mad_tag }}_serving.csv``. + + Although the :ref:`available models + ` are preconfigured to collect + offline throughput and online serving performance data, you can + also change the benchmarking parameters. See the standalone + benchmarking tab for more information. + + {% if model.tunableop %} + + .. note:: + + For improved performance, consider enabling :ref:`PyTorch TunableOp `. + TunableOp automatically explores different implementations and configurations of certain PyTorch + operators to find the fastest one for your hardware. + + By default, ``{{model.mad_tag}}`` runs with TunableOp disabled (see + ``__). To enable it, include + the ``--tunableop on`` argument in your run. + + Enabling TunableOp triggers a two-pass run -- a warm-up followed by the + performance-collection run. + + {% endif %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required scripts + + 1. Run the vLLM benchmark tool independently by starting the + `Docker container <{{ unified_docker.docker_hub_url }}>`_ + as shown in the following snippet. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + docker run -it \ + --device=/dev/kfd \ + --device=/dev/dri \ + --group-add video \ + --shm-size 16G \ + --security-opt seccomp=unconfined \ + --security-opt apparmor=unconfined \ + --cap-add=SYS_PTRACE \ + -v $(pwd):/workspace \ + --env HUGGINGFACE_HUB_CACHE=/workspace \ + --name test \ + {{ unified_docker.pull_tag }} + + 2. In the Docker container, clone the ROCm MAD repository and navigate to the + benchmark scripts directory at ``~/MAD/scripts/vllm``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/vllm + + 3. To start the benchmark, use the following command with the appropriate options. + + .. code-block:: + + ./run.sh \ + --config $CONFIG_CSV \ + --model_repo {{ model.model_repo }} \ + + + .. dropdown:: Benchmark options + :open: + + .. list-table:: + :header-rows: 1 + :align: center + + * - Name + - Options + - Description + + * - ``--config`` + - ``configs/default.csv`` + - Run configs from the CSV for the chosen model repo and benchmark. + + * - + - ``configs/extended.csv`` + - + + * - + - ``configs/performance.csv`` + - + + * - ``--benchmark`` + - ``throughput`` + - Measure offline end-to-end throughput. + + * - + - ``serving`` + - Measure online serving performance. + + * - + - ``all`` + - Measure both throughput and serving. + + * - `` + - See `run.sh `__ for more info. + - Additional overrides to the config CSV. + + The input sequence length, output sequence length, and tensor parallel (TP) are + already configured. You don't need to specify them with this script. + + .. note:: + + For best performance, it's recommended to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. + + If you encounter the following error, pass your access-authorized Hugging + Face token to the gated models. + + .. code-block:: + + OSError: You are trying to access a gated repo. + + # pass your HF_TOKEN + export HF_TOKEN=$your_personal_hf_token + + .. 
rubric:: Benchmarking examples + + Here are some examples of running the benchmark with various options: + + * Throughput benchmark + + Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: shell + + export MAD_MODEL_NAME={{ model.mad_tag }} + ./run.sh \ + --config configs/default.csv \ + --model_repo {{model.model_repo}} \ + --benchmark throughput + + Find the throughput benchmark report at ``./{{ model.mad_tag }}_throughput.csv``. + + * Serving benchmark + + Use this command to benchmark the serving performance of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. + + .. code-block:: + + export MAD_MODEL_NAME={{ model.mad_tag }} + ./run.sh \ + --config configs/default.csv \ + --model_repo {{model.model_repo}} \ + --benchmark serving + + Find the serving benchmark report at ``./{{ model.mad_tag }}_serving.csv``. + + .. raw:: html + + + + .. note:: + + Throughput is calculated as: + + - .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time + + - .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time + {% endfor %} + {% endfor %} + +Advanced usage +============== + +For information on experimental features and known issues related to ROCm optimization efforts on vLLM, +see the developer's guide at ``__. + +Reproducing the Docker image +---------------------------- + +To reproduce this ROCm/vLLM Docker image release, follow these steps: + +1. Clone the `vLLM repository `__. + + .. code-block:: shell + + git clone https://github.com/ROCm/vllm.git + +2. Checkout the specific release commit. + + .. code-block:: shell + + cd vllm + git checkout 340ea86dfe5955d6f9a9e767d6abab5aacf2c978 + +3. Build the Docker image. Replace ``vllm-rocm`` with your desired image tag. + + .. code-block:: shell + + docker build -f docker/Dockerfile.rocm -t vllm-rocm . + +Further reading +=============== + +- To learn more about the options for latency and throughput benchmark scripts, + see ``_. + +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. + +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. + +- For application performance optimization strategies for HPC and AI workloads, + including inference with vLLM, see :doc:`/how-to/rocm-for-ai/inference-optimization/workload`. + +- To learn how to run community models from Hugging Face on AMD GPUs, see + :doc:`Running models from Hugging Face `. + +- To learn how to fine-tune LLMs and optimize inference, see + :doc:`Fine-tuning LLMs and inference optimization `. + +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. + +Previous versions +================= + +See :doc:`vllm-history` to find documentation for previous releases +of the ``ROCm/vllm`` Docker image. 
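+
+As a quick, illustrative check of the throughput formulas in the note above, the
+following Python sketch plugs hypothetical values into both expressions. The
+numbers (1024 prompts, 128 input and 128 output tokens, 60 seconds elapsed) are
+made up for illustration only and are not measured results.
+
+.. code-block:: python
+
+    # Hypothetical example values -- not measured results.
+    requests = 1024        # number of prompts in the run
+    input_len = 128        # input sequence length per request
+    output_len = 128       # output sequence length per request
+    elapsed_time = 60.0    # wall-clock time of the run, in seconds
+
+    # Total throughput: prompt plus generated tokens per second.
+    throughput_tot = requests * (input_len + output_len) / elapsed_time
+    # Generation throughput: generated tokens per second only.
+    throughput_gen = requests * output_len / elapsed_time
+
+    print(f"throughput_tot: {throughput_tot:.1f} tokens/s")  # ~4369.1
+    print(f"throughput_gen: {throughput_gen:.1f} tokens/s")  # ~2184.5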
diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst index 9e0f4443a..9f6d001ad 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715.rst @@ -16,7 +16,7 @@ vLLM inference performance testing .. _vllm-benchmark-unified-docker-715: -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} @@ -46,7 +46,7 @@ vLLM inference performance testing - {{ unified_docker.hipblaslt_version }} With this Docker image, you can quickly test the :ref:`expected -inference performance numbers ` for +inference performance numbers ` for MI300X series accelerators. What's new @@ -69,7 +69,7 @@ The following is summary of notable changes since the :doc:`previous ROCm/vLLM D Supported models ================ -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} @@ -162,7 +162,7 @@ To test for optimal performance, consult the recommended :ref:`System health ben `. This suite of tests will help you verify and fine-tune your system's configuration. -.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark_models.yaml +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/previous-versions/vllm_0.9.1_20250715-benchmark-models.yaml {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} {% set model_groups = data.vllm_benchmark.model_groups %} @@ -219,7 +219,7 @@ system's configuration. ``container_ci-{{model.mad_tag}}``. The latency and throughput reports of the model are collected in the following path: ``~/MAD/reports_{{model.precision}}/``. - Although the :ref:`available models ` are preconfigured + Although the :ref:`available models ` are preconfigured to collect latency and throughput performance data, you can also change the benchmarking parameters. See the standalone benchmarking tab for more information. diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst index 6f87670ec..857a1ee0b 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history.rst @@ -7,7 +7,7 @@ vLLM inference performance testing version history This table lists previous versions of the ROCm vLLM inference Docker image for inference performance testing. For detailed information about available models for benchmarking, see the version-specific documentation. You can find tagged -previous releases of the ``ROCm/vllm`` Docker image on `Docker Hub `__. +previous releases of the ``ROCm/vllm`` Docker image on `Docker Hub `__. .. 
list-table:: :header-rows: 1 diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst index b9e22bf33..ad8db53c4 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference.rst @@ -31,26 +31,30 @@ PyTorch inference performance testing .. raw:: html
-        [removed raw HTML model selector: a "Model" dropdown listing
-        {{ model_group.group }} for each entry in model_groups (markup tags omitted)]
+        [updated raw HTML model-selector markup added (tags omitted)]
{% for model_group in model_groups %} diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst index 340ef975e..1722b2018 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/sglang.rst @@ -2,19 +2,19 @@ :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and SGLang :keywords: model, MAD, automation, dashboarding, validate -************************************ -SGLang inference performance testing -************************************ +***************************************************************** +SGLang inference performance testing DeepSeek-R1-Distill-Qwen-32B +***************************************************************** .. _sglang-benchmark-unified-docker: .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml - {% set unified_docker = data.sglang_benchmark.unified_docker.latest %} + {% set docker = data.dockers[0] %} `SGLang `__ is a high-performance inference and serving engine for large language models (LLMs) and vision models. The - ROCm-enabled `SGLang Docker image <{{ unified_docker.docker_hub_url }}>`__ + ROCm-enabled `SGLang Docker image <{{ docker.docker_hub_url }}>`__ bundles SGLang with PyTorch, optimized for AMD Instinct MI300X series accelerators. It includes the following software components: @@ -24,14 +24,10 @@ SGLang inference performance testing * - Software component - Version - * - `ROCm `__ - - {{ unified_docker.rocm_version }} - - * - `SGLang `__ - - {{ unified_docker.sglang_version }} - - * - `PyTorch `__ - - {{ unified_docker.pytorch_version }} + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} System validation ================= @@ -50,8 +46,8 @@ system's configuration. .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/sglang-benchmark-models.yaml - {% set unified_docker = data.sglang_benchmark.unified_docker.latest %} - {% set model_groups = data.sglang_benchmark.model_groups %} + {% set unified_docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} Pull the Docker image ===================== diff --git a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst index 02c992620..f2b060ebd 100644 --- a/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst +++ b/docs/how-to/rocm-for-ai/inference/benchmark-docker/vllm.rst @@ -7,14 +7,13 @@ vLLM inference performance testing ********************************** -.. _vllm-benchmark-unified-docker-812: +.. _vllm-benchmark-unified-docker-909: .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml - {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} - {% set model_groups = data.vllm_benchmark.model_groups %} + {% set docker = data.dockers[0] %} - The `ROCm vLLM Docker <{{ unified_docker.docker_hub_url }}>`_ image offers + The `ROCm vLLM Docker <{{ docker.docker_hub_url }}>`_ image offers a prebuilt, optimized environment for validating large language model (LLM) inference performance on AMD Instinct™ MI300X series accelerators. 
This ROCm vLLM Docker image integrates vLLM and PyTorch tailored specifically for MI300X series @@ -26,20 +25,13 @@ vLLM inference performance testing * - Software component - Version - * - `ROCm `__ - - {{ unified_docker.rocm_version }} - - * - `vLLM `__ - - {{ unified_docker.vllm_version }} - - * - `PyTorch `__ - - {{ unified_docker.pytorch_version }} - - * - `hipBLASLt `__ - - {{ unified_docker.hipblaslt_version }} + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} With this Docker image, you can quickly test the :ref:`expected -inference performance numbers ` for +inference performance numbers ` for MI300X series accelerators. What's new @@ -47,21 +39,23 @@ What's new The following is summary of notable changes since the :doc:`previous ROCm/vLLM Docker release `. -* Upgraded to vLLM v0.10. +* Upgraded to vLLM v0.10.1. -* FP8 KV cache support via AITER. +* Set ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1`` by default for better performance. -* Full graph capture support via AITER. +* Set ``VLLM_ROCM_USE_AITER_RMSNORM=0`` by default to avoid various issues with torch compile. + +.. _vllm-benchmark-supported-models-909: Supported models ================ .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml - {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} - {% set model_groups = data.vllm_benchmark.model_groups %} + {% set docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} - .. _vllm-benchmark-available-models-812: + .. _vllm-benchmark-available-models-909: The following models are supported for inference performance benchmarking with vLLM and ROCm. Some instructions, commands, and recommendations in this @@ -70,55 +64,51 @@ Supported models .. raw:: html
-        [removed raw HTML model selector: "Model group" and "Model" dropdowns
-        populated from model_groups (markup tags omitted)]
+        [added raw HTML model selector: "Model" and "Variant" dropdowns
+        populated from model_groups (markup tags omitted)]
- .. _vllm-benchmark-vllm-812: + .. _vllm-benchmark-vllm-909: {% for model_group in model_groups %} {% for model in model_group.models %} - .. container:: model-doc {{model.mad_tag}} + .. container:: model-doc {{ model.mad_tag }} .. note:: See the `{{ model.model }} model card on Hugging Face <{{ model.url }}>`_ to learn more about your selected model. Some models require access authorization prior to use via an external license agreement through a third party. + {% if model.precision == "float8" and model.model_repo.startswith("amd") %} + This model uses FP8 quantization via `AMD Quark `__ for efficient inference on AMD accelerators. + {% endif %} {% endfor %} {% endfor %} -.. note:: - - vLLM is a toolkit and library for LLM inference and serving. AMD implements - high-performance custom kernels and modules in vLLM to enhance performance. - See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for - more information. - -.. _vllm-benchmark-performance-measurements-812: +.. _vllm-benchmark-performance-measurements-909: Performance measurements ======================== @@ -151,18 +141,18 @@ system's configuration. .. datatemplate:yaml:: /data/how-to/rocm-for-ai/inference/vllm-benchmark-models.yaml - {% set unified_docker = data.vllm_benchmark.unified_docker.latest %} - {% set model_groups = data.vllm_benchmark.model_groups %} + {% set docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} Pull the Docker image ===================== - Download the `ROCm vLLM Docker image <{{ unified_docker.docker_hub_url }}>`_. + Download the `ROCm vLLM Docker image <{{ docker.docker_hub_url }}>`_. Use the following command to pull the Docker image from Docker Hub. .. code-block:: shell - docker pull {{ unified_docker.pull_tag }} + docker pull {{ docker.pull_tag }} Benchmarking ============ @@ -170,7 +160,7 @@ system's configuration. Once the setup is complete, choose between two options to reproduce the benchmark results: - .. _vllm-benchmark-mad-812: + .. _vllm-benchmark-mad-909: {% for model_group in model_groups %} {% for model in model_group.models %} @@ -181,6 +171,9 @@ system's configuration. .. tab-item:: MAD-integrated benchmarking + The following run command is tailored to {{ model.model }}. + See :ref:`vllm-benchmark-supported-models-909` to switch to another available model. + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local directory and install the required packages on the host machine. @@ -208,7 +201,7 @@ system's configuration. and ``{{ model.mad_tag }}_serving.csv``. Although the :ref:`available models - ` are preconfigured to collect + ` are preconfigured to collect offline throughput and online serving performance data, you can also change the benchmarking parameters. See the standalone benchmarking tab for more information. @@ -232,132 +225,143 @@ system's configuration. .. tab-item:: Standalone benchmarking - .. rubric:: Download the Docker image and required scripts + The following commands are optimized for {{ model.model }}. + See :ref:`vllm-benchmark-supported-models-909` to switch to another available model. - 1. Run the vLLM benchmark tool independently by starting the - `Docker container <{{ unified_docker.docker_hub_url }}>`_ - as shown in the following snippet. + .. seealso:: + + For more information on configuration, see the `config files + `__ + in the MAD repository. 
Refer to the `vLLM engine `__ + for descriptions of available configuration options + and `Benchmarking vLLM `__ for + additional benchmarking information. + + .. rubric:: Launch the container + + You can run the vLLM benchmark tool independently by starting the + `Docker container <{{ docker.docker_hub_url }}>`_ as shown + in the following snippet. + + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + docker run -it \ + --device=/dev/kfd \ + --device=/dev/dri \ + --group-add video \ + --shm-size 16G \ + --security-opt seccomp=unconfined \ + --security-opt apparmor=unconfined \ + --cap-add=SYS_PTRACE \ + -v $(pwd):/workspace \ + --env HUGGINGFACE_HUB_CACHE=/workspace \ + --name test \ + {{ docker.pull_tag }} + + .. rubric:: Throughput command + + Use the following command to start the throughput benchmark. + + .. code-block:: shell + + model={{ model.model_repo }} + tp={{ model.config.tp }} + num_prompts=1024 + in=128 + out=128 + dtype={{ model.config.dtype }} + kv_cache_dtype={{ model.config.kv_cache_dtype }} + max_num_seqs=1024 + max_seq_len_to_capture={{ model.config.max_seq_len_to_capture }} + max_num_batched_tokens={{ model.config.max_num_batched_tokens }} + max_model_len={{ model.config.max_model_len }} + + vllm bench throughput --model $model \ + -tp $tp \ + --num-prompts $num_prompts \ + --input-len $in \ + --output-len $out \ + --dtype $dtype \ + --kv-cache-dtype $kv_cache_dtype \ + --max-num-seqs $max_num_seqs \ + --max-seq-len-to-capture $max_seq_len_to_capture \ + --max-num-batched-tokens $max_num_batched_tokens \ + --max-model-len $max_model_len \ + --trust-remote-code \ + --output-json ${model}_throughput.json \ + --gpu-memory-utilization 0.9 + + .. rubric:: Serving command + + 1. Start the server using the following command: .. code-block:: shell - docker pull {{ unified_docker.pull_tag }} - docker run -it \ - --device=/dev/kfd \ - --device=/dev/dri \ - --group-add video \ - --shm-size 16G \ - --security-opt seccomp=unconfined \ - --security-opt apparmor=unconfined \ - --cap-add=SYS_PTRACE \ - -v $(pwd):/workspace \ - --env HUGGINGFACE_HUB_CACHE=/workspace \ - --name test \ - {{ unified_docker.pull_tag }} + model={{ model.model_repo }} + tp={{ model.config.tp }} + dtype={{ model.config.dtype }} + kv_cache_dtype={{ model.config.kv_cache_dtype }} + max_num_seqs=256 + max_seq_len_to_capture={{ model.config.max_seq_len_to_capture }} + max_num_batched_tokens={{ model.config.max_num_batched_tokens }} + max_model_len={{ model.config.max_model_len }} - 2. In the Docker container, clone the ROCm MAD repository and navigate to the - benchmark scripts directory at ``~/MAD/scripts/vllm``. + vllm serve $model \ + -tp $tp \ + --dtype $dtype \ + --kv-cache-dtype $kv_cache_dtype \ + --max-num-seqs $max_num_seqs \ + --max-seq-len-to-capture $max_seq_len_to_capture \ + --max-num-batched-tokens $max_num_batched_tokens \ + --max-model-len $max_model_len \ + --no-enable-prefix-caching \ + --swap-space 16 \ + --disable-log-requests \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 + + Wait until the model has loaded and the server is ready to accept requests. + + 2. On another terminal on the same machine, run the benchmark: .. code-block:: shell - git clone https://github.com/ROCm/MAD - cd MAD/scripts/vllm + # Connect to the container + docker exec -it test bash - 3. To start the benchmark, use the following command with the appropriate options. 
+ # Wait for the server to start + until curl -s http://localhost:8000/v1/models; do sleep 30; done + + # Run the benchmark + model={{ model.model_repo }} + max_concurrency=1 + num_prompts=10 + in=128 + out=128 + vllm bench serve --model $model \ + --percentile-metrics "ttft,tpot,itl,e2el" \ + --dataset-name random \ + --ignore-eos \ + --max-concurrency $max_concurrency \ + --num-prompts $num_prompts \ + --random-input-len $in \ + --random-output-len $out \ + --trust-remote-code \ + --save-result \ + --result-filename ${model}_serving.json + + .. note:: + + If you encounter the following error, pass your access-authorized Hugging + Face token to the gated models. .. code-block:: - ./run.sh \ - --config $CONFIG_CSV \ - --model_repo {{ model.model_repo }} \ - + OSError: You are trying to access a gated repo. - .. dropdown:: Benchmark options - :open: - - .. list-table:: - :header-rows: 1 - :align: center - - * - Name - - Options - - Description - - * - ``--config`` - - ``configs/default.csv`` - - Run configs from the CSV for the chosen model repo and benchmark. - - * - - - ``configs/extended.csv`` - - - - * - - - ``configs/performance.csv`` - - - - * - ``--benchmark`` - - ``throughput`` - - Measure offline end-to-end throughput. - - * - - - ``serving`` - - Measure online serving performance. - - * - - - ``all`` - - Measure both throughput and serving. - - * - `` - - See `run.sh `__ for more info. - - Additional overrides to the config CSV. - - The input sequence length, output sequence length, and tensor parallel (TP) are - already configured. You don't need to specify them with this script. - - .. note:: - - For best performance, it's recommended to run with ``VLLM_V1_USE_PREFILL_DECODE_ATTENTION=1``. - - If you encounter the following error, pass your access-authorized Hugging - Face token to the gated models. - - .. code-block:: - - OSError: You are trying to access a gated repo. - - # pass your HF_TOKEN - export HF_TOKEN=$your_personal_hf_token - - .. rubric:: Benchmarking examples - - Here are some examples of running the benchmark with various options: - - * Throughput benchmark - - Use this command to benchmark the throughput of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. - - .. code-block:: shell - - export MAD_MODEL_NAME={{ model.mad_tag }} - ./run.sh \ - --config configs/default.csv \ - --model_repo {{model.model_repo}} \ - --benchmark throughput - - Find the throughput benchmark report at ``./{{ model.mad_tag }}_throughput.csv``. - - * Serving benchmark - - Use this command to benchmark the serving performance of the {{model.model}} model on eight GPUs with :literal:`{{model.precision}}` precision. - - .. code-block:: - - export MAD_MODEL_NAME={{ model.mad_tag }} - ./run.sh \ - --config configs/default.csv \ - --model_repo {{model.model_repo}} \ - --benchmark serving - - Find the serving benchmark report at ``./{{ model.mad_tag }}_serving.csv``. + # pass your HF_TOKEN + export HF_TOKEN=$your_personal_hf_token .. raw:: html @@ -382,7 +386,7 @@ Advanced usage ============== For information on experimental features and known issues related to ROCm optimization efforts on vLLM, -see the developer's guide at ``__. +see the developer's guide at ``__. Reproducing the Docker image ---------------------------- @@ -400,7 +404,7 @@ To reproduce this ROCm/vLLM Docker image release, follow these steps: .. code-block:: shell cd vllm - git checkout 340ea86dfe5955d6f9a9e767d6abab5aacf2c978 + git checkout 6663000a391911eba96d7864a26ac42b07f6ef29 3. 
Build the Docker image. Replace ``vllm-rocm`` with your desired image tag. @@ -419,15 +423,12 @@ Further reading - To learn more about system settings and management practices to configure your system for AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. +- See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for + a brief introduction to vLLM and optimization strategies. + - For application performance optimization strategies for HPC and AI workloads, including inference with vLLM, see :doc:`/how-to/rocm-for-ai/inference-optimization/workload`. -- To learn how to run community models from Hugging Face on AMD GPUs, see - :doc:`Running models from Hugging Face `. - -- To learn how to fine-tune LLMs and optimize inference, see - :doc:`Fine-tuning LLMs and inference optimization `. - - For a list of other ready-made Docker images for AI with ROCm, see `AMD Infinity Hub `_. diff --git a/docs/how-to/rocm-for-ai/install.rst b/docs/how-to/rocm-for-ai/install.rst index 6847d06b4..cb949cb31 100644 --- a/docs/how-to/rocm-for-ai/install.rst +++ b/docs/how-to/rocm-for-ai/install.rst @@ -22,9 +22,9 @@ If you’re new to ROCm, refer to the :doc:`ROCm quick start install guide for L `. If you’re using a Radeon GPU for graphics-accelerated applications, refer to the -`Radeon installation instructions `_. +`Radeon installation instructions `_. -You can install ROCm on :ref:`compatible systems ` via your Linux +You can install ROCm on :doc:`compatible systems ` via your Linux distribution's package manager. See the following documentation resources to get started: * :doc:`ROCm installation overview ` diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst index bb364e42a..76c3582e7 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext.rst @@ -2,9 +2,9 @@ :description: How to train a model using JAX MaxText for ROCm. :keywords: ROCm, AI, LLM, train, jax, torch, Llama, flux, tutorial, docker -************************************** -Training a model with MaxText for ROCm -************************************** +****************************************** +Training a model with JAX MaxText for ROCm +****************************************** MaxText is a high-performance, open-source framework built on the Google JAX machine learning library to train LLMs at scale. The MaxText framework for @@ -12,70 +12,108 @@ ROCm is an optimized fork of the upstream ``__ enabling efficient AI workloads on AMD MI300X series accelerators. -The MaxText for ROCm training Docker (``rocm/jax-training:maxtext-v25.5``) image +The MaxText for ROCm training Docker image provides a prebuilt environment for training on AMD Instinct MI300X and MI325X accelerators, including essential components like JAX, XLA, ROCm libraries, and MaxText utilities. 
It includes the following software components: -+--------------------------+--------------------------------+ -| Software component | Version | -+==========================+================================+ -| ROCm | 6.3.4 | -+--------------------------+--------------------------------+ -| JAX | 0.4.35 | -+--------------------------+--------------------------------+ -| Python | 3.10.12 | -+--------------------------+--------------------------------+ -| Transformer Engine | 1.12.0.dev0+b8b92dc | -+--------------------------+--------------------------------+ -| hipBLASLt | 0.13.0-ae9c477a | -+--------------------------+--------------------------------+ +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml -Supported features and models -============================= + {% set dockers = data.dockers %} + .. tab-set:: -MaxText provides the following key features to train large language models efficiently: + {% for docker in dockers %} + {% set jax_version = docker.components["JAX"] %} + + .. tab-item:: JAX {{ jax_version }} + :sync: {{ docker.pull_tag }} + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + + {% endfor %} + {% if jax_version == "0.6.0" %} + .. note:: + + Shardy is a new config in JAX 0.6.0. You might get related errors if it's + not configured correctly. For now you can turn it off by setting + ``shardy=False`` during the training run. You can also follow the `migration + guide `__ to enable + it. + + The provided multi-node training scripts in this documentation are + not currently supported with JAX 0.6.0. For multi-node training, use the JAX 0.5.0 + Docker image. + {% endif %} + + {% endfor %} + +MaxText with on ROCm provides the following key features to train large language models efficiently: - Transformer Engine (TE) -- Flash Attention (FA) 3 +- Flash Attention (FA) 3 -- with or without sequence input packing - GEMM tuning - Multi-node support -.. _amd-maxtext-model-support: +- NANOO FP8 quantization support -The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. +.. _amd-maxtext-model-support-v257: -* Llama 3.3 70B +Supported models +================ -* Llama 3.1 8B +The following models are pre-optimized for performance on AMD Instinct MI300 +series accelerators. Some instructions, commands, and available training +configurations in this documentation might vary by model -- select one to get +started. -* Llama 3.1 70B +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml -* Llama 3 8B + {% set model_groups = data.model_groups %} + .. raw:: html -* Llama 3 70B +
+        [raw HTML model selector: a "Model" dropdown listing
+        {{ model_group.group }} for each entry in model_groups, and a "Variant"
+        dropdown listing {{ model.model }} for each model in each group
+        (markup tags omitted)]
+
-* Llama 2 7B
-
-* Llama 2 70B
-
-* DeepSeek-V2-Lite
.. note:: Some models, such as Llama 3, require an external license agreement through a third party (for example, Meta). -Unsupported features --------------------- - -Currently, MaxText's default packed input format is not supported. Using this format -with the current Docker image results in incorrect attention calculations -across different input sequences. Support for packed input format is planned for a future release. - System validation ================= @@ -98,14 +136,14 @@ This Docker image is optimized for specific model configurations outlined as follows. Performance can vary for other training workloads, as AMD doesn’t validate configurations and run conditions outside those described. -.. _amd-maxtext-multi-node-setup: +.. _amd-maxtext-multi-node-setup-v257: Multi-node setup ---------------- For multi-node environments, ensure you have all the necessary packages for your network device, such as, RDMA. If you're not using a multi-node setup -with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. +with RDMA, skip ahead to :ref:`amd-maxtext-get-started-v257`. 1. Install the following packages to build and install the RDMA driver. @@ -170,7 +208,7 @@ with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. e. RDMA interface - Ensure the :ref:`required packages ` are installed on all nodes. + Ensure the :ref:`required packages ` are installed on all nodes. Then, set the RDMA interfaces to use for communication. .. code-block:: bash @@ -180,196 +218,203 @@ with RDMA, skip ahead to :ref:`amd-maxtext-download-docker`. # If using Mellanox NIC export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9 -.. _amd-maxtext-download-docker: +.. _amd-maxtext-get-started-v257: -Pull the Docker image ---------------------- +Benchmarking +============ -1. Use the following command to pull the Docker image from Docker Hub. +Once the setup is complete, choose between two options to reproduce the +benchmark results: - .. code-block:: shell +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/jax-maxtext-benchmark-models.yaml - docker pull rocm/jax-training:maxtext-v25.5 + .. _vllm-benchmark-mad: -2. Use the following command to launch the Docker container. Note that the benchmarking scripts - used in the :ref:`following section ` automatically launch the Docker container - and execute the benchmark. + {% set dockers = data.dockers %} + {% set model_groups = data.model_groups %} + {% for model_group in model_groups %} + {% for model in model_group.models %} - .. code-block:: shell + .. container:: model-doc {{model.mad_tag}} - docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME/.ssh:/root/.ssh --shm-size 128G --name maxtext_training rocm/jax-training:maxtext-v25.5 + .. tab-set:: -.. _amd-maxtext-get-started: + {% if model.mad_tag and "single-node" in model.doc_options %} + .. tab-item:: MAD-integrated benchmarking -Getting started + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + 2. Use this command to run the performance benchmark test on the {{ model.model }} model + using one GPU with the :literal:`{{model.precision}}` data type on the host machine. + + .. 
code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{model.mad_tag}} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{model.mad_tag}}``. The latency and throughput reports of the + model are collected in the following path: ``~/MAD/perf.csv/``. + {% endif %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required scripts + + Run the JAX MaxText benchmark tool independently by starting the + Docker container as shown in the following snippet. + + .. tab-set:: + {% for docker in dockers %} + {% set jax_version = docker.components["JAX"] %} + + .. tab-item:: JAX {{ jax_version }} + :sync: {{ docker.pull_tag }} + + .. code-block:: shell + + docker pull {{ docker.pull_tag }} + {% endfor %} + + {% if model.model_repo and "single-node" in model.doc_options %} + .. rubric:: Single node training + + 1. Set up environment variables. + + .. code-block:: shell + + export MAD_SECRETS_HFTOKEN= + export HF_HOME= + + ``MAD_SECRETS_HFTOKEN`` is your Hugging Face access token to access models, tokenizers, and data. + See `User access tokens `__. + + ``HF_HOME`` is where ``huggingface_hub`` will store local data. See `huggingface_hub CLI `__. + If you already have downloaded or cached Hugging Face artifacts, set this variable to that path. + Downloaded files typically get cached to ``~/.cache/huggingface``. + + 2. Launch the Docker container. + + .. tab-set:: + {% for docker in dockers %} + {% set jax_version = docker.components["JAX"] %} + + .. tab-item:: JAX {{ jax_version }} + :sync: {{ docker.pull_tag }} + + .. code-block:: shell + + docker run -it \ + --device=/dev/dri \ + --device=/dev/kfd \ + --network host \ + --ipc host \ + --group-add video \ + --cap-add=SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + -v $HOME/.ssh:/root/.ssh \ + -v $HF_HOME:/hf_cache \ + -e HF_HOME=/hf_cache \ + -e MAD_SECRETS_HFTOKEN=$MAD_SECRETS_HFTOKEN + --shm-size 64G \ + --name training_env \ + {{ docker.pull_tag }} + {% endfor %} + + 3. In the Docker container, clone the ROCm MAD repository and navigate to the + benchmark scripts directory at ``MAD/scripts/jax-maxtext``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/jax-maxtext + + 4. Run the setup scripts to install libraries and datasets needed + for benchmarking. + + .. code-block:: shell + + ./jax-maxtext_benchmark_setup.sh -m {{ model.model_repo }} + + 5. To run the training benchmark without quantization, use the following command: + + .. code-block:: shell + + ./jax-maxtext_benchmark_report.sh -m {{ model.model_repo }} + + For quantized training, use the following command: + + .. code-block:: shell + + ./jax-maxtext_benchmark_report.sh -m {{ model.model_repo }} -q nanoo_fp8 + + .. important:: + + Quantized training is not supported with the JAX 0.6.0 Docker image; support + will be added in a future release. For quantized training, use the JAX 0.5.0 + Docker image: ``rocm/jax-training:maxtext-v25.7``. + + {% endif %} + {% if model.multinode_training_script and "multi-node" in model.doc_options %} + .. rubric:: Multi-node training + + The following examples use SLURM to run on multiple nodes. + + .. note:: + + The following scripts will launch the Docker container and run the + benchmark. Run them outside of any Docker container. + + 1. Make sure ``$HF_HOME`` is set before running the test. 
See + `ROCm benchmarking `__ + for more details on downloading the Llama models before running the + benchmark. + + 2. To run multi-node training for {{ model.model }}, + use the + `multi-node training script `__ + under the ``scripts/jax-maxtext/gpu-rocm/`` directory. + + 3. Run the multi-node training benchmark script. + + .. code-block:: shell + + sbatch -N {{ model.multinode_training_script }} + + {% else %} + .. rubric:: Multi-node training + + For multi-node training examples, choose a model from :ref:`amd-maxtext-model-support-v257` + with an available `multi-node training script `__. + {% endif %} + {% endfor %} + {% endfor %} + +Further reading =============== -The following examples demonstrate how to get started with single node -and multi-node training using the benchmarking scripts provided at -``__. +- See the ROCm/maxtext benchmarking README at ``__. -.. important:: +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. - The provided scripts launch a Docker container and execute a benchmark. Ensure you run these commands outside of any existing Docker container. +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. -Before running any benchmarks, ensure the ``$HF_HOME`` environment variable is -set correctly and points to your Hugging Face cache directory. Refer to the -README at ``__ -for more detailed instructions. - -Single node training benchmarking examples ------------------------------------------- - -* Example 1: Single node training with Llama 2 7B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_7b.sh - -* Example 2: Single node training with Llama 2 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_70b.sh - -* Example 3: Single node training with Llama 3 8B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_8b.sh - -* Example 4: Single node training with Llama 3 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_70b.sh - -* Example 5: Single node training with Llama 3.3 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3.3_70b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3.3_70b.sh - -* Example 6: Single node training with DeepSeek V2 16B - - Download the benchmarking script: - - .. 
code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/deepseek_v2_16b.sh - - Run the single node training benchmark: - - .. code-block:: shell - - IMAGE="rocm/jax-training:maxtext-v25.5" bash ./deepseek_v2_16b.sh - - .. note:: - - The reported TFLOP/s by MaxText for DeepSeek is not accurate. Use - the tokens/s as a performance indicator. - -Multi-node training benchmarking examples ------------------------------------------ - -The following examples use SLURM for running on multiple nodes -- the commands might need to be adjusted for your -own cluster setup. - -* Example 1: Multi-node training with Llama 2 7B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama2_7b_multinode.sh - -* Example 2: Multi-node training with Llama 2 70B - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama2_70b_multinode.sh - -* Example 3: Multi-node training with Llama 3 8B model - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama3_8b_multinode.sh - -* Example 4: Multi-node training with Llama 3 70B model - - Download the benchmarking script: - - .. code-block:: shell - - wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b_multinode.sh - - Run the multi-node training benchmark. For example: - - .. code-block:: shell - - sbatch -N llama3_70b_multinode.sh +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. Previous versions ================= diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst index 687cc514f..4df1da960 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/megatron-lm.rst @@ -70,32 +70,32 @@ workloads: {% set model_groups = data.model_groups %} .. raw:: html -
-        [removed raw HTML model selector: "Model" and "Model variant" dropdowns
-        populated from model_groups (markup tags omitted)]
+        [added raw HTML model selector: "Model" and "Variant" dropdowns
+        populated from model_groups (markup tags omitted)]
.. note:: diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst index b67d1ac3a..e4d039356 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history.rst @@ -17,12 +17,21 @@ previous releases of the ``ROCm/jax-training`` Docker image on `Docker Hub ` + * `Docker Hub (JAX 0.6.0) `__ + * `Docker Hub (JAX 0.5.0) `__ + + * - 25.5 - * ROCm 6.3.4 * JAX 0.4.35 - - * :doc:`Documentation <../jax-maxtext>` + * :doc:`Documentation ` * `Docker Hub `__ * - 25.4 diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst index 03836c9fc..3fe728c35 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4.rst @@ -51,7 +51,7 @@ MaxText provides the following key features to train large language models effic - Multi-node support -.. _amd-maxtext-model-support: +.. _amd-maxtext-model-support-v254: The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst new file mode 100644 index 000000000..9bd7081d2 --- /dev/null +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5.rst @@ -0,0 +1,385 @@ +:orphan: + +.. meta:: + :description: How to train a model using JAX MaxText for ROCm. + :keywords: ROCm, AI, LLM, train, jax, torch, Llama, flux, tutorial, docker + +************************************** +Training a model with MaxText for ROCm +************************************** + +.. caution:: + + This documentation does not reflect the latest version of ROCm JAX MaxText + training performance documentation. See :doc:`../jax-maxtext` for the latest version. + +MaxText is a high-performance, open-source framework built on the Google JAX +machine learning library to train LLMs at scale. The MaxText framework for +ROCm is an optimized fork of the upstream +``__ enabling efficient AI workloads +on AMD MI300X series accelerators. + +The MaxText for ROCm training Docker (``rocm/jax-training:maxtext-v25.5``) image +provides a prebuilt environment for training on AMD Instinct MI300X and MI325X accelerators, +including essential components like JAX, XLA, ROCm libraries, and MaxText utilities. 
+It includes the following software components: + ++--------------------------+--------------------------------+ +| Software component | Version | ++==========================+================================+ +| ROCm | 6.3.4 | ++--------------------------+--------------------------------+ +| JAX | 0.4.35 | ++--------------------------+--------------------------------+ +| Python | 3.10.12 | ++--------------------------+--------------------------------+ +| Transformer Engine | 1.12.0.dev0+b8b92dc | ++--------------------------+--------------------------------+ +| hipBLASLt | 0.13.0-ae9c477a | ++--------------------------+--------------------------------+ + +Supported features and models +============================= + +MaxText provides the following key features to train large language models efficiently: + +- Transformer Engine (TE) + +- Flash Attention (FA) 3 + +- GEMM tuning + +- Multi-node support + +.. _amd-maxtext-model-support-v255: + +The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. + +* Llama 3.3 70B + +* Llama 3.1 8B + +* Llama 3.1 70B + +* Llama 3 8B + +* Llama 3 70B + +* Llama 2 7B + +* Llama 2 70B + +* DeepSeek-V2-Lite + +.. note:: + + Some models, such as Llama 3, require an external license agreement through + a third party (for example, Meta). + +Unsupported features +-------------------- + +Currently, MaxText's default packed input format is not supported. Using this format +with the current Docker image results in incorrect attention calculations +across different input sequences. Support for packed input format is planned for a future release. + +System validation +================= + +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. + +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. + +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. + +Environment setup +================= + +This Docker image is optimized for specific model configurations outlined +as follows. Performance can vary for other training workloads, as AMD +doesn’t validate configurations and run conditions outside those described. + +.. _amd-maxtext-multi-node-setup-v255: + +Multi-node setup +---------------- + +For multi-node environments, ensure you have all the necessary packages for +your network device, such as, RDMA. If you're not using a multi-node setup +with RDMA, skip ahead to :ref:`amd-maxtext-download-docker-v255`. + +1. Install the following packages to build and install the RDMA driver. + + .. code-block:: shell + + sudo apt install iproute2 -y + sudo apt install -y linux-headers-"$(uname-r)" libelf-dev + sudo apt install -y gcc make libtool autoconf librdmacm-dev rdmacm-utils infiniband-diags ibverbs-utils perftest ethtool libibverbs-dev rdma-core strace libibmad5 libibnetdisc5 ibverbs-providers libibumad-dev libibumad3 libibverbs1 libnl-3-dev libnl-route-3-dev + + Refer to your NIC manufacturer's documentation for further steps on + compiling and installing the RoCE driver. 
For example, for Broadcom, + see `Compiling Broadcom NIC software from source `_ + in `Ethernet networking guide for AMD Instinct MI300X GPU clusters `_. + +2. Set the following environment variables. + + a. Master address + + Change ``localhost`` to the master node's resolvable hostname or IP address: + + .. code-block:: bash + + export MASTER_ADDR="${MASTER_ADDR:-localhost}" + + b. Number of nodes + + Set the number of nodes you want to train on (for example, ``2``, ``4``, or ``8``): + + .. code-block:: bash + + export NNODES="${NNODES:-1}" + + c. Node ranks + + Set the rank of each node (``0`` for master, ``1`` for the first worker node, and so on) + Node ranks should be unique across all nodes in the cluster. + + .. code-block:: bash + + export NODE_RANK="${NODE_RANK:-0}" + + d. Network interface + + Update the network interface in the script to match your system's network interface. To + find your network interface, run the following (outside of any Docker container): + + .. code-block:: bash + + ip a + + Look for an active interface with an IP address in the same subnet as + your other nodes. Then, update the following variable in the script, for + example: + + .. code-block:: bash + + export NCCL_SOCKET_IFNAME=ens50f0np0 + + This variable specifies which network interface to use for inter-node communication. + Setting this variable to the incorrect interface can result in communication failures + or significantly reduced performance. + + e. RDMA interface + + Ensure the :ref:`required packages ` are installed on all nodes. + Then, set the RDMA interfaces to use for communication. + + .. code-block:: bash + + # If using Broadcom NIC + export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7 + # If using Mellanox NIC + export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9 + +.. _amd-maxtext-download-docker-v255: + +Pull the Docker image +--------------------- + +1. Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull rocm/jax-training:maxtext-v25.5 + +2. Use the following command to launch the Docker container. Note that the benchmarking scripts + used in the :ref:`following section ` automatically launch the Docker container + and execute the benchmark. + + .. code-block:: shell + + docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME/.ssh:/root/.ssh --shm-size 128G --name maxtext_training rocm/jax-training:maxtext-v25.5 + +.. _amd-maxtext-get-started-v255: + +Getting started +=============== + +The following examples demonstrate how to get started with single node +and multi-node training using the benchmarking scripts provided at +``__. + +.. important:: + + The provided scripts launch a Docker container and execute a benchmark. Ensure you run these commands outside of any existing Docker container. + +Before running any benchmarks, ensure the ``$HF_HOME`` environment variable is +set correctly and points to your Hugging Face cache directory. Refer to the +README at ``__ +for more detailed instructions. + +Single node training benchmarking examples +------------------------------------------ + +* Example 1: Single node training with Llama 2 7B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b.sh + + Run the single node training benchmark: + + .. 
code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_7b.sh + +* Example 2: Single node training with Llama 2 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama2_70b.sh + +* Example 3: Single node training with Llama 3 8B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_8b.sh + +* Example 4: Single node training with Llama 3 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3_70b.sh + +* Example 5: Single node training with Llama 3.3 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3.3_70b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./llama3.3_70b.sh + +* Example 6: Single node training with DeepSeek V2 16B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/deepseek_v2_16b.sh + + Run the single node training benchmark: + + .. code-block:: shell + + IMAGE="rocm/jax-training:maxtext-v25.5" bash ./deepseek_v2_16b.sh + + .. note:: + + The reported TFLOP/s by MaxText for DeepSeek is not accurate. Use + the tokens/s as a performance indicator. + +Multi-node training benchmarking examples +----------------------------------------- + +The following examples use SLURM for running on multiple nodes -- the commands might need to be adjusted for your +own cluster setup. + +* Example 1: Multi-node training with Llama 2 7B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_7b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama2_7b_multinode.sh + +* Example 2: Multi-node training with Llama 2 70B + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama2_70b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama2_70b_multinode.sh + +* Example 3: Multi-node training with Llama 3 8B model + + Download the benchmarking script: + + .. code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_8b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama3_8b_multinode.sh + +* Example 4: Multi-node training with Llama 3 70B model + + Download the benchmarking script: + + .. 
code-block:: shell + + wget https://raw.githubusercontent.com/ROCm/maxtext/refs/heads/main/benchmarks/gpu-rocm/llama3_70b_multinode.sh + + Run the multi-node training benchmark. For example: + + .. code-block:: shell + + sbatch -N llama3_70b_multinode.sh + +Previous versions +================= + +See :doc:`jax-maxtext-history` to find documentation for previous releases +of the ``ROCm/jax-training`` Docker image. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst index a9d99378e..c18b1dfea 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev.rst @@ -18,7 +18,7 @@ Training a model with ROCm Megatron-LM The ROCm Megatron-LM framework is a specialized fork of the robust Megatron-LM, designed to enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD Instinct™ MI300X accelerators, AMD Megatron-LM delivers enhanced scalability, performance, and resource utilization for AI -workloads. It is purpose-built to :ref:`support models ` +workloads. It is purpose-built to :ref:`support models ` like Meta's Llama 2, Llama 3, and Llama 3.1, enabling developers to train next-generation AI models with greater efficiency. See the GitHub repository at ``__. @@ -67,7 +67,7 @@ Megatron-LM provides the following key features to train large language models e - Pre-training -.. _amd-megatron-lm-model-support: +.. _amd-megatron-lm-model-support-24-12: The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst index 3a2f23322..e039aff8a 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3.rst @@ -67,7 +67,7 @@ Megatron-LM provides the following key features to train large language models e - Pre-training -.. _amd-megatron-lm-model-support: +.. _amd-megatron-lm-model-support-25-3: The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator. @@ -278,7 +278,7 @@ handle a variety of input sequences, including unseen words or domain-specific t .. tab-item:: Llama :sync: llama - To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer``. + To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer``. To train any of Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``. Set the Hugging Face model link in the ``TOKENIZER_MODEL`` variable. @@ -292,7 +292,7 @@ handle a variety of input sequences, including unseen words or domain-specific t .. tab-item:: DeepSeek V2 :sync: deepseek - To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. + To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. 
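As an illustration of the tokenizer guidance in the hunk above, selecting a tokenizer typically amounts to exporting the tokenizer class and, for Hugging Face tokenizers, the model link before launching training. In the sketch below, ``TOKENIZER_MODEL`` comes from this guide, while ``TOKENIZER_TYPE`` and the launch script path are illustrative assumptions, not values confirmed by this documentation.

.. code-block:: shell

   # Hypothetical sketch: pick the tokenizer that matches the model family.
   # TOKENIZER_MODEL is described in the guide above; TOKENIZER_TYPE and the
   # script path are placeholders and may differ in your setup.
   export TOKENIZER_TYPE=HuggingFaceTokenizer       # Llama2Tokenizer or DeepSeekV2Tokenizer for those families
   export TOKENIZER_MODEL=meta-llama/Llama-3.1-8B   # Hugging Face model link for Llama 3 and 3.1 models
   bash examples/llama/train_llama3.sh              # placeholder launch script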
Multi-node training ^^^^^^^^^^^^^^^^^^^ diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst index 76e5eb716..9d7c7ecd6 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4.rst @@ -67,7 +67,7 @@ Megatron-LM provides the following key features to train large language models e - Pre-training -.. _amd-megatron-lm-model-support: +.. _amd-megatron-lm-model-support-25-4: The following models are pre-optimized for performance on AMD Instinct MI300X series accelerators. @@ -291,7 +291,7 @@ or ``${DATA_DIR}/tokenizer_llama2``. .. tab-item:: Llama :sync: llama - To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer`` + To train any of the Llama 2 models that :ref:`this Docker image supports `, use the ``Llama2Tokenizer`` or the default ``HuggingFaceTokenizer``. To train any of Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``. @@ -320,7 +320,7 @@ or ``${DATA_DIR}/tokenizer_llama2``. .. tab-item:: DeepSeek V2 :sync: deepseek - To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. + To train any of the DeepSeek V2 models that :ref:`this Docker image supports `, use the ``DeepSeekV2Tokenizer``. Multi-node training ^^^^^^^^^^^^^^^^^^^ diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst index 1535f1d43..07d640159 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history.rst @@ -16,12 +16,20 @@ previous releases of the ``ROCm/pytorch-training`` Docker image on `Docker Hub < - Components - Resources + * - v25.7 + - + * ROCm 6.4.2 + * PyTorch 2.8.0a0+gitd06a406 + - + * :doc:`Documentation <../pytorch-training>` + * `Docker Hub `__ + * - v25.6 - * ROCm 6.3.4 * PyTorch 2.8.0a0+git7d205b2 - - * :doc:`Documentation <../pytorch-training>` + * :doc:`Documentation ` * `Docker Hub `__ * - v25.5 diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst index a43297657..e68a1092b 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5.rst @@ -437,3 +437,8 @@ Once the setup is complete, choose between two options to start benchmarking: ./pytorch_benchmark_report.sh -t HF_finetune_lora -p BF16 -m Llama-2-70B +Previous versions +================= + +See :doc:`pytorch-training-history` to find documentation for previous releases +of the ``ROCm/pytorch-training`` Docker image. 
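For reference, the ``pytorch_benchmark_report.sh`` command added just before the "Previous versions" section above follows the pattern ``-t`` (training mode), ``-p`` (precision), ``-m`` (model). The sketch below shows another plausible combination; the ``finetune_lora`` mode is taken from the later PyTorch training guides in this changeset, and the exact model identifier accepted by ``-m`` is an assumption that should be checked against the models the guide lists.

.. code-block:: shell

   # Hypothetical variation of the benchmark command shown above:
   # -t selects the training mode, -p the precision, -m the model.
   ./pytorch_benchmark_report.sh -t finetune_lora -p BF16 -m Llama-3.1-8B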
diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst
new file mode 100644
index 000000000..f9bc57a43
--- /dev/null
+++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6.rst
@@ -0,0 +1,456 @@
+:orphan:
+
+.. meta::
+   :description: How to train a model using PyTorch for ROCm.
+   :keywords: ROCm, AI, LLM, train, PyTorch, torch, Llama, flux, tutorial, docker
+
+**************************************
+Training a model with PyTorch for ROCm
+**************************************
+
+.. caution::
+
+   This documentation does not reflect the latest version of ROCm PyTorch
+   training performance documentation. See :doc:`../pytorch-training` for the latest version.
+
+PyTorch is an open-source machine learning framework that is widely used for
+model training with GPU-optimized components for transformer-based models.
+
+The `PyTorch for ROCm training Docker `_
+(``rocm/pytorch-training:v25.6``) image provides a prebuilt optimized environment for fine-tuning and pretraining a
+model on AMD Instinct MI325X and MI300X accelerators. It includes the following software components to accelerate
+training workloads:
+
++--------------------------+--------------------------------+
+| Software component       | Version                        |
++==========================+================================+
+| ROCm                     | 6.3.4                          |
++--------------------------+--------------------------------+
+| PyTorch                  | 2.8.0a0+git7d205b2             |
++--------------------------+--------------------------------+
+| Python                   | 3.10.17                        |
++--------------------------+--------------------------------+
+| Transformer Engine       | 1.14.0+2f85f5f2                |
++--------------------------+--------------------------------+
+| Flash Attention          | 3.0.0.post1                    |
++--------------------------+--------------------------------+
+| hipBLASLt                | 0.15.0-8c6919d                 |
++--------------------------+--------------------------------+
+| Triton                   | 3.3.0                          |
++--------------------------+--------------------------------+
+
+.. _amd-pytorch-training-model-support-v256:
+
+Supported models
+================
+
+The following models are pre-optimized for performance on the AMD Instinct MI325X and MI300X accelerators.
+
+.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/previous-versions/pytorch-training-v25.6-benchmark-models.yaml
+
+   {% set unified_docker = data.unified_docker.latest %}
+   {% set model_groups = data.model_groups %}
+
+   .. raw:: html
+
+
+
Workload
+
+ {% for model_group in model_groups %} +
{{ model_group.group }}
+ {% endfor %} +
+
+ +
+
Model
+
+ {% for model_group in model_groups %} + {% set models = model_group.models %} + {% for model in models %} + {% if models|length % 3 == 0 %} +
{{ model.model }}
+ {% else %} +
{{ model.model }}
+ {% endif %} + {% endfor %} + {% endfor %} +
+
+
+ + .. note:: + + Some models require an external license agreement through a third party (for example, Meta). + + .. _amd-pytorch-training-performance-measurements-v256: + + Performance measurements + ======================== + + To evaluate performance, the + `Performance results with AMD ROCm software `_ + page provides reference throughput and latency measurements for training + popular AI models. + + .. note:: + + The performance data presented in + `Performance results with AMD ROCm software `_ + should not be interpreted as the peak performance achievable by AMD + Instinct MI325X and MI300X accelerators or ROCm software. + + System validation + ================= + + Before running AI workloads, it's important to validate that your AMD hardware is configured + correctly and performing optimally. + + If you have already validated your system settings, including aspects like NUMA auto-balancing, you + can skip this step. Otherwise, complete the procedures in the :ref:`System validation and + optimization ` guide to properly configure your system settings + before starting training. + + To test for optimal performance, consult the recommended :ref:`System health benchmarks + `. This suite of tests will help you verify and fine-tune your + system's configuration. + + This Docker image is optimized for specific model configurations outlined + below. Performance can vary for other training workloads, as AMD + doesn’t validate configurations and run conditions outside those described. + + Benchmarking + ============ + + Once the setup is complete, choose between two options to start benchmarking: + + .. tab-set:: + + .. tab-item:: MAD-integrated benchmarking + + Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt + + {% for model_group in model_groups %} + {% for model in model_group.models %} + + .. container:: model-doc {{ model.mad_tag }} + + For example, use this command to run the performance benchmark test on the {{ model.model }} model + using one GPU with the {{ model.precision }} data type on the host machine. + + .. code-block:: shell + + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{ model.mad_tag }} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 + + MAD launches a Docker container with the name + ``container_ci-{{ model.mad_tag }}``, for example. The latency and throughput reports of the + model are collected in the following path: ``~/MAD/perf.csv``. + + {% endfor %} + {% endfor %} + + .. tab-item:: Standalone benchmarking + + .. rubric:: Download the Docker image and required packages + + Use the following command to pull the Docker image from Docker Hub. + + .. code-block:: shell + + docker pull {{ unified_docker.pull_tag }} + + Run the Docker container. + + .. code-block:: shell + + docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME:$HOME -v $HOME/.ssh:/root/.ssh --shm-size 64G --name training_env {{ unified_docker.pull_tag }} + + Use these commands if you exit the ``training_env`` container and need to return to it. + + .. 
code-block:: shell + + docker start training_env + docker exec -it training_env bash + + In the Docker container, clone the ``__ + repository and navigate to the benchmark scripts directory + ``/workspace/MAD/scripts/pytorch_train``. + + .. code-block:: shell + + git clone https://github.com/ROCm/MAD + cd MAD/scripts/pytorch_train + + .. rubric:: Prepare training datasets and dependencies + + The following benchmarking examples require downloading models and datasets + from Hugging Face. To ensure successful access to gated repos, set your + ``HF_TOKEN``. + + .. code-block:: shell + + export HF_TOKEN=$your_personal_hugging_face_access_token + + Run the setup script to install libraries and datasets needed for benchmarking. + + .. code-block:: shell + + ./pytorch_benchmark_setup.sh + + .. container:: model-doc pyt_train_llama-3.1-8b + + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 8B: + + .. list-table:: + :header-rows: 1 + + * - Library + - Reference + + * - ``accelerate`` + - `Hugging Face Accelerate `_ + + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 + + .. container:: model-doc pyt_train_llama-3.1-70b + + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 70B: + + .. list-table:: + :header-rows: 1 + + * - Library + - Reference + + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 + + * - ``torchdata`` + - `TorchData `_ + + * - ``tomli`` + - `Tomli `_ + + * - ``tiktoken`` + - `tiktoken `_ + + * - ``blobfile`` + - `blobfile `_ + + * - ``tabulate`` + - `tabulate `_ + + * - ``wandb`` + - `Weights & Biases `_ + + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 + + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 + + .. container:: model-doc pyt_train_flux + + ``pytorch_benchmark_setup.sh`` installs the following libraries for FLUX: + + .. list-table:: + :header-rows: 1 + + * - Library + - Reference + + * - ``accelerate`` + - `Hugging Face Accelerate `_ + + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 + + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 + + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 + + * - ``csvkit`` + - `csvkit `_ 2.0.1 + + * - ``deepspeed`` + - `DeepSpeed `_ 0.16.2 + + * - ``diffusers`` + - `Hugging Face Diffusers `_ 0.31.0 + + * - ``GitPython`` + - `GitPython `_ 3.1.44 + + * - ``opencv-python-headless`` + - `opencv-python-headless `_ 4.10.0.84 + + * - ``peft`` + - `PEFT `_ 0.14.0 + + * - ``protobuf`` + - `Protocol Buffers `_ 5.29.2 + + * - ``pytest`` + - `PyTest `_ 8.3.4 + + * - ``python-dotenv`` + - `python-dotenv `_ 1.0.1 + + * - ``seaborn`` + - `Seaborn `_ 0.13.2 + + * - ``transformers`` + - `Transformers `_ 4.47.0 + + ``pytorch_benchmark_setup.sh`` downloads the following datasets from Hugging Face: + + * `bghira/pseudo-camera-10k `_ + + {% for model_group in model_groups %} + {% for model in model_group.models %} + {% if model_group.tag == "pre-training" and model.mad_tag in ["pyt_train_llama-3.1-8b", "pyt_train_llama-3.1-70b", "pyt_train_flux"] %} + + .. container:: model-doc {{ model.mad_tag }} + + .. rubric:: Pretraining + + To start the pre-training benchmark, use the following command with the + appropriate options. See the following list of options and their descriptions. + + .. code-block:: shell + + ./pytorch_benchmark_report.sh -t pretrain -m {{ model.model_repo }} -p $datatype -s $sequence_length + + .. 
list-table:: + :header-rows: 1 + + * - Name + - Options + - Description + + {% if model.mad_tag == "pyt_train_llama-3.1-8b" %} + * - ``$datatype`` + - ``BF16`` or ``FP8`` + - Only Llama 3.1 8B supports FP8 precision. + {% else %} + * - ``$datatype`` + - ``BF16`` + - Only Llama 3.1 8B supports FP8 precision. + {% endif %} + + * - ``$sequence_length`` + - Sequence length for the language model. + - Between 2048 and 8192. 8192 by default. + + {% if model.mad_tag == "pyt_train_flux" %} + .. container:: model-doc {{ model.mad_tag }} + + .. note:: + + Occasionally, downloading the Flux dataset might fail. In the event of this + error, manually download it from Hugging Face at + `black-forest-labs/FLUX.1-dev `_ + and save it to `/workspace/FluxBenchmark`. This ensures that the test script can access + the required dataset. + {% endif %} + {% endif %} + + {% if model_group.tag == "fine-tuning" %} + .. container:: model-doc {{ model.mad_tag }} + + .. rubric:: Fine-tuning + + To start the fine-tuning benchmark, use the following command with the + appropriate options. See the following list of options and their descriptions. + + .. code-block:: shell + + ./pytorch_benchmark_report.sh -t $training_mode -m {{ model.model_repo }} -p BF16 -s $sequence_length + + .. list-table:: + :header-rows: 1 + + * - Name + - Options + - Description + + * - ``$training_mode`` + - ``finetune_fw`` + - Full weight fine-tuning (BF16 supported) + + * - + - ``finetune_lora`` + - LoRA fine-tuning (BF16 supported) + + * - + - ``finetune_qlora`` + - QLoRA fine-tuning (BF16 supported) + + * - + - ``HF_finetune_lora`` + - LoRA fine-tuning with Hugging Face PEFT + + * - ``$datatype`` + - ``BF16`` + - All models support BF16. + + * - ``$sequence_length`` + - Between 2048 and 16384. + - Sequence length for the language model. + + .. note:: + + {{ model.model }} currently supports the following fine-tuning methods: + + {% for method in model.training_modes %} + * ``{{ method }}`` + {% endfor %} + {% if model.training_modes|length < 4 %} + + The upstream `torchtune `_ repository + does not currently provide YAML configuration files for other combinations of + model to fine-tuning method + However, you can still configure your own YAML files to enable support for + fine-tuning methods not listed here by following existing patterns in the + ``/workspace/torchtune/recipes/configs`` directory. + {% endif %} + {% endif %} + {% endfor %} + {% endfor %} + + .. rubric:: Benchmarking examples + + For examples of benchmarking commands, see ``__. + +Further reading +=============== + +- To learn more about MAD and the ``madengine`` CLI, see the `MAD usage guide `__. + +- To learn more about system settings and management practices to configure your system for + AMD Instinct MI300X series accelerators, see `AMD Instinct MI300X system optimization `_. + +- For a list of other ready-made Docker images for AI with ROCm, see + `AMD Infinity Hub `_. + +Previous versions +================= + +See :doc:`pytorch-training-history` to find documentation for previous releases +of the ``ROCm/pytorch-training`` Docker image. diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst index 0a80c7c9b..81ec4ed50 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/primus-megatron.rst @@ -55,32 +55,32 @@ vary by model -- select one to get started. 
{% set model_groups = data.model_groups %} .. raw:: html -
-
-
Model
-
+
+
+
Model
+
{% for model_group in model_groups %} -
{{ model_group.group }}
+
{{ model_group.group }}
{% endfor %} -
-
+
+
-
-
Model variant
-
+
+
Variant
+
{% for model_group in model_groups %} {% set models = model_group.models %} {% for model in models %} {% if models|length % 3 == 0 %} -
{{ model.model }}
+
{{ model.model }}
{% else %} -
{{ model.model }}
+
{{ model.model }}
{% endif %} {% endfor %} {% endfor %} -
-
+
+
.. note:: diff --git a/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst b/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst index 46b9daf2f..d8ab01318 100644 --- a/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst +++ b/docs/how-to/rocm-for-ai/training/benchmark-docker/pytorch-training.rst @@ -9,28 +9,25 @@ Training a model with PyTorch for ROCm PyTorch is an open-source machine learning framework that is widely used for model training with GPU-optimized components for transformer-based models. -The `PyTorch for ROCm training Docker `_ -(``rocm/pytorch-training:v25.6``) image provides a prebuilt optimized environment for fine-tuning and pretraining a -model on AMD Instinct MI325X and MI300X accelerators. It includes the following software components to accelerate -training workloads: +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml -+--------------------------+--------------------------------+ -| Software component | Version | -+==========================+================================+ -| ROCm | 6.3.4 | -+--------------------------+--------------------------------+ -| PyTorch | 2.8.0a0+git7d205b2 | -+--------------------------+--------------------------------+ -| Python | 3.10.17 | -+--------------------------+--------------------------------+ -| Transformer Engine | 1.14.0+2f85f5f2 | -+--------------------------+--------------------------------+ -| Flash Attention | 3.0.0.post1 | -+--------------------------+--------------------------------+ -| hipBLASLt | 0.15.0-8c6919d | -+--------------------------+--------------------------------+ -| Triton | 3.3.0 | -+--------------------------+--------------------------------+ + {% set dockers = data.dockers %} + {% set docker = dockers[0] %} + The `PyTorch for ROCm training Docker <{{ docker.docker_hub_url }}>`__ + (``{{ docker.pull_tag }}``) image provides a prebuilt optimized environment for fine-tuning and pretraining a + model on AMD Instinct MI325X and MI300X accelerators. It includes the following software components to accelerate + training workloads: + + .. list-table:: + :header-rows: 1 + + * - Software component + - Version + + {% for component_name, component_version in docker.components.items() %} + * - {{ component_name }} + - {{ component_version }} + {% endfor %} .. _amd-pytorch-training-model-support: @@ -38,119 +35,152 @@ Supported models ================ The following models are pre-optimized for performance on the AMD Instinct MI325X and MI300X accelerators. +Some instructions, commands, and training recommendations in this documentation might +vary by model -- select one to get started. .. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml - {% set unified_docker = data.unified_docker.latest %} + {% set unified_docker = data.dockers[0] %} {% set model_groups = data.model_groups %} - .. raw:: html
-
-
Workload
-
- {% for model_group in model_groups %} -
{{ model_group.group }}
- {% endfor %} -
-
- -
-
Model
-
- {% for model_group in model_groups %} - {% set models = model_group.models %} - {% for model in models %} - {% if models|length % 3 == 0 %} -
{{ model.model }}
- {% else %} -
{{ model.model }}
- {% endif %} +
+
Model
+
+ {% for model_group in model_groups %} +
{{ model_group.group }}
{% endfor %} - {% endfor %} -
-
+
+
+ +
+
Variant
+
+ {% for model_group in model_groups %} + {% set models = model_group.models %} + {% for model in models %} + {% if models|length % 3 == 0 %} +
{{ model.model }}
+ {% else %} +
{{ model.model }}
+ {% endif %} + {% endfor %} + {% endfor %} +
+
- .. note:: - Some models require an external license agreement through a third party (for example, Meta). + .. _amd-pytorch-training-supported-training-modes: - .. _amd-pytorch-training-performance-measurements: + The following table lists supported training modes per model. - Performance measurements - ======================== + .. dropdown:: Supported training modes - To evaluate performance, the + .. list-table:: + :header-rows: 1 + + * - Model + - Supported training modes + + {% for model_group in model_groups %} + {% set models = model_group.models %} + {% for model in models %} + * - {{ model.model }} + - ``{{ model.training_modes | join('``, ``') }}`` + + {% endfor %} + {% endfor %} + + .. note:: + + Some model and fine-tuning combinations are not listed. This is + because the `upstream torchtune repository `__ + doesn't provide default YAML configurations for them. + For advanced usage, you can create a custom configuration to enable + unlisted fine-tuning methods by using an existing file in the + ``/workspace/torchtune/recipes/configs`` directory as a template. + +.. _amd-pytorch-training-performance-measurements: + +Performance measurements +======================== + +To evaluate performance, the +`Performance results with AMD ROCm software `_ +page provides reference throughput and latency measurements for training +popular AI models. + +.. note:: + + The performance data presented in `Performance results with AMD ROCm software `_ - page provides reference throughput and latency measurements for training - popular AI models. + should not be interpreted as the peak performance achievable by AMD + Instinct MI325X and MI300X accelerators or ROCm software. - .. note:: +System validation +================= - The performance data presented in - `Performance results with AMD ROCm software `_ - should not be interpreted as the peak performance achievable by AMD - Instinct MI325X and MI300X accelerators or ROCm software. +Before running AI workloads, it's important to validate that your AMD hardware is configured +correctly and performing optimally. - System validation - ================= +If you have already validated your system settings, including aspects like NUMA auto-balancing, you +can skip this step. Otherwise, complete the procedures in the :ref:`System validation and +optimization ` guide to properly configure your system settings +before starting training. - Before running AI workloads, it's important to validate that your AMD hardware is configured - correctly and performing optimally. +To test for optimal performance, consult the recommended :ref:`System health benchmarks +`. This suite of tests will help you verify and fine-tune your +system's configuration. - If you have already validated your system settings, including aspects like NUMA auto-balancing, you - can skip this step. Otherwise, complete the procedures in the :ref:`System validation and - optimization ` guide to properly configure your system settings - before starting training. +This Docker image is optimized for specific model configurations outlined +below. Performance can vary for other training workloads, as AMD +doesn’t test configurations and run conditions outside those described. - To test for optimal performance, consult the recommended :ref:`System health benchmarks - `. This suite of tests will help you verify and fine-tune your - system's configuration. +Run training +============ - This Docker image is optimized for specific model configurations outlined - below. 
Performance can vary for other training workloads, as AMD - doesn’t validate configurations and run conditions outside those described. +.. datatemplate:yaml:: /data/how-to/rocm-for-ai/training/pytorch-training-benchmark-models.yaml - Benchmarking - ============ + {% set unified_docker = data.dockers[0] %} + {% set model_groups = data.model_groups %} - Once the setup is complete, choose between two options to start benchmarking: + Once the setup is complete, choose between two options to start benchmarking training: .. tab-set:: .. tab-item:: MAD-integrated benchmarking - Clone the ROCm Model Automation and Dashboarding (``__) repository to a local - directory and install the required packages on the host machine. + 1. Clone the ROCm Model Automation and Dashboarding (``__) repository to a local + directory and install the required packages on the host machine. - .. code-block:: shell + .. code-block:: shell - git clone https://github.com/ROCm/MAD - cd MAD - pip install -r requirements.txt + git clone https://github.com/ROCm/MAD + cd MAD + pip install -r requirements.txt {% for model_group in model_groups %} {% for model in model_group.models %} .. container:: model-doc {{ model.mad_tag }} - For example, use this command to run the performance benchmark test on the {{ model.model }} model - using one GPU with the {{ model.precision }} data type on the host machine. + 2. For example, use this command to run the performance benchmark test on the {{ model.model }} model + using one node with the {{ model.precision }} data type on the host machine. - .. code-block:: shell + .. code-block:: shell - export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" - madengine run \ - --tags {{ model.mad_tag }} \ - --keep-model-dir \ - --live-output \ - --timeout 28800 + export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models" + madengine run \ + --tags {{ model.mad_tag }} \ + --keep-model-dir \ + --live-output \ + --timeout 28800 - MAD launches a Docker container with the name - ``container_ci-{{ model.mad_tag }}``, for example. The latency and throughput reports of the - model are collected in the following path: ``~/MAD/perf.csv``. + MAD launches a Docker container with the name + ``container_ci-{{ model.mad_tag }}``. The latency and throughput reports of the + model are collected in ``~/MAD/perf.csv``. {% endfor %} {% endfor %} @@ -159,222 +189,213 @@ The following models are pre-optimized for performance on the AMD Instinct MI325 .. rubric:: Download the Docker image and required packages - Use the following command to pull the Docker image from Docker Hub. + 1. Use the following command to pull the Docker image from Docker Hub. - .. code-block:: shell + .. code-block:: shell - docker pull {{ unified_docker.pull_tag }} + docker pull {{ unified_docker.pull_tag }} - Run the Docker container. + 2. Run the Docker container. - .. code-block:: shell + .. 
code-block:: shell - docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $HOME:$HOME -v $HOME/.ssh:/root/.ssh --shm-size 64G --name training_env {{ unified_docker.pull_tag }} + docker run -it \ + --device /dev/dri \ + --device /dev/kfd \ + --network host \ + --ipc host \ + --group-add video \ + --cap-add SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --privileged \ + -v $HOME:$HOME \ + -v $HOME/.ssh:/root/.ssh \ + --shm-size 64G \ + --name training_env \ + {{ unified_docker.pull_tag }} - Use these commands if you exit the ``training_env`` container and need to return to it. + Use these commands if you exit the ``training_env`` container and need to return to it. - .. code-block:: shell + .. code-block:: shell - docker start training_env - docker exec -it training_env bash + docker start training_env + docker exec -it training_env bash - In the Docker container, clone the ``__ - repository and navigate to the benchmark scripts directory - ``/workspace/MAD/scripts/pytorch_train``. + 3. In the Docker container, clone the ``__ + repository and navigate to the benchmark scripts directory + ``/workspace/MAD/scripts/pytorch_train``. - .. code-block:: shell + .. code-block:: shell - git clone https://github.com/ROCm/MAD - cd MAD/scripts/pytorch_train + git clone https://github.com/ROCm/MAD + cd MAD/scripts/pytorch_train .. rubric:: Prepare training datasets and dependencies - The following benchmarking examples require downloading models and datasets - from Hugging Face. To ensure successful access to gated repos, set your - ``HF_TOKEN``. + 1. The following benchmarking examples require downloading models and datasets + from Hugging Face. To ensure successful access to gated repos, set your + ``HF_TOKEN``. - .. code-block:: shell + .. code-block:: shell - export HF_TOKEN=$your_personal_hugging_face_access_token + export HF_TOKEN=$your_personal_hugging_face_access_token - Run the setup script to install libraries and datasets needed for benchmarking. + 2. Run the setup script to install libraries and datasets needed for benchmarking. - .. code-block:: shell + .. code-block:: shell - ./pytorch_benchmark_setup.sh + ./pytorch_benchmark_setup.sh - .. container:: model-doc pyt_train_llama-3.1-8b + .. container:: model-doc pyt_train_llama-3.1-8b - ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 8B: + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 8B: - .. list-table:: - :header-rows: 1 + .. list-table:: + :header-rows: 1 - * - Library - - Reference + * - Library + - Reference - * - ``accelerate`` - - `Hugging Face Accelerate `_ + * - ``accelerate`` + - `Hugging Face Accelerate `_ - * - ``datasets`` - - `Hugging Face Datasets `_ 3.2.0 + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 - .. container:: model-doc pyt_train_llama-3.1-70b + .. container:: model-doc pyt_train_llama-3.1-70b - ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 70B: + ``pytorch_benchmark_setup.sh`` installs the following libraries for Llama 3.1 70B: - .. list-table:: - :header-rows: 1 + .. 
list-table:: + :header-rows: 1 - * - Library - - Reference + * - Library + - Reference - * - ``datasets`` - - `Hugging Face Datasets `_ 3.2.0 + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 - * - ``torchdata`` - - `TorchData `_ + * - ``torchdata`` + - `TorchData `_ - * - ``tomli`` - - `Tomli `_ + * - ``tomli`` + - `Tomli `_ - * - ``tiktoken`` - - `tiktoken `_ + * - ``tiktoken`` + - `tiktoken `_ - * - ``blobfile`` - - `blobfile `_ + * - ``blobfile`` + - `blobfile `_ - * - ``tabulate`` - - `tabulate `_ + * - ``tabulate`` + - `tabulate `_ - * - ``wandb`` - - `Weights & Biases `_ + * - ``wandb`` + - `Weights & Biases `_ - * - ``sentencepiece`` - - `SentencePiece `_ 0.2.0 + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 - * - ``tensorboard`` - - `TensorBoard `_ 2.18.0 + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 - .. container:: model-doc pyt_train_flux + .. container:: model-doc pyt_train_flux - ``pytorch_benchmark_setup.sh`` installs the following libraries for FLUX: + ``pytorch_benchmark_setup.sh`` installs the following libraries for FLUX: - .. list-table:: - :header-rows: 1 + .. list-table:: + :header-rows: 1 - * - Library - - Reference + * - Library + - Reference - * - ``accelerate`` - - `Hugging Face Accelerate `_ + * - ``accelerate`` + - `Hugging Face Accelerate `_ - * - ``datasets`` - - `Hugging Face Datasets `_ 3.2.0 + * - ``datasets`` + - `Hugging Face Datasets `_ 3.2.0 - * - ``sentencepiece`` - - `SentencePiece `_ 0.2.0 + * - ``sentencepiece`` + - `SentencePiece `_ 0.2.0 - * - ``tensorboard`` - - `TensorBoard `_ 2.18.0 + * - ``tensorboard`` + - `TensorBoard `_ 2.18.0 - * - ``csvkit`` - - `csvkit `_ 2.0.1 + * - ``csvkit`` + - `csvkit `_ 2.0.1 - * - ``deepspeed`` - - `DeepSpeed `_ 0.16.2 + * - ``deepspeed`` + - `DeepSpeed `_ 0.16.2 - * - ``diffusers`` - - `Hugging Face Diffusers `_ 0.31.0 + * - ``diffusers`` + - `Hugging Face Diffusers `_ 0.31.0 - * - ``GitPython`` - - `GitPython `_ 3.1.44 + * - ``GitPython`` + - `GitPython `_ 3.1.44 - * - ``opencv-python-headless`` - - `opencv-python-headless `_ 4.10.0.84 + * - ``opencv-python-headless`` + - `opencv-python-headless `_ 4.10.0.84 - * - ``peft`` - - `PEFT `_ 0.14.0 + * - ``peft`` + - `PEFT `_ 0.14.0 - * - ``protobuf`` - - `Protocol Buffers `_ 5.29.2 + * - ``protobuf`` + - `Protocol Buffers `_ 5.29.2 - * - ``pytest`` - - `PyTest `_ 8.3.4 + * - ``pytest`` + - `PyTest `_ 8.3.4 - * - ``python-dotenv`` - - `python-dotenv `_ 1.0.1 + * - ``python-dotenv`` + - `python-dotenv `_ 1.0.1 - * - ``seaborn`` - - `Seaborn `_ 0.13.2 + * - ``seaborn`` + - `Seaborn `_ 0.13.2 - * - ``transformers`` - - `Transformers `_ 4.47.0 + * - ``transformers`` + - `Transformers `_ 4.47.0 - ``pytorch_benchmark_setup.sh`` downloads the following datasets from Hugging Face: + ``pytorch_benchmark_setup.sh`` downloads the following datasets from Hugging Face: - * `bghira/pseudo-camera-10k `_ + * `bghira/pseudo-camera-10k `_ {% for model_group in model_groups %} {% for model in model_group.models %} - {% if model_group.tag == "pre-training" and model.mad_tag in ["pyt_train_llama-3.1-8b", "pyt_train_llama-3.1-70b", "pyt_train_flux"] %} + {% set training_modes = model.training_modes %} + {% set training_mode_descs = { + "pretrain": "Benchmark pre-training.", + "HF_pretrain": "Llama 3.1 8B pre-training with FP8 precision." + } %} + {% set available_modes = training_modes | select("in", ["pretrain", "HF_pretrain"]) | list %} + {% if available_modes %} .. container:: model-doc {{ model.mad_tag }} - .. rubric:: Pretraining + .. 
rubric:: Pre-training To start the pre-training benchmark, use the following command with the appropriate options. See the following list of options and their descriptions. .. code-block:: shell - ./pytorch_benchmark_report.sh -t pretrain -m {{ model.model_repo }} -p $datatype -s $sequence_length - - .. list-table:: - :header-rows: 1 - - * - Name - - Options - - Description - - {% if model.mad_tag == "pyt_train_llama-3.1-8b" %} - * - ``$datatype`` - - ``BF16`` or ``FP8`` - - Only Llama 3.1 8B supports FP8 precision. - {% else %} - * - ``$datatype`` - - ``BF16`` - - Only Llama 3.1 8B supports FP8 precision. - {% endif %} - - * - ``$sequence_length`` - - Sequence length for the language model. - - Between 2048 and 8192. 8192 by default. + ./pytorch_benchmark_report.sh -t {% if available_modes | length == 1 %}{{ available_modes[0] }}{% else %}$training_mode{% endif %} \ + -m {{ model.model_repo }} \ + -p $datatype \ + -s $sequence_length {% if model.mad_tag == "pyt_train_flux" %} .. container:: model-doc {{ model.mad_tag }} .. note:: + Currently, FLUX models are not supported out-of-the-box on {{ unified_docker.pull_tag }}. + To use FLUX, refer to the previous version of the ``pytorch-training`` Docker: :doc:`previous-versions/pytorch-training-v25.6` + Occasionally, downloading the Flux dataset might fail. In the event of this error, manually download it from Hugging Face at `black-forest-labs/FLUX.1-dev `_ and save it to `/workspace/FluxBenchmark`. This ensures that the test script can access the required dataset. {% endif %} - {% endif %} - - {% if model_group.tag == "fine-tuning" %} - .. container:: model-doc {{ model.mad_tag }} - - .. rubric:: Fine-tuning - - To start the fine-tuning benchmark, use the following command with the - appropriate options. See the following list of options and their descriptions. - - .. code-block:: shell - - ./pytorch_benchmark_report.sh -t $training_mode -m {{ model.model_repo }} -p BF16 -s $sequence_length .. list-table:: :header-rows: 1 @@ -383,53 +404,143 @@ The following models are pre-optimized for performance on the AMD Instinct MI325 - Options - Description - * - ``$training_mode`` - - ``finetune_fw`` - - Full weight fine-tuning (BF16 supported) - - * - - - ``finetune_lora`` - - LoRA fine-tuning (BF16 supported) - - * - - - ``finetune_qlora`` - - QLoRA fine-tuning (BF16 supported) - - * - - - ``HF_finetune_lora`` - - LoRA fine-tuning with Hugging Face PEFT + {% for mode in available_modes %} + * - {% if loop.first %}``$training_mode``{% endif %} + - ``{{ mode }}`` + - {{ training_mode_descs[mode] }} + {% endfor %} * - ``$datatype`` - - ``BF16`` - - All models support BF16. + - ``BF16``{% if model.mad_tag == "pyt_train_llama-3.1-8b" %} or ``FP8``{% endif %} + - Only Llama 3.1 8B supports FP8 precision. + + * - ``$sequence_length`` + - Sequence length for the language model. + - Between 2048 and 8192. 8192 by default. + {% endif %} + + {% set training_mode_descs = { + "finetune_fw": "Full weight fine-tuning (BF16 and FP8 supported).", + "finetune_lora": "LoRA fine-tuning (BF16 supported).", + "finetune_qlora": "QLoRA fine-tuning (BF16 supported).", + "HF_finetune_lora": "LoRA fine-tuning with Hugging Face PEFT.", + } %} + {% set available_modes = training_modes | select("in", ["finetune_fw", "finetune_lora", "finetune_qlora", "HF_finetune_lora"]) | list %} + {% if available_modes %} + .. container:: model-doc {{ model.mad_tag }} + + .. rubric:: Fine-tuning + + To start the fine-tuning benchmark, use the following command with the + appropriate options. 
See the following list of options and their descriptions. + See :ref:`supported training modes `. + + .. code-block:: shell + + ./pytorch_benchmark_report.sh -t $training_mode \ + -m {{ model.model_repo }} \ + -p $datatype \ + -s $sequence_length + + .. list-table:: + :header-rows: 1 + + * - Name + - Options + - Description + + {% for mode in available_modes %} + * - {% if loop.first %}``$training_mode``{% endif %} + - ``{{ mode }}`` + - {{ training_mode_descs[mode] }} + {% endfor %} + + * - ``$datatype`` + - ``BF16``{% if "finetune_fw" in available_modes %} or ``FP8``{% endif %} + - All models support BF16.{% if "finetune_fw" in available_modes %} FP8 is only available for full weight fine-tuning.{% endif %} * - ``$sequence_length`` - Between 2048 and 16384. - Sequence length for the language model. + {% if model.mad_tag in ["pyt_train_llama3.2-vision-11b", "pyt_train_llama-3.2-vision-90b"] %} .. note:: - {{ model.model }} currently supports the following fine-tuning methods: + For LoRA and QLoRA support with vision models (Llama 3.2 11B and 90B), + use the following torchtune commit for compatibility: - {% for method in model.training_modes %} - * ``{{ method }}`` - {% endfor %} - {% if model.training_modes|length < 4 %} + .. code-block:: shell + + git checkout 48192e23188b1fc524dd6d127725ceb2348e7f0e + + {% elif model.mad_tag in ["pyt_train_llama-2-7b", "pyt_train_llama-2-13b", "pyt_train_llama-2-70b"] %} + .. note:: + + You might encounter the following error with Llama 2: ``ValueError: seq_len (16384) of + input tensor should be smaller than max_seq_len (4096)``. + This error indicates that an input sequence is longer than the model's maximum context window. + + Ensure your tokenized input does not exceed the model's ``max_seq_len`` (4096 + tokens in this case). You can resolve this by truncating the input or splitting + it into smaller chunks before passing it to the model. + + Note on reproducibility: The results in this guide are based on + commit ``b4c98ac`` from the upstream + ``__ repository. For the + latest updates, you can use the main branch. - The upstream `torchtune `_ repository - does not currently provide YAML configuration files for other combinations of - model to fine-tuning method - However, you can still configure your own YAML files to enable support for - fine-tuning methods not listed here by following existing patterns in the - ``/workspace/torchtune/recipes/configs`` directory. {% endif %} {% endif %} {% endfor %} {% endfor %} - .. rubric:: Benchmarking examples + .. rubric:: Benchmarking examples - For examples of benchmarking commands, see ``__. + For examples of benchmarking commands, see ``__. + +Multi-node training +------------------- + +Pre-training +~~~~~~~~~~~~ + +Multi-node training with torchtitan is supported. The provided SLURM script is pre-configured for Llama 3 70B. + +To launch the training job on a SLURM cluster for Llama 3 70B, run the following commands from the MAD repository. + +.. code-block:: shell + + # In the MAD repository + cd scripts/pytorch_train + sbatch run_slurm_train.sh + +Fine-tuning +~~~~~~~~~~~ + +Multi-node training with torchtune is supported. The provided SLURM script is pre-configured for Llama 3.3 70B. + +To launch the training job on a SLURM cluster for Llama 3.3 70B, run the following commands from the MAD repository. + +.. 
code-block:: shell + + huggingface-cli login # Get access to HF Llama model space + huggingface-cli download meta-llama/Llama-3.3-70B-Instruct --local-dir ./models/Llama-3.3-70B-Instruct # Download the Llama 3.3 model locally + # In the MAD repository + cd scripts/pytorch_train + sbatch Torchtune_Multinode.sh + +.. note:: + + Information regarding benchmark setup: + + * By default, Llama 3.3 70B is fine-tuned using ``alpaca_dataset``. + * You can adjust the torchtune `YAML configuration file + `__ + if you're using a different model. + * The number of nodes and other parameters can be tuned in the SLURM script ``Torchtune_Multinode.sh``. + * Set the ``mounting_paths`` inside the SLURM script. + +Once the run is finished, you can find the log files in the ``result_torchtune/`` directory. Further reading =============== diff --git a/docs/sphinx/static/css/vllm-benchmark.css b/docs/sphinx/static/css/vllm-benchmark.css index 4c10b1ffb..231bb2cac 100644 --- a/docs/sphinx/static/css/vllm-benchmark.css +++ b/docs/sphinx/static/css/vllm-benchmark.css @@ -7,15 +7,14 @@ html { --compat-head-color: var(--pst-color-surface); --compat-param-hover-color: var(--pst-color-link-hover); --compat-param-selected-color: var(--pst-color-primary); + --compat-border-color: var(--pst-color-border); } html[data-theme="light"] { - --compat-border-color: var(--pst-gray-500); --compat-param-disabled-color: var(--pst-gray-300); } html[data-theme="dark"] { - --compat-border-color: var(--pst-gray-600); --compat-param-disabled-color: var(--pst-gray-600); } @@ -23,6 +22,7 @@ div#vllm-benchmark-ud-params-picker.container-fluid { padding: 0 0 1rem 0; } +div[data-param-k="model-group"], div[data-param-k="model"] { background-color: var(--compat-bg-color); padding: 2px; @@ -31,40 +31,19 @@ div[data-param-k="model"] { cursor: pointer; } +div[data-param-k="model-group"][data-param-state="selected"], div[data-param-k="model"][data-param-state="selected"] { background-color: var(--compat-param-selected-color); color: var(--compat-fg-color); } -div[data-param-k="model"][data-param-state="latest-version"] { - background-color: var(--compat-param-selected-color); - color: var(--compat-fg-color); -} - -div[data-param-k="model"][data-param-state="disabled"] { - background-color: var(--compat-param-disabled-color); - text-decoration: line-through; - /* text-decoration-color: var(--pst-color-danger); */ - cursor: auto; -} - -div[data-param-k="model"]:not([data-param-state]):hover { +div[data-param-k="model-group"]:hover, +div[data-param-k="model"]:hover { background-color: var(--compat-param-hover-color); -} - -div[data-param-k="model-group"] { - background-color: var(--compat-bg-color); - padding: 2px; - border: solid 1px var(--compat-border-color); - font-weight: 500; - cursor: pointer; -} - -div[data-param-k="model-group"][data-param-state="selected"] { - background-color: var(--compat-param-selected-color); color: var(--compat-fg-color); } +/* div[data-param-k="model-group"][data-param-state="latest-version"] { background-color: var(--compat-param-selected-color); color: var(--compat-fg-color); @@ -73,26 +52,19 @@ div[data-param-k="model-group"][data-param-state="latest-version"] { div[data-param-k="model-group"][data-param-state="disabled"] { background-color: var(--compat-param-disabled-color); text-decoration: line-through; - /* text-decoration-color: var(--pst-color-danger); */ + text-decoration-color: var(--pst-color-danger); cursor: auto; } - -div[data-param-k="model-group"]:not([data-param-state]):hover { - background-color: 
var(--compat-param-hover-color); -} +*/ .model-param-head { background-color: var(--compat-head-color); padding: 0.15rem 0.15rem 0.15rem 0.67rem; - /* margin: 2px; */ - border-right: solid 2px var(--compat-accent-color); + border-right: solid 4px var(--compat-accent-color); font-weight: 600; } .model-param { - /* padding: 2px; */ - /* margin: 0 2px 0 2px; */ - /* margin: 2px; */ border: solid 1px var(--compat-border-color); font-weight: 500; }