Compare commits
2 commits: amd/hsivas...roc-5.7.x

| Author | SHA1 | Date |
|---|---|---|
|  | 497a5a26d8 |  |
|  | a0d75e9720 |  |
.github/CODEOWNERS (vendored; Executable file → Normal file; 9 changes)
@@ -1,8 +1 @@
* @amd-aakash @jlgreathouse @samjwu @yhuiYH @ROCm/rocm-documentation
# Documentation files
docs/ @amd-aakash @jlgreathouse @samjwu @yhuiYH @ROCm/rocm-documentation
*.md @amd-aakash @jlgreathouse @samjwu @yhuiYH @ROCm/rocm-documentation
*.rst @amd-aakash @jlgreathouse @samjwu @yhuiYH @ROCm/rocm-documentation
# External CI
/.azuredevops/ @ROCm/external-ci
tools/rocm-build/ @ROCm/rocm-devops
* @saadrahim @Rmalavally @amd-aakash @zhang2amd @jlgreathouse @samjwu @MathiasMagnus @LisaDelaney

.github/ISSUE_TEMPLATE/0_issue_report.yml (vendored; new file; 76 lines)
@@ -0,0 +1,76 @@
name: Issue Report
description: File a report for something not working correctly.
title: "[Issue]: "

body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to fill out this report!

        On a Linux system, you can acquire your OS, CPU, GPU, and ROCm version (for filling out this report) with the following commands:
        echo "OS:" && cat /etc/os-release | grep -E "^(NAME=|VERSION=)";
        echo "CPU: " && cat /proc/cpuinfo | grep "model name" | sort --unique;
        echo "GPU:" && /opt/rocm/bin/rocminfo | grep -E "^\s*(Name|Marketing Name)";
        echo "ROCm in /opt:" && ls -1 /opt | grep -E "rocm-";
  - type: textarea
    attributes:
      label: Problem Description
      description: Describe the issue you encountered.
      placeholder: "The steps to reproduce can be included here, or in the dedicated section further below."
    validations:
      required: true
  - type: input
    attributes:
      label: Operating System
      description: What is the name and version number of the OS?
      placeholder: "e.g. Ubuntu 22.04.3 LTS (Jammy Jellyfish)"
    validations:
      required: true
  - type: input
    attributes:
      label: CPU
      description: What CPU did you encounter the issue on?
      placeholder: "e.g. AMD Ryzen 9 5900HX with Radeon Graphics"
    validations:
      required: true
  - type: input
    attributes:
      label: GPU
      description: What GPU(s) did you encounter the issue on?
      placeholder: "e.g. MI200"
    validations:
      required: true
  - type: input
    attributes:
      label: ROCm Version
      description: What version(s) of ROCm did you encounter the issue on?
      placeholder: "e.g. 5.7.0"
    validations:
      required: true
  - type: input
    attributes:
      label: ROCm Component
      description: (Optional) If this issue relates to a specific ROCm component, it can be mentioned here.
      placeholder: "e.g. rocBLAS"

  - type: textarea
    attributes:
      label: Steps to Reproduce
      description: (Optional) Detailed steps to reproduce the issue.
      placeholder: Please also include what you expected to happen, and what actually did, at the failing step(s).
    validations:
      required: false

  - type: textarea
    attributes:
      label: Output of /opt/rocm/bin/rocminfo --support
      description: The output of rocminfo --support will help to better address the problem.
      placeholder: |
        ROCk module is loaded
        =====================
        HSA System Attributes
        =====================
        [...]
    validations:
      required: true
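
The template's suggested information-gathering commands are runnable as-is; collected into one snippet (assuming a Linux host with ROCm installed under `/opt`, as the template itself does):

```bash
# Gather the system details requested by the issue template above.
echo "OS:" && cat /etc/os-release | grep -E "^(NAME=|VERSION=)"
echo "CPU:" && cat /proc/cpuinfo | grep "model name" | sort --unique
echo "GPU:" && /opt/rocm/bin/rocminfo | grep -E "^\s*(Name|Marketing Name)"
echo "ROCm in /opt:" && ls -1 /opt | grep -E "rocm-"
```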

.github/ISSUE_TEMPLATE/1_feature_request.yml (vendored; new file; 32 lines)
@@ -0,0 +1,32 @@
name: Feature Suggestion
description: Suggest an additional functionality, or new way of handling an existing functionality.
title: "[Feature]: "

body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to make a suggestion!

  - type: textarea
    attributes:
      label: Suggestion Description
      description: Describe your suggestion.
    validations:
      required: true
  - type: input
    attributes:
      label: Operating System
      description: (Optional) If this is for a specific OS, you can mention it here.
      placeholder: "e.g. Ubuntu"
  - type: input
    attributes:
      label: GPU
      description: (Optional) If this is for a specific GPU or GPU family, you can mention it here.
      placeholder: "e.g. MI200"
  - type: input
    attributes:
      label: ROCm Component
      description: (Optional) If this issue relates to a specific ROCm component, it can be mentioned here.
      placeholder: "e.g. rocBLAS"

.github/ISSUE_TEMPLATE/config.yml (vendored; new file; 5 lines)
@@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
  - name: ROCm Community Discussions
    url: https://github.com/RadeonOpenCompute/ROCm/discussions
    about: Please ask and answer questions here for anything ROCm.

.github/workflows/issue_retrieval.yml (vendored; deleted; 22 lines)
@@ -1,22 +0,0 @@
name: Issue retrieval

on:
  issues:
    types: [opened, edited]

jobs:
  auto-retrieve:
    runs-on: ubuntu-latest
    steps:
      - name: Generate a token
        id: generate_token
        uses: actions/create-github-app-token@v1
        with:
          app_id: ${{ secrets.ACTION_APP_ID }}
          private_key: ${{ secrets.ACTION_PEM }}
      - name: 'Retrieve Issue'
        uses: harkgill-amd/rocm_issue_management@main
        with:
          authentication-token: ${{ steps.generate_token.outputs.token }}
          github-organization: 'ROCm'
          project-num: '6'

.github/workflows/linting.yml (vendored; 6 changes)
@@ -2,13 +2,13 @@ name: Linting

on:
  push:
    branches:
    branches:
      - develop
      - main
      - 'docs/*'
      - 'roc**'
  pull_request:
    branches:
    branches:
      - develop
      - main
      - 'docs/*'
@@ -17,4 +17,4 @@ on:
jobs:
  call-workflow-passing-data:
    name: Documentation
    uses: ROCm/rocm-docs-core/.github/workflows/linting.yml@develop
    uses: RadeonOpenCompute/rocm-docs-core/.github/workflows/linting.yml@develop

.gitignore (vendored; 6 changes)
@@ -1,21 +1,19 @@
.venv
.vscode
build
__pycache__

# documentation artifacts
_build/
_images/
__pycache__/
_static/
_templates/
_toc.yml
docBin/
_doxygen/
_readthedocs/
__pycache__/

# avoid duplicating contributing.md due to conf.py
docs/CHANGELOG.md
docs/contribute/index.md
docs/about/release-notes.md
docs/release/changelog.md
docs/about/CHANGELOG.md

@@ -12,5 +12,7 @@ config:
  MD041: false
  MD051: false
ignores:
  - "{,docs/}{RELEASE,release,CHANGELOG,changelog}.md"
  - CHANGELOG.md
  - docs/CHANGELOG.md
  - "{,docs/}{RELEASE,release}.md"
  - tools/autotag/templates/**/*.md

@@ -6,17 +6,13 @@ version: 2
sphinx:
  configuration: docs/conf.py

formats: [htmlzip]
formats: [htmlzip, pdf]

python:
  install:
    - requirements: docs/sphinx/requirements.txt

build:
  os: ubuntu-22.04
  os: ubuntu-20.04
  tools:
    python: "3.10"
  apt_packages:
    - "doxygen"
    - "gfortran" # For pre-processing fortran sources
    - "graphviz" # For dot graphs in doxygen
    python: "3.8"

@@ -1,10 +0,0 @@
matrix:
  - name: Markdown
    sources:
      - ['tools/autotag/templates/**/*.md', '!tools/autotag/templates/**/5*.md', '!tools/autotag/templates/**/6.0*.md', '!tools/autotag/templates/**/6.1*.md']
  - name: reST
    sources:
      - []
  - name: Cpp
    sources:
      - []

.wordlist.txt (1197 changes)
CHANGELOG.md (13270 changes)
@@ -1,40 +0,0 @@
# MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

cmake_minimum_required(VERSION 3.18.0)

project(ROCm VERSION 5.7.1 LANGUAGES NONE)

option(BUILD_DOCS "Build ROCm documentation" ON)

include(GNUInstallDirs)

# Adding default path cmake modules
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")

# Handle dependencies
include(Dependencies)

# Build docs
if(BUILD_DOCS)
  add_subdirectory(docs)
endif()

CONTRIBUTING.md (278 changes)
@@ -1,93 +1,229 @@
<head>
  <meta charset="UTF-8">
  <meta name="description" content="Contributing to ROCm">
  <meta name="keywords" content="ROCm, contributing, contribute, maintainer, contributor">
</head>
# Contributing to ROCm documentation

# Contribute to ROCm
AMD values and encourages contributions to our code and documentation. If you choose to
contribute, we encourage you to be polite and respectful. Improving documentation is a long-term
process, to which we are dedicated.

AMD values and encourages contributions to our code and documentation. If you want to contribute
to our ROCm repositories, first review the following guidance. For documentation-specific information,
see [Contributing to ROCm docs](https://rocm.docs.amd.com/en/latest/contribute/contributing.html).
If you have issues when trying to contribute, refer to the
[discussions](https://github.com/RadeonOpenCompute/ROCm/discussions) page in our GitHub
repository.

ROCm is a software stack made up of a collection of drivers, development tools, and APIs that enable
GPU programming from low-level kernel to end-user applications. Because some of our components
are inherited from external projects (such as
[LLVM](https://github.com/ROCm/llvm-project) and
[Kernel driver](https://github.com/ROCm/ROCK-Kernel-Driver)), these use
project-specific contribution guidelines and workflow. Refer to their repositories for more information.
All other ROCm components follow the workflow described in the following sections.
## Folder structure and naming convention

## Development workflow
Our documentation follows the Pitchfork folder structure. Most documentation files are stored in the
`/docs` folder. Some special files (such as release, contributing, and changelog) are stored in the root
(`/`) folder.

ROCm uses GitHub to host code, collaborate, and manage version control. We use pull requests (PRs)
for all changes within our repositories. We use
[GitHub issues](https://github.com/ROCm/ROCm/issues) to track known issues, such as
bugs.
All images are stored in the `/docs/data` folder. An image's file path mirrors that of the documentation
file where it is used.

### Issue tracking
Our naming structure uses kebab case; for example, `my-file-name.rst`.

Before filing a new issue, search the
[existing issues](https://github.com/ROCm/ROCm/issues) to make sure your issue isn't
already listed.
## Supported formats and syntax

General issue guidelines:
Our documentation includes both Markdown and RST files. We are gradually transitioning existing
Markdown to RST in order to more effectively meet our documentation needs. When contributing,
RST is preferred; if you must use Markdown, use GitHub-flavored Markdown.

* Use your best judgement for issue creation. If your issue is already listed, upvote the issue and
  comment or post to provide additional details, such as how you reproduced this issue.
* If you're not sure if your issue is the same, err on the side of caution and file your issue.
  You can add a comment to include the issue number (and link) for the similar issue. If we evaluate
  your issue as being the same as the existing issue, we'll close the duplicate.
* If your issue doesn't exist, use the issue template to file a new issue.
* When filing an issue, be sure to provide as much information as possible, including script output so
  we can collect information about your configuration. This helps reduce the time required to
  reproduce your issue.
* Check your issue regularly, as we may require additional information to successfully reproduce the
  issue.
We use [Sphinx Design](https://sphinx-design.readthedocs.io/en/latest/index.html) syntax and compile
our API references using [Doxygen](https://www.doxygen.nl/).

### Pull requests
The following table shows some common documentation components and the syntax convention we
use for each:

When you create a pull request, you should target the default branch. Our repositories typically use the **develop** branch as the default integration branch.
<table>
<tr>
<th>Component</th>
<th>RST syntax</th>
</tr>
<tr>
<td>Code blocks</td>
<td>

When creating a PR, use the following process. Note that each repository may include additional,
project-specific steps. Refer to each repository's PR process for any additional steps.
```rst

* Identify the issue you want to fix
* Target the default branch (usually the **develop** branch) for integration
* Ensure your code builds successfully
* Each component has a suite of test cases to run; include the log of the successful test run in your PR
* Do not break existing test cases
* New functionality is only merged with new unit tests
* If your PR includes a new feature, you must provide an application or test so we can ensure that the
  feature works and continues to be valid in the future
* Tests must have good code coverage
* Submit your PR and work with the reviewer or maintainer to get your PR approved
* Once approved, the PR is brought onto internal CI systems and may be merged into the component
  during our release cycle, as coordinated by the maintainer
* We'll inform you once your change is committed
.. code-block:: language-name

> [!IMPORTANT]
> By creating a PR, you agree to allow your contribution to be licensed under the
> terms of the LICENSE.txt file in the corresponding repository. Different repositories may use different
> licenses.
My code block.

You can look up each license on the [ROCm licensing](https://rocm.docs.amd.com/en/latest/about/license.html) page.

### New feature development
```

Use the [GitHub Discussion forum](https://github.com/ROCm/ROCm/discussions)
(Ideas category) to propose new features. Our maintainers are happy to provide direction and
feedback on feature development.
</td>
</tr>
<tr>
<td>Cross-referencing internal files</td>
<td>

### Documentation
```rst

Submit ROCm documentation changes to our
[documentation repository](https://github.com/ROCm/ROCm). You must update
documentation related to any new feature or API contribution.
:doc:`Title <../path/to/file/filename>`

Note that each ROCm project uses its own repository for documentation.
```

## Future development workflow
</td>
</tr>
<tr>
<td>External links</td>
<td>

The current ROCm development workflow is GitHub-based. If, in the future, we change this platform,
the tools and links may change. In this instance, we will update contribution guidelines accordingly.
```rst

`link name <URL>`_

```

</td>
</tr>
<tr>
<td>Headings</td>
<td>

```rst

******************
Chapter title (H1)
******************

Section title (H2)
==================

Subsection title (H3)
---------------------

Sub-subsection title (H4)
^^^^^^^^^^^^^^^^^^^^^^^^^


```

</td>
</tr>
<tr>
<td>Images</td>
<td>

```rst

.. image:: image1.png

```

</td>
</tr>
<tr>
<td>Internal links</td>
<td>

```rst

1. Add a tag to the section you want to reference:

.. _section-1:

Section 1
==========

2. Link to your tag:

As shown in :ref:`section-1`.

```

</td>
</tr>
<tr>
<td>Lists</td>
<td>

```rst

#. Ordered (numbered) list item

* Unordered (bulleted) list item

```

</td>
</tr>
<tr>
<td>Math (block)</td>
<td>

```rst

.. math::

   A = \begin{pmatrix}
       0.0 & 1.0 & 1.0 & 3.0 \\
       4.0 & 5.0 & 6.0 & 7.0 \\
       \end{pmatrix}

```

</td>
</tr>
<tr>
<td>Math (inline)</td>
<td>

```rst

:math:`2 \times 2`

```

</td>
</tr>
<tr>
<td>Notes</td>
<td>

```rst

.. note::

   My note here.

```

</td>
</tr>
<tr>
<td>Tables</td>
<td>

```rst

.. csv-table:: Optional title here
   :widths: 30, 70
   :header: "entry1 header", "entry2 header"

   "entry1", "entry2"

```

</td>
</tr>
</table>

## Language and style

We use the
[Google developer documentation style guide](https://developers.google.com/style/highlights) to
guide our content.

Font size and type, page layout, white space control, and other formatting
details are controlled via
[rocm-docs-core](https://github.com/RadeonOpenCompute/rocm-docs-core). If you want to notify us
of any formatting issues, create a pull request in our
[rocm-docs-core](https://github.com/RadeonOpenCompute/rocm-docs-core) GitHub repository.

## Building our documentation

<!-- % TODO: Fix the link to be able to work at every files -->
To learn how to build our documentation, refer to
[Building documentation](./building.md).

@@ -1,60 +0,0 @@
<head>
  <meta charset="UTF-8">
  <meta name="description" content="ROCm governance model">
  <meta name="keywords" content="ROCm, governance">
</head>

# Governance model

ROCm is a software stack made up of a collection of drivers, development tools, and APIs that enable
GPU programming from the low-level kernel to end-user applications.

Components of ROCm that are inherited from external projects (such as
[LLVM](https://github.com/ROCm/llvm-project) and
[Kernel driver](https://github.com/ROCm/ROCK-Kernel-Driver)) follow their own
governance model and code of conduct. All other components of ROCm are governed by this
document.

## Governance

ROCm is led and managed by AMD.

We welcome contributions from the community. Our maintainers review all proposed changes to
ROCm.

## Roles

* **Maintainers** are responsible for their designated component and repositories.
* **Contributors** provide input and suggest changes to existing components.

### Maintainers

Maintainers are appointed by AMD. They are able to approve changes and can commit to our
repositories. They must use pull requests (PRs) for all changes.

You can find the list of maintainers in the CODEOWNERS file of each repository. Code owners differ
between repositories.

### Contributors

If you're not a maintainer, you're a contributor. We encourage the ROCm community to contribute in
several ways:

* Help other community members by posting questions or solutions on our
  [GitHub discussion forums](https://github.com/ROCm/ROCm/discussions)
* Notify us of bugs by filing an issue report on
  [GitHub Issues](https://github.com/ROCm/ROCm/issues)
* Improve our documentation by submitting a PR to our
  [repository](https://github.com/ROCm/ROCm/)
* Improve the code base (for smaller or contained changes) by submitting a PR to the component
* Suggest larger features by adding to the *Ideas* category in the
  [GitHub discussion forum](https://github.com/ROCm/ROCm/discussions)

For more information, refer to our [contribution guidelines](CONTRIBUTING.md).

## Code of conduct

To engage with any AMD ROCm component that is hosted on GitHub, you must abide by the
[GitHub community guidelines](https://docs.github.com/en/site-policy/github-terms/github-community-guidelines)
and the
[GitHub community code of conduct](https://docs.github.com/en/site-policy/github-terms/github-community-code-of-conduct).

LICENSE (2 changes)
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023 - 2025 Advanced Micro Devices, Inc. All rights reserved.
Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md (35 changes)
@@ -1,4 +1,4 @@
# AMD ROCm Software
# AMD ROCm™ platform

ROCm is an open-source stack, composed primarily of open-source software, designed for graphics
processing unit (GPU) computation. ROCm consists of a collection of drivers, development tools, and
@@ -10,7 +10,7 @@ ecosystem. ROCm is particularly well-suited to GPU-accelerated high-performance
artificial intelligence (AI), scientific computing, and computer-aided design (CAD).

ROCm is powered by AMD’s
[Heterogeneous-computing Interface for Portability (HIP)](https://github.com/ROCm/HIP),
[Heterogeneous-computing Interface for Portability (HIP)](https://github.com/ROCm-Developer-Tools/HIP),
an open-source software C++ GPU programming environment and its corresponding runtime. HIP
allows ROCm developers to create portable applications on different platforms by deploying code on a
range of platforms, from dedicated gaming GPUs to exascale HPC clusters.
@@ -19,31 +19,32 @@ ROCm supports programming models, such as OpenMP and OpenCL, and includes all ne
source software compilers, debuggers, and libraries. ROCm is fully integrated into machine learning
(ML) frameworks, such as PyTorch and TensorFlow.

> [!IMPORTANT]
> A new open source build platform for ROCm is under development at
> https://github.com/ROCm/TheRock, featuring a unified CMake build with bundled
> dependencies, Windows support, and more.

## Getting and Building ROCm from Source

Please use the [TheRock](https://github.com/ROCm/TheRock) build system to build ROCm from source.

## ROCm documentation

This repository contains the [manifest file](https://gerrit.googlesource.com/git-repo/+/HEAD/docs/manifest-format.md)
for ROCm releases, changelogs, and release information.
This repository contains the manifest file for ROCm releases, changelogs, and release information.

The `default.xml` file contains information for all repositories and the associated commit used to build
the current ROCm release; `default.xml` uses the [Manifest Format repository](https://gerrit.googlesource.com/git-repo/).
the current ROCm release; `default.xml` uses the Manifest Format repository.
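
A minimal sketch of how a `repo` manifest such as `default.xml` is typically consumed, assuming Google's `git-repo` tool is installed; the repository URL and the `roc-5.7.x` branch are taken from this compare view, and the exact invocation for a given release is an assumption:

```bash
# Hypothetical checkout of the sources pinned by default.xml.
mkdir rocm-src && cd rocm-src
repo init -u https://github.com/RadeonOpenCompute/ROCm.git -b roc-5.7.x
repo sync -j4   # -j4 mirrors the manifest's sync-j="4" default
```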

Source code for our documentation is located in the `/docs` folder of most ROCm repositories. The
`develop` branch of our repositories contains content for the next ROCm release.

The ROCm documentation homepage is [rocm.docs.amd.com](https://rocm.docs.amd.com).

For information on how to contribute to the ROCm documentation, see [Contributing to the ROCm documentation](https://rocm.docs.amd.com/en/latest/contribute/contributing.html).
### Building our documentation

For a quick-start build, use the following code. For more options and detail, refer to
[Building documentation](./contribute/building.md).

```bash
cd docs

pip3 install -r sphinx/requirements.txt

python3 -m sphinx -T -E -b html -d _build/doctrees -D language=en . _build/html
```

## Older ROCm releases

For release information for older ROCm releases, refer to the
[ROCm release history](https://rocm.docs.amd.com/en/latest/release/versions.html).
For release information for older ROCm releases, refer to
[`CHANGELOG`](./CHANGELOG.md).

RELEASE.md (1534 changes)
@@ -1,47 +0,0 @@
# MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# ###########################
# ROCm dependencies
# ###########################

include(FetchContent)

if(BUILD_DOCS)
  find_package(ROCM 0.11.0 CONFIG QUIET PATHS "${ROCM_PATH}") # First version with Sphinx doc gen improvement
  if(NOT ROCM_FOUND)
    message(STATUS "ROCm CMake not found. Fetching...")
    set(rocm_cmake_tag
        "c044bb52ba85058d28afe2313be98d9fed02e293" # develop@2023.09.12. (move to 6.0 tag when released)
        CACHE STRING "rocm-cmake tag to download")
    FetchContent_Declare(
      rocm-cmake
      GIT_REPOSITORY https://github.com/ROCm/rocm-cmake.git
      GIT_TAG ${rocm_cmake_tag}
      SOURCE_SUBDIR "DISABLE ADDING TO BUILD" # We don't really want to consume the build and test targets of ROCm CMake.
    )
    FetchContent_MakeAvailable(rocm-cmake)
    find_package(ROCM CONFIG REQUIRED NO_DEFAULT_PATH PATHS "${rocm-cmake_SOURCE_DIR}")
  else()
    find_package(ROCM 0.11.0 CONFIG REQUIRED PATHS "${ROCM_PATH}")
  endif()
endif()

default.xml (99 changes)
@@ -1,44 +1,79 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest>
  <remote name="rocm-org" fetch="https://github.com/ROCm/" />
  <default revision="refs/tags/rocm-7.2.0"
           remote="rocm-org"
  <remote name="roc-github"
          fetch="https://github.com/RadeonOpenCompute/" />
  <remote name="rocm-devtools"
          fetch="https://github.com/ROCm-Developer-Tools/" />
  <remote name="rocm-swplat"
          fetch="https://github.com/ROCmSoftwarePlatform/" />
  <remote name="gpuopen-libs"
          fetch="https://github.com/GPUOpen-ProfessionalCompute-Libraries/" />
  <remote name="gpuopen-tools"
          fetch="https://github.com/GPUOpen-Tools/" />
  <remote name="KhronosGroup"
          fetch="https://github.com/KhronosGroup/" />
  <default revision="refs/tags/rocm-5.7.1"
           remote="roc-github"
           sync-c="true"
           sync-j="4" />
  <!--list of projects for ROCm-->
  <!--list of projects for ROCM-->
  <project name="ROCK-Kernel-Driver" />
  <project name="ROCT-Thunk-Interface" />
  <project name="ROCR-Runtime" />
  <project name="amdsmi" />
  <project name="rocm_smi_lib" />
  <project name="rocm-core" />
  <project name="rocm-cmake" />
  <project name="rocminfo" />
  <project name="rocm_bandwidth_test" />
  <project name="rocm-examples" />
  <project name="rocprofiler" remote="rocm-devtools" />
  <project name="roctracer" remote="rocm-devtools" />
  <project path="ROCm-OpenCL-Runtime/api/opencl/khronos/icd" name="OpenCL-ICD-Loader" remote="KhronosGroup" revision="6c03f8b58fafd9dd693eaac826749a5cfad515f8" />
  <project name="clang-ocl" />
  <project name="rdc" />
  <!--HIP Projects-->
  <project name="HIPIFY" />
  <project name="HIP" remote="rocm-devtools" />
  <project name="HIP-Examples" remote="rocm-devtools" />
  <project name="clr" remote="rocm-devtools" />
  <project name="HIPIFY" remote="rocm-devtools" />
  <project name="HIPCC" remote="rocm-devtools" />
  <!-- The following projects are all associated with the AMDGPU LLVM compiler -->
  <project name="half" />
  <project name="llvm-project" />
  <project name="spirv-llvm-translator" />
  <project name="ROCm-Device-Libs" />
  <project name="ROCm-CompilerSupport" />
  <project name="half" remote="rocm-swplat" revision="37742ce15b76b44e4b271c1e66d13d2fa7bd003e" />
  <!-- gdb projects -->
  <project name="ROCdbgapi" />
  <project name="ROCgdb" />
  <project name="rocr_debug_agent" />
  <project name="ROCgdb" remote="rocm-devtools" />
  <project name="ROCdbgapi" remote="rocm-devtools" />
  <project name="rocr_debug_agent" remote="rocm-devtools" />
  <!-- ROCm Libraries -->
  <project groups="mathlibs" name="AMDMIGraphX" />
  <project groups="mathlibs" name="MIVisionX" />
  <project groups="mathlibs" name="ROCmValidationSuite" />
  <project groups="mathlibs" name="composable_kernel" />
  <project groups="mathlibs" name="hipfort" />
  <project groups="mathlibs" name="rccl" />
  <project groups="mathlibs" name="rocAL" />
  <project groups="mathlibs" name="rocALUTION" />
  <project groups="mathlibs" name="rocDecode" />
  <project groups="mathlibs" name="rocJPEG" />
  <project groups="mathlibs" name="rocm-libraries" />
  <project groups="mathlibs" name="rocm-systems" />
  <project groups="mathlibs" name="rocPyDecode" />
  <project groups="mathlibs" name="rocSHMEM" />
  <project groups="mathlibs" name="rocm-cmake" />
  <project groups="mathlibs" name="rpp" />
  <project groups="mathlibs" name="TransferBench" />
  <project groups="mathlibs" name="rocBLAS" remote="rocm-swplat" />
  <project groups="mathlibs" name="Tensile" remote="rocm-swplat" />
  <project groups="mathlibs" name="hipTensor" remote="rocm-swplat" />
  <project groups="mathlibs" name="hipBLAS" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocFFT" remote="rocm-swplat" />
  <project groups="mathlibs" name="hipFFT" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocRAND" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocSPARSE" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocSOLVER" remote="rocm-swplat" />
  <project groups="mathlibs" name="hipSOLVER" remote="rocm-swplat" />
  <project groups="mathlibs" name="hipSPARSE" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocALUTION" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocThrust" remote="rocm-swplat" />
  <project groups="mathlibs" name="hipCUB" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocPRIM" remote="rocm-swplat" />
  <project groups="mathlibs" name="rocWMMA" remote="rocm-swplat" />
  <project groups="mathlibs" name="rccl" remote="rocm-swplat" />
  <project name="rocMLIR" remote="rocm-swplat" />
  <project name="MIOpen" remote="rocm-swplat" />
  <project name="composable_kernel" remote="rocm-swplat" />
  <project name="MIVisionX" remote="gpuopen-libs" />
  <project name="rpp" remote="gpuopen-libs" />
  <project name="hipfort" remote="rocm-swplat" />
  <project name="AMDMIGraphX" remote="rocm-swplat" />
  <project name="ROCmValidationSuite" remote="rocm-devtools" />
  <!-- Projects for OpenMP-Extras -->
  <project name="aomp" path="openmp-extras/aomp" />
  <project name="aomp-extras" path="openmp-extras/aomp-extras" />
  <project name="flang" path="openmp-extras/flang" />
</manifest>
  <project name="aomp" path="openmp-extras/aomp" remote="rocm-devtools" />
  <project name="aomp-extras" path="openmp-extras/aomp-extras" remote="rocm-devtools" />
  <project name="flang" path="openmp-extras/flang" remote="rocm-devtools" />
</manifest>

@@ -1,33 +0,0 @@
# MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

include(ROCMSphinxDoc)

rocm_add_sphinx_doc(
  "${CMAKE_CURRENT_SOURCE_DIR}"
  OUTPUT_DIR html
  BUILDER html
)

install(
  DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/html"
  DESTINATION "${CMAKE_INSTALL_DOCDIR}")

docs/about/compatibility/3rd-party-support-matrix.md (new file; 63 lines)
@@ -0,0 +1,63 @@
# Third party support matrix

ROCm™ supports various 3rd party libraries and frameworks. Supported versions
are tested and known to work. Non-supported versions of 3rd parties may also
work, but aren't tested.

## Deep learning

ROCm releases support the most recent and two prior releases of PyTorch and
TensorFlow.

| ROCm | [PyTorch](https://github.com/pytorch/pytorch/releases/) | [TensorFlow](https://github.com/tensorflow/tensorflow/releases/) |
|:------|:--------------------------:|:--------------------:|
| 5.0.2 | 1.8, 1.9, 1.10 | 2.6, 2.7, 2.8 |
| 5.1.3 | 1.9, 1.10, 1.11 | 2.7, 2.8, 2.9 |
| 5.2.x | 1.10, 1.11, 1.12 | 2.8, 2.9, 2.9 |
| 5.3.x | 1.10.1, 1.11, 1.12.1, 1.13 | 2.8, 2.9, 2.10 |
| 5.4.x | 1.10.1, 1.11, 1.12.1, 1.13 | 2.8, 2.9, 2.10, 2.11 |
| 5.5.x | 1.10.1, 1.11, 1.12.1, 1.13 | 2.10, 2.11, 2.13 |
| 5.6.x | 1.12.1, 1.13, 2.0 | 2.12, 2.13 |
| 5.7.x | 1.12.1, 1.13, 2.0 | 2.12, 2.13 |

(communication-libraries)=

## Communication libraries

ROCm supports [OpenUCX](https://openucx.org/), an open-source,
production-grade communication framework for data-centric and high performance
applications.

| UCX version | ROCm 5.4 and older | ROCm 5.5 and newer |
|:----------|:------------------:|:------------------:|
| ≤ 1.14.0 | COMPATIBLE | INCOMPATIBLE |
| 1.14.1+ | COMPATIBLE | COMPATIBLE |

The Unified Collective Communication ([UCC](https://github.com/openucx/ucc)) library also has
support for ROCm devices.

| UCC version | ROCm 5.5 and older | ROCm 5.6 and newer |
|:----------|:------------------:|:------------------:|
| ≤ 1.1.0 | COMPATIBLE | INCOMPATIBLE |
| 1.2.0+ | COMPATIBLE | COMPATIBLE |

## Algorithm libraries

ROCm releases provide algorithm libraries with interfaces compatible with
contemporary CUDA / NVIDIA HPC SDK alternatives.

* Thrust → rocThrust
* CUB → hipCUB

| ROCm | Thrust / CUB | HPC SDK |
|:------|:------------:|:-------:|
| 5.0.2 | 1.14 | 21.9 |
| 5.1.3 | 1.15 | 22.1 |
| 5.2.x | 1.15 | 22.2, 22.3 |
| 5.3.x | 1.16 | 22.7 |
| 5.4.x | 1.16 | 22.9 |
| 5.5.x | 1.17 | 22.9 |
| 5.6.x | 1.17.2 | 22.9 |
| 5.7.x | 1.17.2 | 22.9 |
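
As a rough illustration of this source-level compatibility (the file name is hypothetical; rocThrust ships Thrust-compatible headers with the ROCm toolchain, so an existing `#include <thrust/...>` program can often be rebuilt unchanged):

```bash
# Hypothetical: rebuild a Thrust-based program against rocThrust using hipcc.
hipcc -std=c++14 saxpy_thrust.cpp -o saxpy_thrust
./saxpy_thrust
```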

For the latest documentation of these libraries, refer to [API libraries](../../reference/library-index.md).

docs/about/compatibility/docker-image-support-matrix.rst (new file; 130 lines)
@@ -0,0 +1,130 @@
******************************************************************
Docker image support matrix
******************************************************************

AMD validates and publishes `PyTorch <https://hub.docker.com/r/rocm/pytorch>`_ and
`TensorFlow <https://hub.docker.com/r/rocm/tensorflow>`_ containers on Docker Hub. The following
tags, and associated inventories, are validated with ROCm 5.7.
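
For instance, a validated image can be pulled and started as follows; this is a sketch using the first PyTorch tag below, and the device flags are the usual ones for exposing AMD GPUs to a container:

```bash
# Pull a validated image and run it with GPU access (assumes a standard ROCm host setup).
docker pull rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1
docker run -it --device=/dev/kfd --device=/dev/dri --group-add video \
    rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1
```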

.. tab-set::

   .. tab-item:: PyTorch

      .. tab-set::

         .. tab-item:: Ubuntu 22.04

            Tag: `rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1 <https://hub.docker.com/layers/rocm/pytorch/rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1/images/sha256-21df283b1712f3d73884b9bc4733919374344ceacb694e8fbc2c50bdd3e767ee>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.10 <https://www.python.org/downloads/release/python-31013/>`_
              * `Torch 2.0.1 <https://github.com/ROCmSoftwarePlatform/pytorch/tree/release/2.0>`_
              * `Apex 0.1 <https://github.com/ROCmSoftwarePlatform/apex/tree/v0.1>`_
              * `Torchvision 0.15.0 <https://github.com/pytorch/vision/tree/release/0.15>`_
              * `Tensorboard 2.14.0 <https://github.com/tensorflow/tensorboard/tree/2.14>`_
              * `MAGMA <https://bitbucket.org/icl/magma/src/master/>`_
              * `UCX 1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
              * `OMPI 4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
              * `OFED 5.4.3 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

         .. tab-item:: Ubuntu 20.04

            Tag: `rocm/pytorch:rocm5.7_ubuntu20.04_py3.9_pytorch_staging <https://hub.docker.com/layers/rocm/pytorch/rocm5.7_ubuntu20.04_py3.9_pytorch_2.0.1/images/sha256-4dd86046e5f777f53ae40a75ecfc76a5e819f01f3b2d40eacbb2db95c2f971d4>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `Torch 2.1.0 <https://github.com/ROCmSoftwarePlatform/pytorch/tree/rocm5.7_internal_testing>`_
              * `Apex 0.1 <https://github.com/ROCmSoftwarePlatform/apex/tree/v0.1>`_
              * `Torchvision 0.16.0 <https://github.com/pytorch/vision/tree/release/0.16>`_
              * `Tensorboard 2.14.0 <https://github.com/tensorflow/tensorboard/tree/2.14>`_
              * `MAGMA <https://bitbucket.org/icl/magma/src/master/>`_
              * `UCX 1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
              * `OMPI 4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
              * `OFED 5.4.3 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

            Tag: `rocm/pytorch:rocm5.7_ubuntu20.04_py3.9_pytorch_1.12.1 <https://hub.docker.com/layers/rocm/pytorch/rocm5.7_ubuntu20.04_py3.9_pytorch_1.12.1/images/sha256-e67db9373c045a7b6defd43cc3d067e7d49fd5d380f3f8582d2fb219c1756e1f>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `Torch 1.12.1 <https://github.com/ROCmSoftwarePlatform/pytorch/tree/release/1.12>`_
              * `Apex 0.1 <https://github.com/ROCmSoftwarePlatform/apex/tree/v0.1>`_
              * `Torchvision 0.13.1 <https://github.com/pytorch/vision/tree/v0.13.1>`_
              * `Tensorboard 2.14.0 <https://github.com/tensorflow/tensorboard/tree/2.14>`_
              * `MAGMA <https://bitbucket.org/icl/magma/src/master/>`_
              * `UCX 1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
              * `OMPI 4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
              * `OFED 5.4.3 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

            Tag: `rocm/pytorch:rocm5.7_ubuntu20.04_py3.9_pytorch_1.13.1 <https://hub.docker.com/layers/rocm/pytorch/rocm5.7_ubuntu20.04_py3.9_pytorch_1.13.1/images/sha256-ed99d159026093d2aaf5c48c1e4b0911508773430377051372733f75c340a4c1>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `Torch 1.13.1 <https://github.com/ROCmSoftwarePlatform/pytorch/tree/release/1.13>`_
              * `Apex 0.1 <https://github.com/ROCmSoftwarePlatform/apex/tree/v0.1>`_
              * `Torchvision 0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
              * `Tensorboard 2.12.0 <https://github.com/tensorflow/tensorboard/tree/2.12.0>`_
              * `MAGMA <https://bitbucket.org/icl/magma/src/master/>`_
              * `UCX 1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
              * `OMPI 4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
              * `OFED 5.4.3 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

            Tag: `rocm/pytorch:rocm5.7_ubuntu20.04_py3.9_pytorch_2.0.1 <https://hub.docker.com/layers/rocm/pytorch/rocm5.7_ubuntu20.04_py3.9_pytorch_2.0.1/images/sha256-4dd86046e5f777f53ae40a75ecfc76a5e819f01f3b2d40eacbb2db95c2f971d4>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `Torch 2.0.1 <https://github.com/ROCmSoftwarePlatform/pytorch/tree/release/2.0>`_
              * `Apex 0.1 <https://github.com/ROCmSoftwarePlatform/apex/tree/v0.1>`_
              * `Torchvision 0.15.2 <https://github.com/pytorch/vision/tree/release/0.15>`_
              * `Tensorboard 2.14.0 <https://github.com/tensorflow/tensorboard/tree/2.14>`_
              * `MAGMA <https://bitbucket.org/icl/magma/src/master/>`_
              * `UCX 1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
              * `OMPI 4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
              * `OFED 5.4.3 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

         .. tab-item:: CentOS 7

            Tag: `rocm/pytorch:rocm5.7_centos7_py3.9_pytorch_staging <https://hub.docker.com/layers/rocm/pytorch/rocm5.7_centos7_py3.9_pytorch_staging/images/sha256-92240cdf0b4aa7afa76fc78be995caa19ee9c54b5c9f1683bdcac28cedb58d2b>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/yum/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `Torch 2.1.0 <https://github.com/ROCmSoftwarePlatform/pytorch/tree/rocm5.7_internal_testing>`_
              * `Apex 0.1 <https://github.com/ROCmSoftwarePlatform/apex/tree/v0.1>`_
              * `Torchvision 0.16.0 <https://github.com/pytorch/vision/tree/release/0.16>`_
              * `MAGMA <https://bitbucket.org/icl/magma/src/master/>`_

   .. tab-item:: TensorFlow

      .. tab-set::

         .. tab-item:: Ubuntu 20.04

            Tag: `rocm5.7-tf2.12-dev <https://hub.docker.com/layers/rocm/tensorflow/rocm5.7-tf2.12-dev/images/sha256-e0ac4d49122702e5167175acaeb98a79b9500f585d5e74df18facf6b52ce3e59>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `tensorflow-rocm 2.12.1 <https://pypi.org/project/tensorflow-rocm/2.12.1.570/>`_
              * `Tensorboard 2.12.3 <https://github.com/tensorflow/tensorboard/tree/2.12>`_

            Tag: `rocm5.7-tf2.13-dev <https://hub.docker.com/layers/rocm/tensorflow/rocm5.7-tf2.13-dev/images/sha256-6f995539eebc062aac2b53db40e2b545192d8b032d0deada8c24c6651a7ac332>`_

            * Inventory:

              * `ROCm 5.7 <https://repo.radeon.com/rocm/apt/5.7/>`_
              * `Python 3.9 <https://www.python.org/downloads/release/python-3918/>`_
              * `tensorflow-rocm 2.13.0 <https://pypi.org/project/tensorflow-rocm/2.13.0.570/>`_
              * `Tensorboard 2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_

docs/about/compatibility/linux-support.md (new file; 116 lines)
@@ -0,0 +1,116 @@
# GPU and OS support (Linux)

(linux-support)=

## Supported Linux distributions

AMD ROCm™ Platform supports the following Linux distributions.

::::{tab-set}

:::{tab-item} Supported

| Distribution | Processor Architectures | Validated Kernel | Support |
| :----------- | :---------------------: | :--------------: | ------: |
| RHEL 9.2 | x86-64 | 5.14 (5.14.0-284.11.1.el9_2.x86_64) | ✅ |
| RHEL 9.1 | x86-64 | 5.14.0-284.11.1.el9_2.x86_64 | ✅ |
| RHEL 8.8 | x86-64 | 4.18.0-477.el8.x86_64 | ✅ |
| RHEL 8.7 | x86-64 | 4.18.0-425.10.1.el8_7.x86_64 | ✅ |
| SLES 15 SP5 | x86-64 | 5.14.21-150500.53-default | ✅ |
| SLES 15 SP4 | x86-64 | 5.14.21-150400.24.63-default | ✅ |
| Ubuntu 22.04.2 | x86-64 | 5.19.0-45-generic | ✅ |
| Ubuntu 20.04.5 | x86-64 | 5.15.0-75-generic | ✅ |

:::{versionadded} 5.6

* RHEL 8.8 and 9.2 support is added.
* SLES 15 SP5 support is added

:::

:::{tab-item} Unsupported

| Distribution | Processor Architectures | Validated Kernel | Support |
| :----------- | :---------------------: | :--------------: | ------: |
| RHEL 9.0 | x86-64 | 5.14 | ❌ |
| RHEL 8.6 | x86-64 | 5.14 | ❌ |
| SLES 15 SP3 | x86-64 | 5.3 | ❌ |
| Ubuntu 22.04.0 | x86-64 | 5.15 LTS, 5.17 OEM | ❌ |
| Ubuntu 20.04.4 | x86-64 | 5.13 HWE, 5.13 OEM | ❌ |
| Ubuntu 22.04.1 | x86-64 | 5.15 LTS | ❌ |

:::

::::

✅: **Supported** - AMD performs full testing of all ROCm components on distro
GA image.
❌: **Unsupported** - AMD no longer performs builds and testing on these
previously supported distro GA images.

## Virtualization support

ROCm supports virtualization for select GPUs only as shown below.

| Hypervisor | Version | GPU | Validated Guest OS (validated kernel) |
|----------------|----------|-------|----------------------------------------------------------------------------------|
| VMWare | ESXi 8 | MI250 | Ubuntu 20.04 (`5.15.0-56-generic`) |
| VMWare | ESXi 8 | MI210 | Ubuntu 20.04 (`5.15.0-56-generic`), SLES 15 SP4 (`5.14.21-150400.24.18-default`) |
| VMWare | ESXi 7 | MI210 | Ubuntu 20.04 (`5.15.0-56-generic`), SLES 15 SP4 (`5.14.21-150400.24.18-default`) |

## Linux-supported GPUs

The table below shows supported GPUs for Instinct™, Radeon Pro™ and Radeon™
GPUs. Please click the tabs below to switch between GPU product lines. If a GPU
is not listed on this table, the GPU is not officially supported by AMD.

:::::{tab-set}

::::{tab-item} AMD Instinct™
:sync: instinct

| Product Name | Architecture | [LLVM Target](https://www.llvm.org/docs/AMDGPUUsage.html#processors) |Support |
|:------------:|:------------:|:--------------------------------------------------------------------:|:-------:|
| AMD Instinct™ MI250X | CDNA2 | gfx90a | ✅ |
| AMD Instinct™ MI250 | CDNA2 | gfx90a | ✅ |
| AMD Instinct™ MI210 | CDNA2 | gfx90a | ✅ |
| AMD Instinct™ MI100 | CDNA | gfx908 | ✅ |
| AMD Instinct™ MI50 | GCN5.1 | gfx906 | ✅ |
| AMD Instinct™ MI25 | GCN5.0 | gfx900 | ❌ |

::::

::::{tab-item} Radeon Pro™
:sync: radeonpro

| Name | Architecture |[LLVM Target](https://www.llvm.org/docs/AMDGPUUsage.html#processors) | Support|
|:----:|:------------:|:--------------------------------------------------------------------:|:-------:|
| AMD Radeon™ Pro W7900 | RDNA3 | gfx1100 | ✅ (Ubuntu 22.04 only)|
| AMD Radeon™ Pro W6800 | RDNA2 | gfx1030 | ✅ |
| AMD Radeon™ Pro V620 | RDNA2 | gfx1030 | ✅ |
| AMD Radeon™ Pro VII | GCN5.1 | gfx906 | ✅ |
::::

::::{tab-item} Radeon™
:sync: radeon

| Name | Architecture |[LLVM Target](https://www.llvm.org/docs/AMDGPUUsage.html#processors) | Support|
|:----:|:---------------:|:--------------------------------------------------------------------:|:-------:|
| AMD Radeon™ RX 7900 XTX | RDNA3 | gfx1100 | ✅ (Ubuntu 22.04 only)|
| AMD Radeon™ VII | GCN5.1 | gfx906 | ✅ |

::::
:::::

### Support status

✅: **Supported** - AMD enables these GPUs in our software distributions for
the corresponding ROCm product.
⚠️: **Deprecated** - Support will be removed in a future release.
❌: **Unsupported** - This configuration is not enabled in our software
distributions.

## CPU support

ROCm requires CPUs that support PCIe™ atomics. Modern CPUs after the release of
1st generation AMD Zen CPU and Intel™ Haswell support PCIe atomics.
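
A hedged way to check for this capability on Linux is to look for the PCIe AtomicOps bits in the verbose `lspci` capability dump (exact wording varies by `lspci` version and device):

```bash
# Hypothetical check for PCIe atomic-operation support (root needed for the full dump).
sudo lspci -vvv | grep -i "atomicops"
```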

docs/about/compatibility/openmp.md (new file; 474 lines)
@@ -0,0 +1,474 @@
# OpenMP support in ROCm

## Introduction

The ROCm™ installation includes an LLVM-based implementation that fully supports
the OpenMP 4.5 standard and a subset of OpenMP 5.0, 5.1, and 5.2 standards.
Fortran, C/C++ compilers, and corresponding runtime libraries are included.
Along with host APIs, the OpenMP compilers support offloading code and data onto
GPU devices. This document briefly describes the installation location of the
OpenMP toolchain, example usage of device offloading, and usage of `rocprof`
with OpenMP applications. The GPUs supported are the same as those supported by
this ROCm release. See the list of supported GPUs for [Linux](../../about/compatibility/linux-support.md) and [Windows](../../about/compatibility/windows-support.md).

The ROCm OpenMP compiler is implemented using LLVM compiler technology.
The following image illustrates the internal steps taken to translate a user’s application into an executable that can offload computation to the AMDGPU. The compilation is a two-pass process. Pass 1 compiles the application to generate the CPU code and Pass 2 links the CPU code to the AMDGPU device code.



### Installation

The OpenMP toolchain is automatically installed as part of the standard ROCm
installation and is available under `/opt/rocm-{version}/llvm`. The
sub-directories are:

* bin: Compilers (`flang` and `clang`) and other binaries.
* examples: The usage section below shows how to compile and run these programs.
* include: Header files.
* lib: Libraries including those required for target offload.
* lib-debug: Debug versions of the above libraries.

## OpenMP: usage

The example programs can be compiled and run by pointing the environment
variable `ROCM_PATH` to the ROCm install directory.

**Example:**

```bash
export ROCM_PATH=/opt/rocm-{version}
cd $ROCM_PATH/share/openmp-extras/examples/openmp/veccopy
sudo make run
```

```{note}
`sudo` is required since we are building inside the `/opt` directory.
Alternatively, copy the files to your home directory first.
```

The above invocation of Make compiles and runs the program. Note the options
that are required for target offload from an OpenMP program:

```bash
-fopenmp --offload-arch=<gpu-arch>
```

````{note}
The compiler also accepts the alternative offloading notation:

```bash
-fopenmp -fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa -march=<gpu-arch>
```
````

Obtain the value of `gpu-arch` by running the following command:

```bash
% /opt/rocm-{version}/bin/rocminfo | grep gfx
```
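
Putting the pieces together, a minimal end-to-end sketch might look like the following; the source file name and the `gfx90a` target are assumptions, so substitute the `gfx` value that `rocminfo` reports:

```bash
# Hypothetical: compile and run an OpenMP offload program with the ROCm LLVM compiler.
/opt/rocm/llvm/bin/clang -O2 -fopenmp --offload-arch=gfx90a veccopy.c -o veccopy
./veccopy
```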
|
||||
|
||||
[//]: # (dated link below, needs updating)
|
||||
|
||||
See the complete list of compiler command-line references
|
||||
[here](https://github.com/RadeonOpenCompute/llvm-project/blob/amd-stg-open/clang/docs/CommandGuide/clang.rst).
|
||||
|
||||
### Using `rocprof` with OpenMP

The following steps describe a typical workflow for using `rocprof` with OpenMP
code compiled with AOMP:

1. Run `rocprof` with the program command line:

   ```bash
   % rocprof <application> <args>
   ```

   This produces a `results.csv` file in the user’s current directory that
   shows basic stats such as kernel names, grid size, number of registers used,
   etc. The user can choose to specify a preferred output file name using the
   `-o` option.

2. Add the `--stats` option for a detailed result:

   ```bash
   % rocprof --stats <application> <args>
   ```

   The stats option produces timestamps for the kernels. Look into the output
   CSV file for the field `DurationNs`, which is useful in getting an
   understanding of the critical kernels in the code.

   Apart from `--stats`, the option `--timestamp on` also produces timestamps
   for the kernels.

3. After learning about the required kernels, the user can take a detailed look
   at each one of them. `rocprof` has support for hardware counters: a set of
   basic and a set of derived ones. See the complete list of counters using the
   options `--list-basic` and `--list-derived`. `rocprof` accepts either a text or
   an XML file as an input.

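For example, a counter-collection run driven by a text input file might look like the following sketch (the counter names and file names are illustrative; list the counters valid for your GPU with `--list-basic`):

```bash
# input.txt: each 'pmc' line names the hardware counters to collect
cat > input.txt <<'EOF'
pmc : SQ_WAVES GRBM_COUNT
EOF

% rocprof -i input.txt -o counters.csv <application> <args>
```
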
For more details on `rocprof`, refer to the {doc}`ROCProfilerV1 User Manual <rocprofiler:rocprofv1>`.

### Using tracing options

**Prerequisite:** When using the `--sys-trace` option, compile the OpenMP
program with:

```bash
-Wl,-rpath,/opt/rocm-{version}/lib -lamdhip64
```

The following tracing options are widely used to generate useful information:

* **`--hsa-trace`**: This option is used to get a JSON output file with the HSA
  API execution traces and a flat profile in a CSV file.

* **`--sys-trace`**: This option allows programmers to trace both HIP and HSA calls.
  Since it results in loading `libamdhip64.so`, follow the prerequisite
  mentioned above.

The above trace options produce a CSV and a JSON file. The CSV file
presents the data in a tabular format, and the JSON file can be visualized using
Google Chrome at `chrome://tracing/` or [Perfetto](https://perfetto.dev/).
Navigate to Chrome or Perfetto and load the JSON file to see the timeline of the
HSA calls.

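For instance, after compiling with the prerequisite flags above, a trace run could look like this (the application name is illustrative):

```bash
# Trace HIP and HSA calls; produces results.csv and results.json
% rocprof --sys-trace <application> <args>
```
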
For more details on tracing, refer to the {doc}`ROCProfilerV1 User Manual <rocprofiler:rocprofv1>`.

### Environment variables

:::{table}
:widths: auto

| Environment Variable | Purpose |
| --------------------------- | ---------------------------- |
| `OMP_NUM_TEAMS` | To set the number of teams for kernel launch, which is otherwise chosen by the implementation by default. You can set this number (subject to implementation limits) for performance tuning. |
| `LIBOMPTARGET_KERNEL_TRACE` | To print useful statistics for device operations. Setting it to 1 and running the program emits the name of every kernel launched, the number of teams and threads used, and the corresponding register usage. Setting it to 2 additionally emits timing information for kernel launches and data transfer operations between the host and the device. |
| `LIBOMPTARGET_INFO` | To print informational messages from the device runtime as the program executes. Setting it to a value of 1 or higher prints fine-grain information, while setting it to -1 prints complete information. |
| `LIBOMPTARGET_DEBUG` | To get detailed debugging information about data transfer operations and kernel launch when using a debug version of the device library. Set this environment variable to 1 to get the detailed information from the library. |
| `GPU_MAX_HW_QUEUES` | To set the number of HSA queues in the OpenMP runtime. The HSA queues are created on demand up to the maximum value as supplied here. The queue creation starts with a single initialized queue to avoid unnecessary allocation of resources. The provided value is capped if it exceeds the recommended, device-specific value. |
| `LIBOMPTARGET_AMDGPU_MAX_ASYNC_COPY_BYTES` | To set the threshold size up to which data transfers are initiated asynchronously. The default threshold size is 1*1024*1024 bytes (1MB). |
| `OMPX_FORCE_SYNC_REGIONS` | To force the runtime to execute all operations synchronously, i.e., wait for an operation to complete immediately. This affects data transfers and kernel execution. While it is mainly designed for debugging, it may have a minor positive effect on performance in certain situations. |
:::

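As an example, a run that caps the HSA queue count and prints per-kernel launch statistics could be set up as follows (the application name is illustrative):

```bash
export GPU_MAX_HW_QUEUES=4          # allow at most four HSA queues
export LIBOMPTARGET_KERNEL_TRACE=1  # print kernel name, teams/threads, register usage
./my_openmp_app
```
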
## OpenMP: features

The OpenMP programming model is greatly enhanced with the following new features
implemented in the past releases.

(openmp_usm)=

### Asynchronous behavior in OpenMP target regions

* Controlling Asynchronous Behavior

  The OpenMP offloading runtime executes in an asynchronous fashion by default, allowing multiple data transfers to start concurrently. However, if the data to be transferred becomes larger than the default threshold of 1MB, the runtime falls back to a synchronous data transfer. Transfers of buffers that have already been locked are always executed asynchronously.
  You can override this default behavior by setting `LIBOMPTARGET_AMDGPU_MAX_ASYNC_COPY_BYTES` and `OMPX_FORCE_SYNC_REGIONS`. See the [Environment variables](#environment-variables) table for details.

* Multithreaded Offloading on the Same Device

  The `libomptarget` plugin for GPU offloading allows the creation of separate configurable HSA queues per chiplet, which enables two or more threads to concurrently offload to the same device.

* Parallel Memory Copy Invocations

  Implicit asynchronous execution of a single target region enables parallel memory copy invocations.

### Unified shared memory

Unified Shared Memory (USM) provides a pointer-based approach to memory
management. To implement USM, fulfill the following system requirements along
with Xnack capability.

#### Prerequisites

* Linux kernel versions above 5.14
* Latest KFD driver packaged in the ROCm stack
* Xnack, as USM support can only be tested with applications compiled with Xnack
  capability

#### Xnack capability

When enabled, Xnack capability allows GPU threads to access CPU (system) memory,
allocated with OS allocators such as `malloc`, `new`, and `mmap`. Xnack must be
enabled both at compile time and at runtime. To enable Xnack support at compile
time, use:

```bash
--offload-arch=gfx908:xnack+
```

Or use the functionally equivalent Xnack-any option:

```bash
--offload-arch=gfx908
```

To enable Xnack functionality at runtime on a per-application basis,
use the environment variable:

```bash
HSA_XNACK=1
```

When Xnack support is not needed:

* Build the applications to maximize resource utilization using:

  ```bash
  --offload-arch=gfx908:xnack-
  ```

* At runtime, set the `HSA_XNACK` environment variable to 0.

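Putting compile-time and runtime control together, a typical Xnack-enabled build-and-run sequence might look like this sketch (the file name and `gfx90a` target are illustrative):

```bash
# Compile with Xnack enabled, then opt in at runtime for this run only
clang++ -fopenmp --offload-arch=gfx90a:xnack+ app.cpp -o app
HSA_XNACK=1 ./app
```
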
#### Unified shared memory pragma

This OpenMP pragma is available on MI200 through `xnack+` support.

```cpp
#pragma omp requires unified_shared_memory
```

As stated in the OpenMP specifications, this pragma makes the map clause on
target constructs optional. By default, on MI200, all memory allocated on the
host is fine grain. Using the map clause on a target construct is allowed; this
transforms the access semantics of the associated memory to coarse grain.

A simple program demonstrating the use of this feature is:

```cpp
// parallel_for.cpp
#include <stdlib.h>
#include <stdio.h>

#define N 64
#pragma omp requires unified_shared_memory
int main() {
  int n = N;
  int *a = new int[n];
  int *b = new int[n];

  for(int i = 0; i < n; i++)
    b[i] = i;

  #pragma omp target parallel for map(to:b[:n])
  for(int i = 0; i < n; i++)
    a[i] = b[i];

  for(int i = 0; i < n; i++)
    if(a[i] != i)
      printf("error at %d: expected %d, got %d\n", i, i, a[i]);

  return 0;
}
```

Compile and run with:

```bash
$ clang++ -O2 -target x86_64-pc-linux-gnu -fopenmp --offload-arch=gfx90a:xnack+ parallel_for.cpp
$ HSA_XNACK=1 ./a.out
```

In the above code example, pointer “a” is not mapped in the target region, while
pointer “b” is. Both are valid pointers on the GPU device and are passed by value
to the kernel implementing the target region. This means the pointer values on the
host and the device are the same.

The difference between the memory pages pointed to by these two variables is
that the pages pointed to by “a” are in fine-grain memory, while the pages pointed
to by “b” are in coarse-grain memory during and after the execution of the
target region. This is accomplished in the OpenMP runtime library with calls to
the ROCr runtime to set the pages pointed to by “b” as coarse grain.

### OMPT target support

The OpenMP runtime in ROCm implements a subset of the OMPT device APIs, as
described in the OpenMP specification document. These APIs allow first-party
tools to examine the profile and kernel traces that execute on a device. A tool
can register callbacks for data transfer and kernel dispatch entry points or use
APIs to start and stop tracing for device-related activities such as data
transfer and kernel dispatch timings and associated metadata. If device tracing
is enabled, trace records for device activities are collected during program
execution and returned to the tool using the APIs described in the
specification.

The following example demonstrates how a tool uses the supported OMPT target
APIs. The `README` in `/opt/rocm/llvm/examples/tools/ompt` outlines the steps to
be followed, and the provided example can be run as shown below:

```bash
cd $ROCM_PATH/share/openmp-extras/examples/tools/ompt/veccopy-ompt-target-tracing
sudo make run
```

The file `veccopy-ompt-target-tracing.c` simulates how a tool initiates device
activity tracing. The file `callbacks.h` shows the callbacks registered and
implemented by the tool.

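As a minimal sketch, the activation handshake defined by the OpenMP specification looks like the following; the example in the ROCm tree is more complete, and the function names here are illustrative:

```c
// ompt_tool_skeleton.c -- minimal OMPT tool activation sketch
#include <stdio.h>
#include <omp-tools.h>

// Called by the runtime after it initializes; device and target
// callbacks would be registered here through the lookup interface.
static int my_initialize(ompt_function_lookup_t lookup,
                         int initial_device_num, ompt_data_t *tool_data) {
  ompt_set_callback_t set_callback =
      (ompt_set_callback_t)lookup("ompt_set_callback");
  (void)set_callback; // e.g. set_callback(ompt_callback_target, ...)
  printf("OMPT tool initialized\n");
  return 1; // non-zero keeps the tool active
}

static void my_finalize(ompt_data_t *tool_data) {
  printf("OMPT tool finalized\n");
}

// The OpenMP runtime looks up this symbol at startup to activate the tool.
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t result = {&my_initialize, &my_finalize,
                                            {0}};
  return &result;
}
```
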
### Floating point atomic operations

The MI200-series GPUs support the generation of hardware floating-point atomics
using the OpenMP atomic pragma. The support includes single- and
double-precision floating-point atomic operations. The programmer must ensure
that the memory subjected to the atomic operation is in coarse-grain memory by
mapping it explicitly with the help of map clauses when it is not implicitly mapped
by the compiler as per the [OpenMP
specifications](https://www.openmp.org/specifications/). This makes these
hardware floating-point atomic instructions “fast,” as they are faster than
using a default compare-and-swap loop scheme, but at the same time “unsafe,” as
they are not supported on fine-grain memory. The operation in
`unified_shared_memory` mode also requires programmers to map the memory
explicitly when it is not implicitly mapped by the compiler.

To request fast floating-point atomic instructions at the file level, use the
compiler flag `-munsafe-fp-atomics` or a hint clause on a specific pragma:

```cpp
double a = 0.0;
#pragma omp atomic hint(AMD_fast_fp_atomics)
a = a + 1.0;
```

```{note}
`AMD_unsafe_fp_atomics` is an alias for `AMD_fast_fp_atomics`, and
`AMD_safe_fp_atomics` is implemented with a compare-and-swap loop.
```

To disable the generation of fast floating-point atomic instructions at the file
level, build using the option `-msafe-fp-atomics` or use a hint clause on a
specific pragma:

```cpp
double a = 0.0;
#pragma omp atomic hint(AMD_safe_fp_atomics)
a = a + 1.0;
```

The hint clause value always takes precedence over the compiler flag, which
allows programmers to create atomic constructs with a different behavior than
the rest of the file.

See the example below, where the user builds the program using
`-msafe-fp-atomics` to select a file-wide “safe atomic” compilation. However,
the fast atomics hint clause over variable “a” takes precedence and operates on
“a” using a fast/unsafe floating-point atomic, while the variable “b”, in the
absence of a hint clause, is operated upon using safe floating-point atomics as
per the compiler flag.

```cpp
double a = 0.0;
#pragma omp atomic hint(AMD_fast_fp_atomics)
a = a + 1.0;

double b = 0.0;
#pragma omp atomic
b = b + 1.0;
```

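For reference, a file-wide “safe atomic” build of such a file could look like the following (the file name and `gfx90a` target are illustrative):

```bash
clang++ -fopenmp --offload-arch=gfx90a -msafe-fp-atomics atomics.cpp -o atomics
```
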
### AddressSanitizer tool

AddressSanitizer (ASan) is a memory error detector tool utilized by applications to
detect various errors, ranging from spatial issues such as out-of-bounds access to
temporal issues such as use-after-free. The AOMP compiler supports ASan for AMD
GPUs with applications written in both HIP and OpenMP.

**Features supported on host platform (Target x86_64):**

* Use-after-free
* Buffer overflows
  * Heap buffer overflow
  * Stack buffer overflow
  * Global buffer overflow
* Use-after-return
* Use-after-scope
* Initialization order bugs

**Features supported on AMDGPU platform (`amdgcn-amd-amdhsa`):**

* Heap buffer overflow
* Global buffer overflow

**Software (kernel/OS) requirements:** Unified Shared Memory support with Xnack
capability. See the section on [Unified Shared Memory](#unified-shared-memory)
for prerequisites and details on Xnack.

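As a sketch, a device-side ASan build combines the sanitizer flags with an `xnack+` target and an Xnack-enabled run; the exact flags and file names below are illustrative and may vary by ROCm release:

```bash
# Instrument host and device code with ASan; GPU ASan requires xnack+
clang++ -fsanitize=address -shared-libsan -g -fopenmp \
    --offload-arch=gfx90a:xnack+ vecadd.cpp -o vecadd
HSA_XNACK=1 ./vecadd
```
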
**Example:**

* Heap buffer overflow

```cpp
void main() {
  ....... // Some program statements
  ....... // Some program statements
  #pragma omp target map(to : A[0:N], B[0:N]) map(from: C[0:N])
  {
    #pragma omp parallel for
    for(int i = 0; i < N; i++){
      C[i+10] = A[i] + B[i]; // writes past the end of C
    } // end of for loop
  }
  ....... // Some program statements
} // end of main
```

See the complete sample code for heap buffer overflow
[here](https://github.com/ROCm-Developer-Tools/aomp/blob/aomp-dev/examples/tools/asan/heap_buffer_overflow/openmp/vecadd-HBO.cpp).

* Global buffer overflow

```cpp
#pragma omp declare target
int A[N], B[N], C[N];
#pragma omp end declare target

void main(){
  ...... // some program statements
  ...... // some program statements
  #pragma omp target data map(to: A[0:N], B[0:N]) map(from: C[0:N])
  {
    #pragma omp target update to(A,B)
    #pragma omp target parallel for
    for(int i = 0; i < N; i++){
      C[i] = A[i*100] + B[i+22]; // reads beyond the bounds of A and B
    } // end of for loop
    #pragma omp target update from(C)
  }
  ........ // some program statements
} // end of main
```

See the complete sample code for global buffer overflow
[here](https://github.com/ROCm-Developer-Tools/aomp/blob/aomp-dev/examples/tools/asan/global_buffer_overflow/openmp/vecadd-GBO.cpp).

### Clang compiler option for kernel optimization

You can use the Clang compiler option `-fopenmp-target-fast` for kernel optimization if certain constraints implied by its component options are satisfied, as shown in the example after this list. `-fopenmp-target-fast` enables the following options:

* `-fopenmp-target-ignore-env-vars`: Enables code generation of specialized kernels, including no-loop and cross-team reductions.

* `-fopenmp-assume-no-thread-state`: Enables the compiler to assume that no thread in a parallel region modifies an Internal Control Variable (ICV), thus potentially reducing the device runtime code execution.

* `-fopenmp-assume-no-nested-parallelism`: Enables the compiler to assume that no thread in a parallel region encounters a parallel region, thus potentially reducing the device runtime code execution.

* `-O3` if no `-O*` option is specified by the user.

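A build using this option might look like the following (the file name and `gfx90a` target are illustrative):

```bash
clang++ -fopenmp --offload-arch=gfx90a -fopenmp-target-fast app.cpp -o app
```
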
### Specialized kernels

Clang will attempt to generate specialized kernels based on compiler options and OpenMP constructs. The following specialized kernels are supported:

* No-loop
* Big-jump-loop
* Cross-team reductions

To enable the generation of specialized kernels, follow these guidelines:

* Do not specify teams, threads, and schedule-related environment variables. The `num_teams` clause in an OpenMP target construct acts as an override and prevents the generation of the no-loop kernel. If the `num_teams` clause is a user requirement, Clang tries to generate the big-jump-loop kernel instead of the no-loop kernel.

* Assert the absence of the teams, threads, and schedule-related environment variables by adding the command-line option `-fopenmp-target-ignore-env-vars`.

* To automatically enable the specialized kernel generation, use `-Ofast` or `-fopenmp-target-fast` for compilation.

* To disable specialized kernel generation, use `-fno-openmp-target-ignore-env-vars`.

#### No-loop kernel generation

The no-loop kernel generation feature improves performance by generating a specialized kernel for certain OpenMP target constructs, such as `target teams distribute parallel for`. The specialized kernel generation assumes that every thread executes a single iteration of the user loop, which leads the runtime to launch a total number of GPU threads equal to or greater than the iteration space size of the target region loop. This allows the compiler to generate code for the loop body without an enclosing loop, resulting in reduced control-flow complexity and potentially better performance.

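As an illustration, a loop of the following shape, with no `num_teams` clause and no schedule-related environment variables set, is a candidate for the no-loop kernel; the file name and compile flags in the comment are illustrative:

```cpp
// vec_scale.cpp -- compile with, e.g.:
//   clang++ -fopenmp --offload-arch=gfx90a -fopenmp-target-fast vec_scale.cpp
void scale(double *x, double s, int n) {
  // No num_teams clause: each GPU thread can take exactly one iteration.
  #pragma omp target teams distribute parallel for map(tofrom: x[0:n])
  for (int i = 0; i < n; i++)
    x[i] *= s;
}
```
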
#### Big-jump-loop kernel generation

A no-loop kernel is not generated if the OpenMP teams construct uses a `num_teams` clause. Instead, the compiler attempts to generate a different specialized kernel, called the big-jump-loop kernel. The compiler launches the kernel with a grid size determined by the number of teams specified by the OpenMP `num_teams` clause and the `blocksize` chosen either by the compiler or specified by the corresponding OpenMP clause.

#### Cross-team optimized reduction kernel generation

If the OpenMP construct has a reduction clause, the compiler attempts to generate optimized code by utilizing efficient cross-team communication. New APIs for cross-team reduction are implemented in the device runtime and are automatically generated by clang.

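For instance, a reduction of the following shape is eligible for the cross-team optimized path (a sketch; flag and architecture choices as above):

```cpp
// dot.cpp -- a reduction candidate for cross-team optimized code generation
double dot(const double *x, const double *y, int n) {
  double sum = 0.0;
  #pragma omp target teams distribute parallel for reduction(+:sum) \
      map(to: x[0:n], y[0:n])
  for (int i = 0; i < n; i++)
    sum += x[i] * y[i];
  return sum;
}
```
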
24
docs/about/compatibility/user-kernel-space-compat-matrix.md
Normal file
@@ -0,0 +1,24 @@
# User/kernel-space support matrix

ROCm™ provides forward and backward compatibility between the Kernel Fusion
Driver (KFD) and its user space software for +/- 2 releases. This table shows
the compatibility combinations that are currently supported.

| KFD   | Tested user space versions |
|:------|:--------------------------:|
| 5.0.2 | 5.1.0, 5.2.0 |
| 5.1.0 | 5.0.2 |
| 5.1.3 | 5.2.0, 5.3.0 |
| 5.2.0 | 5.0.2, 5.1.3 |
| 5.2.3 | 5.3.0, 5.4.0 |
| 5.3.0 | 5.1.3, 5.2.3 |
| 5.3.3 | 5.4.0, 5.5.0 |
| 5.4.0 | 5.2.3, 5.3.3 |
| 5.4.3 | 5.5.0, 5.6.0 |
| 5.4.4 | 5.5.0 |
| 5.5.0 | 5.3.3, 5.4.3 |
| 5.5.1 | 5.6.0, 5.7.0 |
| 5.6.0 | 5.4.3, 5.5.1 |
| 5.6.1 | 5.7.0 |
| 5.7.0 | 5.5.0, 5.6.1 |
| 5.7.1 | 5.5.0, 5.6.1 |

80
docs/about/compatibility/windows-support.md
Normal file
@@ -0,0 +1,80 @@
# GPU and OS support (Windows)

(windows-support)=

## Supported SKUs

AMD HIP SDK supports the following Windows variants.

| Distribution        | Processor Architectures | Validated update |
|---------------------|-------------------------|------------------|
| Windows 10          | x86-64                  | 22H2 (GA)        |
| Windows 11          | x86-64                  | 22H2 (GA)        |
| Windows Server 2022 | x86-64                  |                  |

## Windows-supported GPUs

The table below shows supported Radeon Pro™ and Radeon™ GPUs. Please
click the tabs below to switch between GPU product lines. If a GPU is not listed
in this table, it is not officially supported by AMD.

::::{tab-set}

:::{tab-item} Radeon Pro™
:sync: radeonpro

| Name | Architecture |[LLVM Target](https://www.llvm.org/docs/AMDGPUUsage.html#processors) | Runtime | HIP SDK |
|:----:|:------------:|:--------------------------------------------------------------------:|:-------:|:----------------:|
| AMD Radeon Pro™ W7900 | RDNA3 | gfx1100 | ✅ | ✅ |
| AMD Radeon Pro™ W7800 | RDNA3 | gfx1100 | ✅ | ✅ |
| AMD Radeon Pro™ W6800 | RDNA2 | gfx1030 | ✅ | ✅ |
| AMD Radeon Pro™ W6600 | RDNA2 | gfx1032 | ✅ | ❌ |
| AMD Radeon Pro™ W5500 | RDNA1 | gfx1012 | ❌ | ❌ |
| AMD Radeon Pro™ VII | GCN5.1 | gfx906 | ❌ | ❌ |

:::

:::{tab-item} Radeon™
:sync: radeon

| Name | Architecture | [LLVM Target](https://www.llvm.org/docs/AMDGPUUsage.html#processors) | Runtime | HIP SDK |
|:----:|:------------:|:--------------------------------------------------------------------:|:-------:|:----------------:|
| AMD Radeon™ RX 7900 XTX | RDNA3 | gfx1100 | ✅ | ✅ |
| AMD Radeon™ RX 7900 XT | RDNA3 | gfx1100 | ✅ | ✅ |
| AMD Radeon™ RX 7600 | RDNA3 | gfx1102 | ✅ | ✅ |
| AMD Radeon™ RX 6950 XT | RDNA2 | gfx1030 | ✅ | ✅ |
| AMD Radeon™ RX 6900 XT | RDNA2 | gfx1030 | ✅ | ✅ |
| AMD Radeon™ RX 6800 XT | RDNA2 | gfx1030 | ✅ | ✅ |
| AMD Radeon™ RX 6800 | RDNA2 | gfx1030 | ✅ | ✅ |
| AMD Radeon™ RX 6750 XT | RDNA2 | gfx1031 | ✅ | ❌ |
| AMD Radeon™ RX 6700 XT | RDNA2 | gfx1031 | ✅ | ❌ |
| AMD Radeon™ RX 6700 | RDNA2 | gfx1031 | ✅ | ❌ |
| AMD Radeon™ RX 6650 XT | RDNA2 | gfx1032 | ✅ | ❌ |
| AMD Radeon™ RX 6600 XT | RDNA2 | gfx1032 | ✅ | ❌ |
| AMD Radeon™ RX 6600 | RDNA2 | gfx1032 | ✅ | ❌ |

:::

::::

### Component support

ROCm components are described in [What is ROCm?](../../what-is-rocm.md) Support
on Windows is provided with two levels of enablement.

* **Runtime**: Runtime enables the use of the HIP and OpenCL runtimes only.
* **HIP SDK**: Runtime plus additional components listed in [Libraries](../../reference/library-index.md).
  Note that some math libraries are Linux exclusive.

### Support status

✅: **Supported** - AMD enables these GPUs in our software distributions for
the corresponding ROCm product.

⚠️: **Deprecated** - Support will be removed in a future release.

❌: **Unsupported** - This configuration is not enabled in our software
distributions.

## CPU support

ROCm requires CPUs that support PCIe™ atomics. Modern CPUs after the release of
1st generation AMD Zen CPUs and Intel™ Haswell support PCIe atomics.

@@ -1,147 +1,9 @@
<head>
  <meta charset="UTF-8">
  <meta name="description" content="ROCm licensing terms">
  <meta name="keywords" content="license, licensing terms">
</head>

# License

# ROCm license

> Note: This license applies to the [ROCm repository](https://github.com/RadeonOpenCompute/ROCm) that primarily contains documentation. For other licensing information, refer to the [Licensing Terms page](./licensing).

```{include} ../../LICENSE
```

:::{note}
The preceding license applies to the [ROCm repository](https://github.com/ROCm/ROCm), which
primarily contains documentation. For licenses related to other ROCm components, refer to the
following section.
:::

## ROCm component licenses

ROCm is released by Advanced Micro Devices, Inc. (AMD) and is licensed per component separately.
The following table is a list of ROCm components with links to their respective license
terms. These components may include third party components subject to
additional licenses. Please review individual repositories for more information.

<!-- spellcheck-disable -->
| Component | License |
|:---------------------|:-------------------------|
| [AMD Compute Language Runtime (CLR)](https://github.com/ROCm/rocm-systems/tree/develop/projects/clr) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/clr/LICENSE.md) |
| [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/amd-staging/LICENSE) |
| [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) |
| [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) |
| [AQLprofile](https://github.com/ROCm/rocm-systems/tree/develop/projects/aqlprofile/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/aqlprofile/LICENSE.md) |
| [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) |
| [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) |
| [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) |
| [HIP](https://github.com/ROCm/rocm-systems/tree/develop/projects/hip/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/hip/LICENSE.md) |
| [hipamd](https://github.com/ROCm/rocm-systems/tree/develop/projects/clr/hipamd/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/clr/hipamd/LICENSE.md) |
| [hipBLAS](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipblas/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipblas/LICENSE.md) |
| [hipBLASLt](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipblaslt/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipblaslt/LICENSE.md) |
| [HIPCC](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/hipcc) | [MIT](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/hipcc/LICENSE.txt) |
| [hipCUB](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipcub/) | [Custom](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipcub/LICENSE.txt) |
| [hipFFT](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipfft/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipfft/LICENSE.md) |
| [hipfort](https://github.com/ROCm/hipfort/) | [MIT](https://github.com/ROCm/hipfort/blob/develop/LICENSE) |
| [HIPIFY](https://github.com/ROCm/HIPIFY/) | [MIT](https://github.com/ROCm/HIPIFY/blob/amd-staging/LICENSE.txt) |
| [hipRAND](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hiprand/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hiprand/LICENSE.md) |
| [hipSOLVER](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipsolver/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipsolver/LICENSE.md) |
| [hipSPARSE](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipsparse/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipsparse/LICENSE.md) |
| [hipSPARSELt](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hipsparselt/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hipsparselt/LICENSE.md) |
| [hipTensor](https://github.com/ROCm/rocm-libraries/tree/develop/projects/hiptensor/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/hiptensor/LICENSE) |
| [llvm-project](https://github.com/ROCm/llvm-project/) | [Apache](https://github.com/ROCm/llvm-project/blob/amd-staging/LICENSE.TXT) |
| [llvm-project/flang](https://github.com/ROCm/llvm-project/tree/amd-staging/flang) | [Apache 2.0](https://github.com/ROCm/llvm-project/blob/amd-staging/flang/LICENSE.TXT) |
| [MIGraphX](https://github.com/ROCm/AMDMIGraphX/) | [MIT](https://github.com/ROCm/AMDMIGraphX/blob/develop/LICENSE) |
| [MIOpen](https://github.com/ROCm/rocm-libraries/tree/develop/projects/miopen/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/miopen/LICENSE.md) |
| [MIVisionX](https://github.com/ROCm/MIVisionX/) | [MIT](https://github.com/ROCm/MIVisionX/blob/develop/LICENSE.txt) |
| [rocAL](https://github.com/ROCm/rocAL) | [MIT](https://github.com/ROCm/rocAL/blob/develop/LICENSE.txt) |
| [rocALUTION](https://github.com/ROCm/rocALUTION/) | [MIT](https://github.com/ROCm/rocALUTION/blob/develop/LICENSE.md) |
| [rocBLAS](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocblas/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocblas/LICENSE.md) |
| [ROCdbgapi](https://github.com/ROCm/ROCdbgapi/) | [MIT](https://github.com/ROCm/ROCdbgapi/blob/amd-staging/LICENSE.txt) |
| [rocDecode](https://github.com/ROCm/rocDecode) | [MIT](https://github.com/ROCm/rocDecode/blob/develop/LICENSE) |
| [rocFFT](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocfft/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocfft/LICENSE.md) |
| [ROCgdb](https://github.com/ROCm/ROCgdb/) | [GNU General Public License v3.0](https://github.com/ROCm/ROCgdb/blob/amd-staging/COPYING3) |
| [rocJPEG](https://github.com/ROCm/rocJPEG/) | [MIT](https://github.com/ROCm/rocJPEG/blob/develop/LICENSE) |
| [ROCK-Kernel-Driver](https://github.com/ROCm/ROCK-Kernel-Driver/) | [GPL 2.0 WITH Linux-syscall-note](https://github.com/ROCm/ROCK-Kernel-Driver/blob/master/COPYING) |
| [rocminfo](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocminfo/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocminfo/License.txt) |
| [ROCm Bandwidth Test](https://github.com/ROCm/rocm_bandwidth_test/) | [MIT](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
| [ROCm CMake](https://github.com/ROCm/rocm-cmake/) | [MIT](https://github.com/ROCm/rocm-cmake/blob/develop/LICENSE) |
| [ROCm Communication Collectives Library (RCCL)](https://github.com/ROCm/rccl/) | [Custom](https://github.com/ROCm/rccl/blob/develop/LICENSE.txt) |
| [ROCm-Core](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocm-core/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocm-core/LICENSE.md) |
| [ROCm Compute Profiler](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocprofiler-compute/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocprofiler-compute/LICENSE.md) |
| [ROCm Data Center (RDC)](https://github.com/ROCm/rocm-systems/tree/develop/projects/rdc/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rdc/LICENSE.md) |
| [ROCm-Device-Libs](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/device-libs) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/device-libs/LICENSE.TXT) |
| [ROCm-OpenCL-Runtime](https://github.com/ROCm/rocm-systems/tree/develop/projects/clr/opencl/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/clr/opencl/LICENSE.md) |
| [ROCm Performance Primitives (RPP)](https://github.com/ROCm/rpp) | [MIT](https://github.com/ROCm/rpp/blob/develop/LICENSE) |
| [ROCm SMI Lib](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocm-smi-lib/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocm-smi-lib/LICENSE.md) |
| [ROCm Systems Profiler](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocprofiler-systems/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocprofiler-systems/LICENSE.md) |
| [ROCm Validation Suite](https://github.com/ROCm/ROCmValidationSuite/) | [MIT](https://github.com/ROCm/ROCmValidationSuite/blob/master/LICENSE) |
| [rocPRIM](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocprim/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocprim/LICENSE.md) |
| [ROCProfiler](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocprofiler/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocprofiler/LICENSE.md) |
| [ROCprofiler-SDK](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocprofiler-sdk/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocprofiler-sdk/LICENSE.md) |
| [rocPyDecode](https://github.com/ROCm/rocPyDecode) | [MIT](https://github.com/ROCm/rocPyDecode/blob/develop/LICENSE.txt) |
| [rocRAND](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocrand/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocrand/LICENSE.md) |
| [ROCr Debug Agent](https://github.com/ROCm/rocr_debug_agent/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocr_debug_agent/blob/amd-staging/LICENSE.txt) |
| [ROCR-Runtime](https://github.com/ROCm/rocm-systems/tree/develop/projects/rocr-runtime/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm-systems/blob/develop/projects/rocr-runtime/LICENSE.txt) |
| [rocSHMEM](https://github.com/ROCm/rocSHMEM/) | [MIT](https://github.com/ROCm/rocSHMEM/blob/develop/LICENSE.md) |
| [rocSOLVER](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocsolver/) | [BSD-2-Clause](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocsolver/LICENSE.md) |
| [rocSPARSE](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocsparse/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocsparse/LICENSE.md) |
| [rocThrust](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocthrust/) | [Apache 2.0](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocthrust/LICENSE) |
| [ROCTracer](https://github.com/ROCm/rocm-systems/tree/develop/projects/roctracer/) | [MIT](https://github.com/ROCm/rocm-systems/blob/develop/projects/roctracer/LICENSE.md) |
| [rocWMMA](https://github.com/ROCm/rocm-libraries/tree/develop/projects/rocwmma/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/projects/rocwmma/LICENSE.md) |
| [Tensile](https://github.com/ROCm/rocm-libraries/tree/develop/shared/tensile/) | [MIT](https://github.com/ROCm/rocm-libraries/blob/develop/shared/tensile/LICENSE.md) |
| [TransferBench](https://github.com/ROCm/TransferBench) | [MIT](https://github.com/ROCm/TransferBench/blob/develop/LICENSE.md) |

Open sourced ROCm components are released via public GitHub
repositories, packages on [https://repo.radeon.com](https://repo.radeon.com) and other distribution channels.
Proprietary products are only available on [https://repo.radeon.com](https://repo.radeon.com).
Proprietary components are organized in a proprietary subdirectory in the package
repositories to distinguish them from open sourced packages.

```{note}
The following additional terms and conditions apply to your use of ROCm technical documentation.
```

```{include} ./licensing.md
```

©2023 - 2025 Advanced Micro Devices, Inc. All rights reserved.

The information presented in this document is for informational purposes only
and may contain technical inaccuracies, omissions, and typographical errors. The
information contained herein is subject to change and may be rendered inaccurate
for many reasons, including but not limited to product and roadmap changes,
component and motherboard version changes, new model and/or product releases,
product differences between differing manufacturers, software changes, BIOS
flashes, firmware upgrades, or the like. Any computer system has risks of
security vulnerabilities that cannot be completely prevented or mitigated. AMD
assumes no obligation to update or otherwise correct or revise this information.
However, AMD reserves the right to revise this information and to make changes
from time to time to the content hereof without obligation of AMD to notify any
person of such revisions or changes.

THIS INFORMATION IS PROVIDED “AS IS.” AMD MAKES NO REPRESENTATIONS OR WARRANTIES
WITH RESPECT TO THE CONTENTS HEREOF AND ASSUMES NO RESPONSIBILITY FOR ANY
INACCURACIES, ERRORS, OR OMISSIONS THAT MAY APPEAR IN THIS INFORMATION. AMD
SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT,
MERCHANTABILITY, OR FITNESS FOR ANY PARTICULAR PURPOSE. IN NO EVENT WILL AMD BE
LIABLE TO ANY PERSON FOR ANY RELIANCE, DIRECT, INDIRECT, SPECIAL, OR OTHER
CONSEQUENTIAL DAMAGES ARISING FROM THE USE OF ANY INFORMATION CONTAINED HEREIN,
EVEN IF AMD IS EXPRESSLY ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

AMD, the AMD Arrow logo, ROCm, and combinations thereof are trademarks of
Advanced Micro Devices, Inc. Other product names used in this publication are
for identification purposes only and may be trademarks of their respective
companies.

### Package licensing

:::{attention}
ROCprof Trace Decoder and AOCC CPU optimizations are provided in binary form, subject to the license agreement enclosed on [GitHub](https://github.com/ROCm/rocprof-trace-decoder/blob/amd-mainline/LICENSE) for ROCprof Trace Decoder, and [Developer Central](https://www.amd.com/en/developer/aocc.html) for AOCC. By using, installing,
copying or distributing ROCprof Trace Decoder or AOCC CPU Optimizations, you agree to
the terms and conditions of this license agreement. If you do not agree to the
terms of this agreement, do not install, copy or use ROCprof Trace Decoder or the
AOCC CPU Optimizations.
:::

For the rest of the ROCm packages, you can find the licensing information at the
following location: `/opt/rocm/share/doc/<component-name>/` or in the locations
specified in the preceding table.

For example, you can fetch the licensing information of the `amd_comgr`
component (Code Object Manager) from the `/opt/rocm/share/doc/amd_comgr/LICENSE.txt` file.

127
docs/about/licensing.md
Normal file
@@ -0,0 +1,127 @@
# ROCm licensing terms

ROCm™ is released by Advanced Micro Devices, Inc. and is licensed per component separately.
The following table is a list of ROCm components with links to their respective license
terms. These components may include third party components subject to
additional licenses. Please review individual repositories for more information.

The table shows ROCm components, the name of the license, and a link to the license terms.
The table is ordered to follow the ROCm manifest file.

<!-- spellcheck-disable -->
| Component | License |
|:---------------------|:-------------------------|
| [AMDMIGraphX](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX/) | [MIT](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX/blob/develop/LICENSE) |
| [HIPCC](https://github.com/ROCm-Developer-Tools/HIPCC/blob/develop/LICENSE.txt) | [MIT](https://github.com/ROCm-Developer-Tools/HIPCC/blob/develop/LICENSE.txt) |
| [HIPIFY](https://github.com/ROCm-Developer-Tools/HIPIFY/) | [MIT](https://github.com/ROCm-Developer-Tools/HIPIFY/blob/amd-staging/LICENSE.txt) |
| [HIP](https://github.com/ROCm-Developer-Tools/HIP/) | [MIT](https://github.com/ROCm-Developer-Tools/HIP/blob/develop/LICENSE.txt) |
| [MIOpenGEMM](https://github.com/ROCmSoftwarePlatform/MIOpenGEMM/) | [MIT](https://github.com/ROCmSoftwarePlatform/MIOpenGEMM/blob/master/LICENSE.txt) |
| [MIOpen](https://github.com/ROCmSoftwarePlatform/MIOpen/) | [MIT](https://github.com/ROCmSoftwarePlatform/MIOpen/blob/master/LICENSE.txt) |
| [MIVisionX](https://github.com/GPUOpen-ProfessionalCompute-Libraries/MIVisionX/) | [MIT](https://github.com/GPUOpen-ProfessionalCompute-Libraries/MIVisionX/blob/master/LICENSE.txt) |
| [RCP](https://github.com/GPUOpen-Tools/radeon_compute_profiler/) | [MIT](https://github.com/GPUOpen-Tools/radeon_compute_profiler/blob/master/LICENSE) |
| [ROCK-Kernel-Driver](https://github.com/RadeonOpenCompute/ROCK-Kernel-Driver/) | [GPL 2.0 WITH Linux-syscall-note](https://github.com/RadeonOpenCompute/ROCK-Kernel-Driver/blob/master/COPYING) |
| [ROCR-Runtime](https://github.com/RadeonOpenCompute/ROCR-Runtime/) | [The University of Illinois/NCSA](https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/master/LICENSE.txt) |
| [ROCT-Thunk-Interface](https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/) | [MIT](https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/master/LICENSE.md) |
| [ROCclr](https://github.com/ROCm-Developer-Tools/ROCclr/) | [MIT](https://github.com/ROCm-Developer-Tools/ROCclr/blob/develop/LICENSE.txt) |
| [ROCdbgapi](https://github.com/ROCm-Developer-Tools/ROCdbgapi/) | [MIT](https://github.com/ROCm-Developer-Tools/ROCdbgapi/blob/amd-master/LICENSE.txt) |
| [ROCgdb](https://github.com/ROCm-Developer-Tools/ROCgdb/) | [GNU General Public License v2.0](https://github.com/ROCm-Developer-Tools/ROCgdb/blob/amd-master/COPYING) |
| [ROCm-CompilerSupport](https://github.com/RadeonOpenCompute/ROCm-CompilerSupport/) | [The University of Illinois/NCSA](https://github.com/RadeonOpenCompute/ROCm-CompilerSupport/blob/amd-stg-open/LICENSE.txt) |
| [ROCm-Device-Libs](https://github.com/RadeonOpenCompute/ROCm-Device-Libs/) | [The University of Illinois/NCSA](https://github.com/RadeonOpenCompute/ROCm-Device-Libs/blob/amd-stg-open/LICENSE.TXT) |
| [ROCm-OpenCL-Runtime/api/opencl/khronos/icd](https://github.com/KhronosGroup/OpenCL-ICD-Loader/) | [Apache 2.0](https://github.com/KhronosGroup/OpenCL-ICD-Loader/blob/main/LICENSE) |
| [ROCm-OpenCL-Runtime](https://github.com/RadeonOpenCompute/ROCm-OpenCL-Runtime/) | [MIT](https://github.com/RadeonOpenCompute/ROCm-OpenCL-Runtime/blob/develop/LICENSE.txt) |
| [ROCmValidationSuite](https://github.com/ROCm-Developer-Tools/ROCmValidationSuite/) | [MIT](https://github.com/ROCm-Developer-Tools/ROCmValidationSuite/blob/master/LICENSE) |
| [Tensile](https://github.com/ROCmSoftwarePlatform/Tensile/) | [MIT](https://github.com/ROCmSoftwarePlatform/Tensile/blob/develop/LICENSE.md) |
| [aomp-extras](https://github.com/ROCm-Developer-Tools/aomp-extras/) | [MIT](https://github.com/ROCm-Developer-Tools/aomp-extras/blob/aomp-dev/LICENSE) |
| [aomp](https://github.com/ROCm-Developer-Tools/aomp/) | [Apache 2.0](https://github.com/ROCm-Developer-Tools/aomp/blob/aomp-dev/LICENSE) |
| [atmi](https://github.com/RadeonOpenCompute/atmi/) | [MIT](https://github.com/RadeonOpenCompute/atmi/blob/master/LICENSE.txt) |
| [clang-ocl](https://github.com/RadeonOpenCompute/clang-ocl/) | [MIT](https://github.com/RadeonOpenCompute/clang-ocl/blob/master/LICENSE) |
| [flang](https://github.com/ROCm-Developer-Tools/flang/) | [Apache 2.0](https://github.com/ROCm-Developer-Tools/flang/blob/master/LICENSE.txt) |
| [half](https://github.com/ROCmSoftwarePlatform/half/) | [MIT](https://github.com/ROCmSoftwarePlatform/half/blob/master/LICENSE.txt) |
| [hipBLAS](https://github.com/ROCmSoftwarePlatform/hipBLAS/) | [MIT](https://github.com/ROCmSoftwarePlatform/hipBLAS/blob/develop/LICENSE.md) |
| [hipCUB](https://github.com/ROCmSoftwarePlatform/hipCUB/) | [Custom](https://github.com/ROCmSoftwarePlatform/hipCUB/blob/develop/LICENSE.txt) |
| [hipFFT](https://github.com/ROCmSoftwarePlatform/hipFFT/) | [MIT](https://github.com/ROCmSoftwarePlatform/hipFFT/blob/develop/LICENSE.md) |
| [hipSOLVER](https://github.com/ROCmSoftwarePlatform/hipSOLVER/) | [MIT](https://github.com/ROCmSoftwarePlatform/hipSOLVER/blob/develop/LICENSE.md) |
| [hipSPARSELt](https://github.com/ROCmSoftwarePlatform/hipSPARSELt/) | [MIT](https://github.com/ROCmSoftwarePlatform/hipSPARSELt/blob/develop/LICENSE.md) |
| [hipSPARSE](https://github.com/ROCmSoftwarePlatform/hipSPARSE/) | [MIT](https://github.com/ROCmSoftwarePlatform/hipSPARSE/blob/develop/LICENSE.md) |
| [hipTensor](https://github.com/ROCmSoftwarePlatform/hipTensor) | [MIT](https://github.com/ROCmSoftwarePlatform/hipTensor/blob/develop/LICENSE) |
| [hipamd](https://github.com/ROCm-Developer-Tools/hipamd/) | [MIT](https://github.com/ROCm-Developer-Tools/hipamd/blob/develop/LICENSE.txt) |
| [hipfort](https://github.com/ROCmSoftwarePlatform/hipfort/) | [MIT](https://github.com/ROCmSoftwarePlatform/hipfort/blob/master/LICENSE) |
| [llvm-project](https://github.com/ROCm-Developer-Tools/llvm-project/) | [Apache](https://github.com/ROCm-Developer-Tools/llvm-project/blob/main/LICENSE.TXT) |
| [rccl](https://github.com/ROCmSoftwarePlatform/rccl/) | [Custom](https://github.com/ROCmSoftwarePlatform/rccl/blob/develop/LICENSE.txt) |
| [rdc](https://github.com/RadeonOpenCompute/rdc/) | [MIT](https://github.com/RadeonOpenCompute/rdc/blob/master/LICENSE) |
| [rocALUTION](https://github.com/ROCmSoftwarePlatform/rocALUTION/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocALUTION/blob/develop/LICENSE.md) |
| [rocBLAS](https://github.com/ROCmSoftwarePlatform/rocBLAS/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/LICENSE.md) |
| [rocFFT](https://github.com/ROCmSoftwarePlatform/rocFFT/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocFFT/blob/develop/LICENSE.md) |
| [rocPRIM](https://github.com/ROCmSoftwarePlatform/rocPRIM/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocPRIM/blob/develop/LICENSE.txt) |
| [rocRAND](https://github.com/ROCmSoftwarePlatform/rocRAND/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocRAND/blob/develop/LICENSE.txt) |
| [rocSOLVER](https://github.com/ROCmSoftwarePlatform/rocSOLVER/) | [BSD-2-Clause](https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/LICENSE.md) |
| [rocSPARSE](https://github.com/ROCmSoftwarePlatform/rocSPARSE/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocSPARSE/blob/develop/LICENSE.md) |
| [rocThrust](https://github.com/ROCmSoftwarePlatform/rocThrust/) | [Apache 2.0](https://github.com/ROCmSoftwarePlatform/rocThrust/blob/develop/LICENSE) |
| [rocWMMA](https://github.com/ROCmSoftwarePlatform/rocWMMA/) | [MIT](https://github.com/ROCmSoftwarePlatform/rocWMMA/blob/develop/LICENSE.md) |
| [rocm-cmake](https://github.com/RadeonOpenCompute/rocm-cmake/) | [MIT](https://github.com/RadeonOpenCompute/rocm-cmake/blob/develop/LICENSE) |
| [rocm_bandwidth_test](https://github.com/RadeonOpenCompute/rocm_bandwidth_test/) | [The University of Illinois/NCSA](https://github.com/RadeonOpenCompute/rocm_bandwidth_test/blob/master/LICENSE.txt) |
| [rocm_smi_lib](https://github.com/RadeonOpenCompute/rocm_smi_lib/) | [The University of Illinois/NCSA](https://github.com/RadeonOpenCompute/rocm_smi_lib/blob/master/License.txt) |
| [rocminfo](https://github.com/RadeonOpenCompute/rocminfo/) | [The University of Illinois/NCSA](https://github.com/RadeonOpenCompute/rocminfo/blob/master/License.txt) |
| [rocprofiler](https://github.com/ROCm-Developer-Tools/rocprofiler/) | [MIT](https://github.com/ROCm-Developer-Tools/rocprofiler/blob/amd-master/LICENSE) |
| [rocr_debug_agent](https://github.com/ROCm-Developer-Tools/rocr_debug_agent/) | [The University of Illinois/NCSA](https://github.com/ROCm-Developer-Tools/rocr_debug_agent/blob/master/LICENSE.txt) |
| [roctracer](https://github.com/ROCm-Developer-Tools/roctracer/) | [MIT](https://github.com/ROCm-Developer-Tools/roctracer/blob/amd-master/LICENSE) |
| rocm-llvm-alt | [AMD Proprietary License](https://www.amd.com/en/support/amd-software-eula) |

Open sourced ROCm components are released via public GitHub
repositories, packages on https://repo.radeon.com and other distribution channels.
Proprietary products are only available on https://repo.radeon.com. Currently, only
one component of ROCm, rocm-llvm-alt, is governed by a proprietary license.
Proprietary components are organized in a proprietary subdirectory in the package
repositories to distinguish them from open sourced packages.

The additional terms and conditions below apply to your use of ROCm technical
documentation.

©2023 Advanced Micro Devices, Inc. All rights reserved.

The information presented in this document is for informational purposes only
and may contain technical inaccuracies, omissions, and typographical errors. The
information contained herein is subject to change and may be rendered inaccurate
for many reasons, including but not limited to product and roadmap changes,
component and motherboard version changes, new model and/or product releases,
product differences between differing manufacturers, software changes, BIOS
flashes, firmware upgrades, or the like. Any computer system has risks of
security vulnerabilities that cannot be completely prevented or mitigated. AMD
assumes no obligation to update or otherwise correct or revise this information.
However, AMD reserves the right to revise this information and to make changes
from time to time to the content hereof without obligation of AMD to notify any
person of such revisions or changes.

THIS INFORMATION IS PROVIDED “AS IS.” AMD MAKES NO REPRESENTATIONS OR WARRANTIES
WITH RESPECT TO THE CONTENTS HEREOF AND ASSUMES NO RESPONSIBILITY FOR ANY
INACCURACIES, ERRORS, OR OMISSIONS THAT MAY APPEAR IN THIS INFORMATION. AMD
SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT,
MERCHANTABILITY, OR FITNESS FOR ANY PARTICULAR PURPOSE. IN NO EVENT WILL AMD BE
LIABLE TO ANY PERSON FOR ANY RELIANCE, DIRECT, INDIRECT, SPECIAL, OR OTHER
CONSEQUENTIAL DAMAGES ARISING FROM THE USE OF ANY INFORMATION CONTAINED HEREIN,
EVEN IF AMD IS EXPRESSLY ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

AMD, the AMD Arrow logo, ROCm, and combinations thereof are trademarks of
Advanced Micro Devices, Inc. Other product names used in this publication are
for identification purposes only and may be trademarks of their respective
companies.

## Package licensing

```{attention}
AQL Profiler and AOCC CPU optimization are both provided in binary form, each
subject to the license agreement enclosed in the directory for the binary and is
available here: `/opt/rocm/share/doc/rocm-llvm-alt/EULA`. By using, installing,
copying or distributing AQL Profiler and/or AOCC CPU Optimizations, you agree to
the terms and conditions of this license agreement. If you do not agree to the
terms of this agreement, do not install, copy or use the AQL Profiler and/or the
AOCC CPU Optimizations.
```

For the rest of the ROCm packages, you can find the licensing information at the
following location: `/opt/rocm/share/doc/<component-name>/`

For example, you can fetch the licensing information of the `amd_comgr`
component (Code Object Manager) from the `amd_comgr` folder. A file named
`LICENSE.txt` contains the license details at:
`/opt/rocm-5.4.3/share/doc/amd_comgr/LICENSE.txt`

25
docs/about/release-history.md
Normal file
@@ -0,0 +1,25 @@
# ROCm release history

| Version | Release Date |
| ------- | ------------ |
| [5.7.1](https://rocm.docs.amd.com/en/docs-5.7.1/) | Oct 13, 2023 |
| [5.7.0](https://rocm.docs.amd.com/en/docs-5.7.0/) | Sep 15, 2023 |
| [5.6.0](https://rocm.docs.amd.com/en/docs-5.6.0/) | Jun 28, 2023 |
| [5.5.1](https://rocm.docs.amd.com/en/docs-5.5.1/) | May 24, 2023 |
| [5.5.0](https://rocm.docs.amd.com/en/docs-5.5.0/) | May 1, 2023 |
| [5.4.3](https://rocm.docs.amd.com/en/docs-5.4.3/) | Feb 7, 2023 |
| [5.4.2](https://rocm.docs.amd.com/en/docs-5.4.2/) | Jan 13, 2023 |
| [5.4.1](https://rocm.docs.amd.com/en/docs-5.4.1/) | Dec 15, 2022 |
| [5.4.0](https://rocm.docs.amd.com/en/docs-5.4.0/) | Nov 30, 2022 |
| [5.3.3](https://rocm.docs.amd.com/en/docs-5.3.3/) | Nov 17, 2022 |
| [5.3.2](https://rocm.docs.amd.com/en/docs-5.3.2/) | Nov 9, 2022 |
| [5.3.0](https://rocm.docs.amd.com/en/docs-5.3.0/) | Oct 4, 2022 |
| [5.2.3](https://rocm.docs.amd.com/en/docs-5.2.3/) | Aug 18, 2022 |
| [5.2.1](https://rocm.docs.amd.com/en/docs-5.2.1/) | Jul 21, 2022 |
| [5.2.0](https://rocm.docs.amd.com/en/docs-5.2.0/) | Jun 28, 2022 |
| [5.1.3](https://rocm.docs.amd.com/en/docs-5.1.3/) | May 20, 2022 |
| [5.1.1](https://rocm.docs.amd.com/en/docs-5.1.1/) | Apr 8, 2022 |
| [5.1.0](https://rocm.docs.amd.com/en/docs-5.1.0/) | Mar 30, 2022 |
| [5.0.2](https://rocm.docs.amd.com/en/docs-5.0.2/) | Mar 4, 2022 |
| [5.0.1](https://rocm.docs.amd.com/en/docs-5.0.1/) | Feb 16, 2022 |
| [5.0.0](https://rocm.docs.amd.com/en/docs-5.0.0/) | Feb 9, 2022 |

93
docs/about/whats-new/whats-new.md
Normal file
@@ -0,0 +1,93 @@
# What's new in ROCm?

ROCm is now supported on Windows.

## Windows support

Starting with ROCm 5.5, the HIP SDK brings a subset of ROCm to developers on Windows.
The collection of features enabled on Windows is referred to as the HIP SDK.
These features allow developers to use the HIP runtime, HIP math libraries,
and HIP Primitive libraries. The following table shows the differences
between Windows and Linux releases.

|Component|Linux|Windows|
|---------|-----|-------|
|Driver|Radeon Software for Linux|AMD Software Pro Edition|
|Compiler|`hipcc`/`amdclang++`|`hipcc`/`clang++`|
|Debugger|`rocgdb`|No debugger available|
|Profiler|`rocprof`|[Radeon GPU Profiler](https://gpuopen.com/rgp/)|
|Porting Tools|HIPIFY|Coming soon|
|Runtime|HIP (open source)|HIP (closed source)|
|Math Libraries|Supported|Supported|
|Primitives Libraries|Supported|Supported|
|Communication Libraries|Supported|Not available|
|AI Libraries|MIOpen, MIGraphX|Not available|
|System Management|`rocm-smi-lib`, RDC, `rocminfo`|`amdsmi`, `hipInfo`|
|AI Frameworks|PyTorch, TensorFlow, etc.|Not available|
|CMake HIP Language|Enabled|Unsupported|
|Visual Studio|Not applicable|Plugin available|
|HIP Ray Tracing|Supported|Supported|

AMD continues to invest in Windows support and plans to release enhanced
features in subsequent revisions.

```{note}
The 5.5 Windows installer collectively groups the Math and Primitives
libraries.
```

```{note}
GPU support on Windows and Linux may differ. Refer to the
Windows and Linux GPU support tables separately.
```

```{note}
HIP Ray Tracing is not distributed via ROCm on Linux.
```

## ROCm release versioning

Linux OS releases set the canonical version numbers for ROCm. Windows follows
Linux version numbers because Windows releases are based on Linux ROCm
releases. However, not every Linux ROCm release has a corresponding Windows
release. The following table shows the ROCm releases on Windows and Linux.
Releases with both Windows and Linux support are referred to as joint
releases; releases with only Linux support are, from the Windows perspective,
referred to as skipped releases.

|Release version|Linux|Windows|
|---------------|-----|-------|
|5.5|✅|✅|
|5.6|✅|❌|

ROCm Linux releases are versioned using the Major.Minor.Patch version number
system. Windows releases are versioned with Major.Minor only.

In general, Windows releases trail Linux releases. Software developers who
wish to support both Linux and Windows using a single ROCm version should
refrain from upgrading ROCm unless there is a joint release.
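
To make the version mapping concrete, here is a small, hypothetical Python sketch; the helper names and the joint-release set are illustrative only and are not part of any ROCm tooling:

```python
# Hypothetical helpers, for illustration only (not part of ROCm).
JOINT_RELEASES = {"5.5"}  # 5.6 was Linux-only, i.e. skipped on Windows


def windows_version(linux_version: str) -> str:
    """Windows releases drop the Patch component of Major.Minor.Patch."""
    major, minor, *_patch = linux_version.split(".")
    return f"{major}.{minor}"


def is_joint_release(linux_version: str) -> bool:
    """True if this Linux release also shipped on Windows."""
    return windows_version(linux_version) in JOINT_RELEASES


print(windows_version("5.5.1"))   # "5.5" -- also the doc version to consult
print(is_joint_release("5.6.0"))  # False: a skipped release on Windows
```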

## Windows documentation implications

The ROCm documentation website contains both Windows and Linux documentation.
Just below each article title, a convenient article information section states
whether the page applies to Linux only, Windows only, or both operating
systems. To find the exact Windows documentation for a release of the HIP SDK,
view the ROCm documentation with the same Major.Minor version number and
ignore the Patch version; the Patch version only matters for Linux releases.
For convenience, Windows documentation will continue to be included in the
overall ROCm documentation for skipped Windows releases.

Windows release notes contain only information pertinent to Windows. To learn
about all the changes present in a Windows release, software developers must
read all the previous ROCm release notes, including those of ROCm versions
skipped on Windows.

## Windows builds from source

Not all source code required to build ROCm from source on Windows is
available under a permissive open source license. Build instructions for
Windows are only provided for projects that can be built from source on
Windows using a toolchain that has closed source build prerequisites. The
ROCm manifest file is not valid for Windows: AMD does not release a manifest
or tag its components on Windows. Users may use the corresponding Linux tags
to build on Windows.
@@ -1,136 +0,0 @@
ROCm Version,7.2.0,7.1.1,7.1.0,7.0.2,7.0.1/7.0.0,6.4.3,6.4.2,6.4.1,6.4.0,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.5,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
:ref:`Operating systems & kernels <OS-kernel-versions>` [#os-compatibility-past-60]_,Ubuntu 24.04.3,Ubuntu 24.04.3,Ubuntu 24.04.3,Ubuntu 24.04.3,Ubuntu 24.04.3,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,,
,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
,,,,,,,,,,,,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
,"RHEL 10.1, 10.0, 9.7, 9.6, 9.4","RHEL 10.1, 10.0, 9.7, 9.6, 9.4","RHEL 10.0, 9.6, 9.4","RHEL 10.0, 9.6, 9.4","RHEL 9.6, 9.4","RHEL 9.6, 9.4","RHEL 9.6, 9.4","RHEL 9.6, 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
,SLES 15 SP7,SLES 15 SP7,SLES 15 SP7,SLES 15 SP7,SLES 15 SP7,"SLES 15 SP7, SP6","SLES 15 SP7, SP6",SLES 15 SP6,SLES 15 SP6,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
,,,,,,,,,,,,,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
,"Oracle Linux 10, 9, 8","Oracle Linux 10, 9, 8","Oracle Linux 10, 9, 8","Oracle Linux 10, 9, 8","Oracle Linux 9, 8","Oracle Linux 9, 8","Oracle Linux 9, 8","Oracle Linux 9, 8","Oracle Linux 9, 8",Oracle Linux 8.10,Oracle Linux 8.10,Oracle Linux 8.10,Oracle Linux 8.10,Oracle Linux 8.9,Oracle Linux 8.9,Oracle Linux 8.9,Oracle Linux 8.9,Oracle Linux 8.9,Oracle Linux 8.9,Oracle Linux 8.9,,,
,"Debian 13, 12","Debian 13, 12","Debian 13, 12","Debian 13, 12",Debian 12,Debian 12,Debian 12,Debian 12,Debian 12,Debian 12,Debian 12,Debian 12,,,,,,,,,,,
,,,,Azure Linux 3.0,Azure Linux 3.0,Azure Linux 3.0,Azure Linux 3.0,Azure Linux 3.0,Azure Linux 3.0,Azure Linux 3.0,Azure Linux 3.0,,,,,,,,,,,,
,Rocky Linux 9,Rocky Linux 9,Rocky Linux 9,Rocky Linux 9,Rocky Linux 9,,,,,,,,,,,,,,,,,,
,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA4,CDNA4,CDNA4,CDNA4,CDNA4,,,,,,,,,,,,,,,,,,
,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
,RDNA4,RDNA4,RDNA4,RDNA4,RDNA4,RDNA4,RDNA4,RDNA4,,,,,,,,,,,,,,,
,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>` [#gpu-compatibility-past-60]_,gfx950,gfx950,gfx950,gfx950,gfx950,,,,,,,,,,,,,,,,,,
,gfx1201,gfx1201,gfx1201,gfx1201,gfx1201,gfx1201,gfx1201,gfx1201,,,,,,,,,,,,,,,
,gfx1200,gfx1200,gfx1200,gfx1200,gfx1200,gfx1200,gfx1200,gfx1200,,,,,,,,,,,,,,,
,gfx1101,gfx1101,gfx1101,gfx1101,gfx1101,gfx1101,gfx1101,gfx1101,,,,,,,,,,,,,,,
,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942,gfx942
,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
,,,,,,,,,,,,,,,,,,,,,,,
FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`PyTorch <../compatibility/ml-compatibility/pytorch-compatibility>`,"2.9.1, 2.8.0, 2.7.1","2.9, 2.8, 2.7","2.8, 2.7, 2.6","2.8, 2.7, 2.6","2.7, 2.6, 2.5","2.6, 2.5, 2.4, 2.3","2.6, 2.5, 2.4, 2.3","2.6, 2.5, 2.4, 2.3","2.6, 2.5, 2.4, 2.3","2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
:doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.20.0, 2.19.1, 2.18.1","2.20.0, 2.19.1, 2.18.1","2.20.0, 2.19.1, 2.18.1","2.19.1, 2.18.1, 2.17.1 [#tf-mi350-past-60]_","2.19.1, 2.18.1, 2.17.1 [#tf-mi350-past-60]_","2.18.1, 2.17.1, 2.16.2","2.18.1, 2.17.1, 2.16.2","2.18.1, 2.17.1, 2.16.2","2.18.1, 2.17.1, 2.16.2","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
:doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.8.0,0.7.1,0.7.1,0.6.0,0.6.0,0.4.35,0.4.35,0.4.35,0.4.35,0.4.31,0.4.31,0.4.31,0.4.31,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
:doc:`verl <../compatibility/ml-compatibility/verl-compatibility>` [#verl_compat-past-60]_,N/A,N/A,N/A,N/A,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.3.0.post0,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`Stanford Megatron-LM <../compatibility/ml-compatibility/stanford-megatron-lm-compatibility>` [#stanford-megatron-lm_compat-past-60]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,85f95ae,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`DGL <../compatibility/ml-compatibility/dgl-compatibility>` [#dgl_compat-past-60]_,N/A,N/A,N/A,N/A,2.4.0,2.4.0,N/A,N/A,2.4.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`Megablocks <../compatibility/ml-compatibility/megablocks-compatibility>` [#megablocks_compat-past-60]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.7.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`Ray <../compatibility/ml-compatibility/ray-compatibility>` [#ray_compat-past-60]_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,2.48.0.post0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`llama.cpp <../compatibility/ml-compatibility/llama-cpp-compatibility>` [#llama-cpp_compat-past-60]_,N/A,N/A,N/A,N/A,b6652,b6356,b6356,b6356,b5997,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`FlashInfer <../compatibility/ml-compatibility/flashinfer-compatibility>` [#flashinfer_compat-past-60]_,N/A,v0.2.5,N/A,N/A,N/A,N/A,N/A,v0.2.5,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.23.2,1.23.1,1.22.0,1.22.0,1.22.0,1.20.0,1.20.0,1.20.0,1.20.0,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,
THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
`UCC <https://github.com/ROCm/ucc>`_,>=1.4.0,>=1.4.0,>=1.4.0,>=1.4.0,>=1.4.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
`UCX <https://github.com/ROCm/ucx>`_,>=1.17.0,>=1.17.0,>=1.17.0,>=1.17.0,>=1.17.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
,,,,,,,,,,,,,,,,,,,,,,,
THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
Thrust,2.8.5,2.8.5,2.8.5,2.6.0,2.6.0,2.5.0,2.5.0,2.5.0,2.5.0,2.3.2,2.3.2,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
CUB,2.8.5,2.8.5,2.8.5,2.6.0,2.6.0,2.5.0,2.5.0,2.5.0,2.5.0,2.3.2,2.3.2,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
,,,,,,,,,,,,,,,,,,,,,,,
DRIVER & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`AMD GPU Driver <rocm-install-on-linux:reference/user-kernel-space-compat-matrix>`,"30.30.0, 30.20.1, 30.20.0 [#mi325x_KVM-past-60]_, 30.10.2, 30.10.1 [#driver_patch-past-60]_, 30.10, 6.4.x","30.20.1, 30.20.0 [#mi325x_KVM-past-60]_, 30.10.2, 30.10.1 [#driver_patch-past-60]_, 30.10, 6.4.x","30.20.0 [#mi325x_KVM-past-60]_, 30.10.2, 30.10.1 [#driver_patch-past-60]_, 30.10, 6.4.x","30.10.2, 30.10.1 [#driver_patch-past-60]_, 30.10, 6.4.x, 6.3.x","30.10.1 [#driver_patch-past-60]_, 30.10, 6.4.x, 6.3.x, 6.2.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.4.x, 6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
,,,,,,,,,,,,,,,,,,,,,,,
ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`Composable Kernel <composable_kernel:index>`,1.2.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
:doc:`MIGraphX <amdmigraphx:index>`,2.15.0,2.14.0,2.14.0,2.13.0,2.13.0,2.12.0,2.12.0,2.12.0,2.12.0,2.11.0,2.11.0,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
:doc:`MIOpen <miopen:index>`,3.5.1,3.5.1,3.5.1,3.5.0,3.5.0,3.4.0,3.4.0,3.4.0,3.4.0,3.3.0,3.3.0,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`MIVisionX <mivisionx:index>`,3.5.0,3.4.0,3.4.0,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
:doc:`rocAL <rocal:index>`,2.5.0,2.4.0,2.4.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`rocDecode <rocdecode:index>`,1.5.0,1.4.0,1.4.0,1.0.0,1.0.0,0.10.0,0.10.0,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
:doc:`rocJPEG <rocjpeg:index>`,1.3.0,1.2.0,1.2.0,1.1.0,1.1.0,0.8.0,0.8.0,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`rocPyDecode <rocpydecode:index>`,0.8.0,0.7.0,0.7.0,0.6.0,0.6.0,0.3.1,0.3.1,0.3.1,0.3.1,0.2.0,0.2.0,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`RPP <rpp:index>`,2.2.0,2.1.0,2.1.0,2.0.0,2.0.0,1.9.10,1.9.10,1.9.10,1.9.10,1.9.1,1.9.1,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
,,,,,,,,,,,,,,,,,,,,,,,
COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`RCCL <rccl:index>`,2.27.7,2.27.7,2.27.7,2.26.6,2.26.6,2.22.3,2.22.3,2.22.3,2.22.3,2.21.5,2.21.5,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
:doc:`rocSHMEM <rocshmem:index>`,3.2.0,3.1.0,3.0.0,3.0.0,3.0.0,2.0.1,2.0.1,2.0.0,2.0.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
,,,,,,,,,,,,,,,,,,,,,,,
MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
`half <https://github.com/ROCm/half>`_,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
:doc:`hipBLAS <hipblas:index>`,3.2.0,3.1.0,3.1.0,3.0.2,3.0.0,2.4.0,2.4.0,2.4.0,2.4.0,2.3.0,2.3.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
:doc:`hipBLASLt <hipblaslt:index>`,1.2.1,1.1.0,1.1.0,1.0.0,1.0.0,0.12.1,0.12.1,0.12.1,0.12.0,0.10.0,0.10.0,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
:doc:`hipFFT <hipfft:index>`,1.0.22,1.0.21,1.0.21,1.0.20,1.0.20,1.0.18,1.0.18,1.0.18,1.0.18,1.0.17,1.0.17,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
:doc:`hipfort <hipfort:index>`,0.7.1,0.7.1,0.7.1,0.7.0,0.7.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.1,0.5.1,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
:doc:`hipRAND <hiprand:index>`,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0,2.12.0,2.12.0,2.12.0,2.12.0,2.11.1,2.11.1,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
:doc:`hipSOLVER <hipsolver:index>`,3.2.0,3.1.0,3.1.0,3.0.0,3.0.0,2.4.0,2.4.0,2.4.0,2.4.0,2.3.0,2.3.0,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
:doc:`hipSPARSE <hipsparse:index>`,4.2.0,4.1.0,4.1.0,4.0.1,4.0.1,3.2.0,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
:doc:`hipSPARSELt <hipsparselt:index>`,0.2.6,0.2.5,0.2.5,0.2.4,0.2.4,0.2.3,0.2.3,0.2.3,0.2.3,0.2.2,0.2.2,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
:doc:`rocALUTION <rocalution:index>`,4.1.0,4.0.1,4.0.1,4.0.0,4.0.0,3.2.3,3.2.3,3.2.3,3.2.2,3.2.1,3.2.1,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
:doc:`rocBLAS <rocblas:index>`,5.2.0,5.1.1,5.1.0,5.0.2,5.0.0,4.4.1,4.4.1,4.4.0,4.4.0,4.3.0,4.3.0,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
:doc:`rocFFT <rocfft:index>`,1.0.36,1.0.35,1.0.35,1.0.34,1.0.34,1.0.32,1.0.32,1.0.32,1.0.32,1.0.31,1.0.31,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
:doc:`rocRAND <rocrand:index>`,4.2.0,4.1.0,4.1.0,4.0.0,4.0.0,3.3.0,3.3.0,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
:doc:`rocSOLVER <rocsolver:index>`,3.32.0,3.31.0,3.31.0,3.30.1,3.30.0,3.28.2,3.28.2,3.28.0,3.28.0,3.27.0,3.27.0,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
:doc:`rocSPARSE <rocsparse:index>`,4.2.0,4.1.0,4.1.0,4.0.2,4.0.2,3.4.0,3.4.0,3.4.0,3.4.0,3.3.0,3.3.0,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
:doc:`rocWMMA <rocwmma:index>`,2.2.0,2.1.0,2.0.0,2.0.0,2.0.0,1.7.0,1.7.0,1.7.0,1.7.0,1.6.0,1.6.0,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
:doc:`Tensile <tensile:src/index>`,4.44.0,4.44.0,4.44.0,4.44.0,4.44.0,4.43.0,4.43.0,4.43.0,4.43.0,4.42.0,4.42.0,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
,,,,,,,,,,,,,,,,,,,,,,,
PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`hipCUB <hipcub:index>`,4.2.0,4.1.0,4.1.0,4.0.0,4.0.0,3.4.0,3.4.0,3.4.0,3.4.0,3.3.0,3.3.0,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`hipTensor <hiptensor:index>`,2.2.0,2.0.0,2.0.0,2.0.0,2.0.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
:doc:`rocPRIM <rocprim:index>`,4.2.0,4.1.0,4.1.0,4.0.1,4.0.0,3.4.1,3.4.1,3.4.0,3.4.0,3.3.0,3.3.0,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
:doc:`rocThrust <rocthrust:index>`,4.2.0,4.1.0,4.1.0,4.0.0,4.0.0,3.3.0,3.3.0,3.3.0,3.3.0,3.3.0,3.3.0,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
,,,,,,,,,,,,,,,,,,,,,,,
SUPPORT LIBS,,,,,,,,,,,,,,,,,,,,,,,
`hipother <https://github.com/ROCm/hipother>`_,7.2.26015,7.1.52802,7.1.25424,7.0.51831,7.0.51830,6.4.43483,6.4.43483,6.4.43483,6.4.43482,6.3.42134,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`rocm-core <https://github.com/ROCm/rocm-core>`_,7.2.0,7.1.1,7.1.0,7.0.2,7.0.1/7.0.0,6.4.3,6.4.2,6.4.1,6.4.0,6.3.3,6.3.2,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.5,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
,,,,,,,,,,,,,,,,,,,,,,,
SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`AMD SMI <amdsmi:index>`,26.2.1,26.2.0,26.1.0,26.0.2,26.0.0,25.5.1,25.5.1,25.4.2,25.3.0,24.7.1,24.7.1,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
:doc:`ROCm Data Center Tool <rdc:index>`,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
:doc:`ROCm SMI <rocm_smi_lib:index>`,7.8.0,7.8.0,7.8.0,7.8.0,7.8.0,7.7.0,7.5.0,7.5.0,7.5.0,7.4.0,7.4.0,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60105,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
,,,,,,,,,,,,,,,,,,,,,,,
PERFORMANCE TOOLS,,,,,,,,,,,,,,,,,,,,,,,
:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,2.6.0,2.6.0,2.6.0,2.6.0,2.6.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.4.0,3.3.1,3.3.0,3.2.3,3.2.3,3.1.1,3.1.1,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,1.3.0,1.2.1,1.2.0,1.1.1,1.1.0,1.0.2,1.0.2,1.0.1,1.0.0,0.1.2,0.1.1,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`ROCProfiler <rocprofiler:index>`,2.0.70200,2.0.70101,2.0.70100,2.0.70002,2.0.70000,2.0.60403,2.0.60402,2.0.60401,2.0.60400,2.0.60303,2.0.60302,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60105,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,1.1.0,1.0.0,1.0.0,1.0.0,1.0.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A,N/A
:doc:`ROCTracer <roctracer:index>`,4.1.70200,4.1.70101,4.1.70100,4.1.70002,4.1.70000,4.1.60403,4.1.60402,4.1.60401,4.1.60400,4.1.60303,4.1.60302,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60105,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
,,,,,,,,,,,,,,,,,,,,,,,
DEVELOPMENT TOOLS,,,,,,,,,,,,,,,,,,,,,,,
:doc:`HIPIFY <hipify:index>`,22.0.0,20.0.0,20.0.0,20.0.0,20.0.0,19.0.0,19.0.0,19.0.0,19.0.0,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.4,0.77.4,0.77.4,0.77.4,0.77.3,0.77.2,0.77.2,0.77.2,0.77.2,0.77.0,0.77.0,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,16.3.0,16.3.0,16.3.0,16.3.0,16.3.0,15.2.0,15.2.0,15.2.0,15.2.0,15.2.0,15.2.0,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,0.3.0,N/A,N/A
:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.1.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.4,2.0.4,2.0.4,2.0.4,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
,,,,,,,,,,,,,,,,,,,,,,,
COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
`Flang <https://github.com/ROCm/flang>`_,22.0.0.26014,20.0.0.25444,20.0.0.25425,20.0.0.25385,20.0.0.25314,19.0.0.25224,19.0.0.25224,19.0.0.25184,19.0.0.25133,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
:doc:`llvm-project <llvm-project:index>`,22.0.0.26014,20.0.0.25444,20.0.0.25425,20.0.0.25385,20.0.0.25314,19.0.0.25224,19.0.0.25224,19.0.0.25184,19.0.0.25133,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,22.0.0.26014,20.0.0.25444,20.0.0.25425,20.0.0.25385,20.0.0.25314,19.0.0.25224,19.0.0.25224,19.0.0.25184,19.0.0.25133,18.0.0.25012,18.0.0.25012,18.0.0.24491,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
,,,,,,,,,,,,,,,,,,,,,,,
RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,,,,,,,,,,,,,,
:doc:`AMD CLR <hip:understand/amd_clr>`,7.2.26015,7.1.52802,7.1.25424,7.0.51831,7.0.51830,6.4.43484,6.4.43484,6.4.43483,6.4.43482,6.3.42134,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
:doc:`HIP <hip:index>`,7.2.26015,7.1.52802,7.1.25424,7.0.51831,7.0.51830,6.4.43484,6.4.43484,6.4.43483,6.4.43482,6.3.42134,6.3.42134,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
:doc:`ROCr Runtime <rocr-runtime:index>`,1.18.0,1.18.0,1.18.0,1.18.0,1.18.0,1.15.0,1.15.0,1.15.0,1.15.0,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0
@@ -1,221 +0,0 @@
.. meta::
   :description: ROCm compatibility matrix
   :keywords: GPU, architecture, hardware, compatibility, system, requirements, components, libraries

**************************************************************************************
Compatibility matrix
**************************************************************************************

Use this matrix to view ROCm compatibility and system requirements across successive major and minor releases.

You can also refer to the :ref:`past versions of the ROCm compatibility matrix <past-rocm-compatibility-matrix>`.

GPUs listed in the following table support compute workloads (no display
information or graphics). If you're using ROCm with AMD Radeon GPUs or Ryzen APUs for graphics
workloads, see :doc:`Use ROCm on Radeon and Ryzen <radeon:index>` to verify
compatibility and system requirements.

.. |br| raw:: html

   <br/>

.. container:: format-big-table

   .. csv-table::
      :header: "ROCm Version", "7.2.0", "7.1.1", "6.4.0"
      :stub-columns: 1

      :ref:`Operating systems & kernels <OS-kernel-versions>` [#os-compatibility]_,Ubuntu 24.04.3,Ubuntu 24.04.3,Ubuntu 24.04.2
      ,Ubuntu 22.04.5,Ubuntu 22.04.5,Ubuntu 22.04.5
      ,"RHEL 10.1, 10.0, 9.7, 9.6, 9.4","RHEL 10.1, 10.0, 9.7, 9.6, 9.4","RHEL 9.5, 9.4"
      ,RHEL 8.10,RHEL 8.10,RHEL 8.10
      ,SLES 15 SP7,SLES 15 SP7,SLES 15 SP6
      ,"Oracle Linux 10, 9, 8","Oracle Linux 10, 9, 8","Oracle Linux 9, 8"
      ,"Debian 13, 12","Debian 13, 12",Debian 12
      ,,,Azure Linux 3.0
      ,Rocky Linux 9,Rocky Linux 9,
      ,.. _architecture-support-compatibility-matrix:,,
      :doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA4,CDNA4,
      ,CDNA3,CDNA3,CDNA3
      ,CDNA2,CDNA2,CDNA2
      ,CDNA,CDNA,CDNA
      ,RDNA4,RDNA4,
      ,RDNA3,RDNA3,RDNA3
      ,RDNA2,RDNA2,RDNA2
      ,.. _gpu-support-compatibility-matrix:,,
      :doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>` [#gpu-compatibility]_,gfx950,gfx950,
      ,gfx1201,gfx1201,
      ,gfx1200,gfx1200,
      ,gfx1101,gfx1101,
      ,gfx1100,gfx1100,gfx1100
      ,gfx1030,gfx1030,gfx1030
      ,gfx942,gfx942,gfx942
      ,gfx90a,gfx90a,gfx90a
      ,gfx908,gfx908,gfx908
      ,,,
      FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix:,,
      :doc:`PyTorch <../compatibility/ml-compatibility/pytorch-compatibility>`,"2.9.1, 2.8.0, 2.7.1","2.9, 2.8, 2.7","2.6, 2.5, 2.4, 2.3"
      :doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.20.0, 2.19.1, 2.18.1","2.20.0, 2.19.1, 2.18.1","2.18.1, 2.17.1, 2.16.2"
      :doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.8.0,0.7.1,0.4.35
      :doc:`DGL <../compatibility/ml-compatibility/dgl-compatibility>` [#dgl_compat]_,N/A,N/A,2.4.0
      :doc:`llama.cpp <../compatibility/ml-compatibility/llama-cpp-compatibility>` [#llama-cpp_compat]_,N/A,N/A,b5997
      :doc:`FlashInfer <../compatibility/ml-compatibility/flashinfer-compatibility>` [#flashinfer_compat]_,N/A,v0.2.5,N/A
      `ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.23.2,1.23.1,1.20.0
      ,,,
      THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix:,,
      `UCC <https://github.com/ROCm/ucc>`_,>=1.4.0,>=1.4.0,>=1.3.0
      `UCX <https://github.com/ROCm/ucx>`_,>=1.17.0,>=1.17.0,>=1.15.0
      ,,,
      THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix:,,
      Thrust,2.8.5,2.8.5,2.5.0
      CUB,2.8.5,2.8.5,2.5.0
      ,,,
      DRIVER & USER SPACE [#kfd_support]_,.. _kfd-userspace-support-compatibility-matrix:,,
      :doc:`AMD GPU Driver <rocm-install-on-linux:reference/user-kernel-space-compat-matrix>`,"30.30.0, 30.20.1, 30.20.0 [#mi325x_KVM]_, |br| 30.10.2, 30.10.1 [#driver_patch]_, |br| 30.10, 6.4.x","30.20.1, 30.20.0 [#mi325x_KVM]_, |br| 30.10.2, 30.10.1 [#driver_patch]_, |br| 30.10, 6.4.x","6.4.x, 6.3.x, 6.2.x, 6.1.x"
      ,,,
      ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix:,,
      :doc:`Composable Kernel <composable_kernel:index>`,1.2.0,1.1.0,1.1.0
      :doc:`MIGraphX <amdmigraphx:index>`,2.15.0,2.14.0,2.12.0
      :doc:`MIOpen <miopen:index>`,3.5.1,3.5.1,3.4.0
      :doc:`MIVisionX <mivisionx:index>`,3.5.0,3.4.0,3.2.0
      :doc:`rocAL <rocal:index>`,2.5.0,2.4.0,2.2.0
      :doc:`rocDecode <rocdecode:index>`,1.5.0,1.4.0,0.10.0
      :doc:`rocJPEG <rocjpeg:index>`,1.3.0,1.2.0,0.8.0
      :doc:`rocPyDecode <rocpydecode:index>`,0.8.0,0.7.0,0.3.1
      :doc:`RPP <rpp:index>`,2.2.0,2.1.0,1.9.10
      ,,,
      COMMUNICATION,.. _commlibs-support-compatibility-matrix:,,
      :doc:`RCCL <rccl:index>`,2.27.7,2.27.7,2.22.3
      :doc:`rocSHMEM <rocshmem:index>`,3.2.0,3.1.0,2.0.0
      ,,,
      MATH LIBS,.. _mathlibs-support-compatibility-matrix:,,
      `half <https://github.com/ROCm/half>`_,1.12.0,1.12.0,1.12.0
      :doc:`hipBLAS <hipblas:index>`,3.2.0,3.1.0,2.4.0
      :doc:`hipBLASLt <hipblaslt:index>`,1.2.1,1.1.0,0.12.0
      :doc:`hipFFT <hipfft:index>`,1.0.22,1.0.21,1.0.18
      :doc:`hipfort <hipfort:index>`,0.7.1,0.7.1,0.6.0
      :doc:`hipRAND <hiprand:index>`,3.1.0,3.1.0,2.12.0
      :doc:`hipSOLVER <hipsolver:index>`,3.2.0,3.1.0,2.4.0
      :doc:`hipSPARSE <hipsparse:index>`,4.2.0,4.1.0,3.2.0
      :doc:`hipSPARSELt <hipsparselt:index>`,0.2.6,0.2.5,0.2.3
      :doc:`rocALUTION <rocalution:index>`,4.1.0,4.0.1,3.2.2
      :doc:`rocBLAS <rocblas:index>`,5.2.0,5.1.1,4.4.0
      :doc:`rocFFT <rocfft:index>`,1.0.36,1.0.35,1.0.32
      :doc:`rocRAND <rocrand:index>`,4.2.0,4.1.0,3.3.0
      :doc:`rocSOLVER <rocsolver:index>`,3.32.0,3.31.0,3.28.0
      :doc:`rocSPARSE <rocsparse:index>`,4.2.0,4.1.0,3.4.0
      :doc:`rocWMMA <rocwmma:index>`,2.2.0,2.1.0,1.7.0
      :doc:`Tensile <tensile:src/index>`,4.44.0,4.44.0,4.43.0
      ,,,
      PRIMITIVES,.. _primitivelibs-support-compatibility-matrix:,,
      :doc:`hipCUB <hipcub:index>`,4.2.0,4.1.0,3.4.0
      :doc:`hipTensor <hiptensor:index>`,2.2.0,2.0.0,1.5.0
      :doc:`rocPRIM <rocprim:index>`,4.2.0,4.1.0,3.4.0
      :doc:`rocThrust <rocthrust:index>`,4.2.0,4.1.0,3.3.0
      ,,,
      SUPPORT LIBS,,,
      `hipother <https://github.com/ROCm/hipother>`_,7.2.26015,7.1.52802,6.4.43482
      `rocm-core <https://github.com/ROCm/rocm-core>`_,7.2.0,7.1.1,6.4.0
      `ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr]_,N/A [#ROCT-rocr]_,N/A [#ROCT-rocr]_
      ,,,
      SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix:,,
      :doc:`AMD SMI <amdsmi:index>`,26.2.1,26.2.0,25.3.0
      :doc:`ROCm Data Center Tool <rdc:index>`,1.2.0,1.2.0,0.3.0
      :doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0
      :doc:`ROCm SMI <rocm_smi_lib:index>`,7.8.0,7.8.0,7.5.0
      :doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.3.0,1.3.0,1.1.0
      ,,,
      PERFORMANCE TOOLS,,,
      :doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,2.6.0,2.6.0,1.4.0
      :doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.4.0,3.3.1,3.1.0
      :doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,1.3.0,1.2.1,1.0.0
      :doc:`ROCProfiler <rocprofiler:index>`,2.0.70200,2.0.70101,2.0.60400
      :doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,1.1.0,1.0.0,0.6.0
      :doc:`ROCTracer <roctracer:index>`,4.1.70200,4.1.70101,4.1.60400
      ,,,
      DEVELOPMENT TOOLS,,,
      :doc:`HIPIFY <hipify:index>`,22.0.0,20.0.0,19.0.0
      :doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.14.0
      :doc:`ROCdbgapi <rocdbgapi:index>`,0.77.4,0.77.4,0.77.2
      :doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,16.3.0,16.3.0,15.2.0
      `rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.5.0,0.5.0,0.4.0
      :doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.1.0,2.1.0,2.0.4
      ,,,
      COMPILERS,.. _compilers-support-compatibility-matrix:,,
      `clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A
      :doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1
      `Flang <https://github.com/ROCm/flang>`_,22.0.0.26014,20.0.0.25444,19.0.0.25133
      :doc:`llvm-project <llvm-project:index>`,22.0.0.26014,20.0.0.25444,19.0.0.25133
      `OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,22.0.0.26014,20.0.0.25444,19.0.0.25133
      ,,,
      RUNTIMES,.. _runtime-support-compatibility-matrix:,,
      :doc:`AMD CLR <hip:understand/amd_clr>`,7.2.26015,7.1.52802,6.4.43482
      :doc:`HIP <hip:index>`,7.2.26015,7.1.52802,6.4.43482
      `OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0
      :doc:`ROCr Runtime <rocr-runtime:index>`,1.18.0,1.18.0,1.15.0

.. rubric:: Footnotes

.. [#os-compatibility] Some operating systems are supported on specific GPUs. For detailed information about operating systems supported on ROCm 7.2.0, see the latest :ref:`supported_distributions`. For version-specific information, see `ROCm 7.1.1 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-7.1.1/reference/system-requirements.html#supported-operating-systems>`__ and `ROCm 6.4.0 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.4.0/reference/system-requirements.html#supported-operating-systems>`__.
.. [#gpu-compatibility] Some GPUs have limited operating system support. For detailed information about GPUs supporting ROCm 7.2.0, see the latest :ref:`supported_GPUs`. For version-specific information, see `ROCm 7.1.1 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-7.1.1/reference/system-requirements.html#supported-gpus>`__, `ROCm 7.1.0 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-7.1.0/reference/system-requirements.html#supported-gpus>`__, and `ROCm 6.4.0 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.4.0/reference/system-requirements.html#supported-gpus>`__.
.. [#dgl_compat] DGL is only supported on ROCm 7.0.0, 6.4.3, and 6.4.0.
.. [#llama-cpp_compat] llama.cpp is only supported on ROCm 7.0.0 and 6.4.x.
.. [#flashinfer_compat] FlashInfer is only supported on ROCm 7.1.1 and 6.4.1.
.. [#mi325x_KVM] For AMD Instinct MI325X KVM SR-IOV users, do not use AMD GPU Driver (amdgpu) 30.20.0.
.. [#driver_patch] AMD GPU Driver (amdgpu) 30.10.1 is a quality release that resolves an issue identified in the 30.10 release. There are no other significant changes or feature additions in ROCm 7.0.1 compared to ROCm 7.0.0. AMD GPU Driver (amdgpu) 30.10.1 is compatible with ROCm 7.0.1 and ROCm 7.0.0.
.. [#kfd_support] As of ROCm 6.4.0, forward and backward compatibility between the AMD GPU Driver (amdgpu) and its user space software is provided up to a year apart. For earlier ROCm releases, compatibility is provided for +/- 2 releases. The supported user space versions on this page were accurate as of the initial ROCm release. For the most up-to-date information, see the latest `User and AMD GPU Driver support matrix <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/user-kernel-space-compat-matrix.html>`_.
.. [#ROCT-rocr] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package.

.. _OS-kernel-versions:

Operating systems, kernel and Glibc versions
*********************************************

For detailed information on the operating systems supported by ROCm 7.2.0 and the associated kernel and glibc versions, see the latest :ref:`supported_distributions`. For version-specific information, see `ROCm 7.1.1 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-7.1.1/reference/system-requirements.html#supported-operating-systems>`__ and `ROCm 6.4.0 <https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.4.0/reference/system-requirements.html#supported-operating-systems>`__.

.. note::

   * See `Red Hat Enterprise Linux Release Dates <https://access.redhat.com/articles/3078>`_ to learn about the specific kernel versions supported on Red Hat Enterprise Linux (RHEL).
   * See `List of SUSE Linux Enterprise Server kernel <https://www.suse.com/support/kb/doc/?id=000019587>`_ to learn about the specific kernel versions supported on SUSE Linux Enterprise Server (SLES).

..
   Footnotes and ref anchors in the historical tables below should be appended with "-past-60" to
   differentiate them from the footnote references in the latest compatibility matrix above; it also
   makes find & replace easy. An easy way to work is to download the historical .csv file and open it
   in Excel. When the content is ready, delete the columns you don't need to build the current
   compatibility matrix for the table above, then find & replace all instances of "-past-60" to make
   it ready for that table.

.. _past-rocm-compatibility-matrix:

Past versions of ROCm compatibility matrix
***************************************************

Expand for a full historical view:

.. dropdown:: ROCm 6.0 - Present

   You can `download the entire .csv <../downloads/compatibility-matrix-historical-6.0.csv>`_ for offline reference.

   .. csv-table::
      :file: compatibility-matrix-historical-6.0.csv
      :header-rows: 1
      :stub-columns: 1

.. rubric:: Footnotes

.. [#os-compatibility-past-60] Some operating systems are supported on specific GPUs. For detailed information, see :ref:`supported_distributions` and select the required ROCm version for version-specific support.
.. [#gpu-compatibility-past-60] Some GPUs have limited operating system support. For detailed information, see :ref:`supported_GPUs` and select the required ROCm version for version-specific support.
.. [#tf-mi350-past-60] TensorFlow 2.17.1 is not supported on AMD Instinct MI350 Series GPUs. Use TensorFlow 2.19.1 or 2.18.1 with MI350 Series GPUs instead.
.. [#verl_compat-past-60] verl is only supported on ROCm 7.0.0 and 6.2.0.
.. [#stanford-megatron-lm_compat-past-60] Stanford Megatron-LM is only supported on ROCm 6.3.0.
.. [#dgl_compat-past-60] DGL is only supported on ROCm 7.0.0, 6.4.3, and 6.4.0.
.. [#megablocks_compat-past-60] Megablocks is only supported on ROCm 6.3.0.
.. [#ray_compat-past-60] Ray is only supported on ROCm 7.0.0 and 6.4.1.
.. [#llama-cpp_compat-past-60] llama.cpp is only supported on ROCm 7.0.0 and 6.4.x.
.. [#flashinfer_compat-past-60] FlashInfer is only supported on ROCm 7.1.1 and 6.4.1.
.. [#mi325x_KVM-past-60] For AMD Instinct MI325X KVM SR-IOV users, do not use AMD GPU Driver (amdgpu) 30.20.0.
.. [#driver_patch-past-60] AMD GPU Driver (amdgpu) 30.10.1 is a quality release that resolves an issue identified in the 30.10 release. There are no other significant changes or feature additions in ROCm 7.0.1 compared to ROCm 7.0.0. AMD GPU Driver (amdgpu) 30.10.1 is compatible with ROCm 7.0.1 and ROCm 7.0.0.
.. [#kfd_support-past-60] As of ROCm 6.4.0, forward and backward compatibility between the AMD GPU Driver (amdgpu) and its user space software is provided up to a year apart. For earlier ROCm releases, compatibility is provided for +/- 2 releases. The supported user space versions on this page were accurate as of the initial ROCm release. For the most up-to-date information, see the latest `User and AMD GPU Driver support matrix <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/user-kernel-space-compat-matrix.html>`_.
.. [#ROCT-rocr-past-60] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package.

@@ -1,364 +0,0 @@
:orphan:

.. meta::
   :description: Deep Graph Library (DGL) compatibility
   :keywords: GPU, CPU, deep graph library, DGL, deep learning, framework compatibility

.. version-set:: rocm_version latest

********************************************************************************
DGL compatibility
********************************************************************************

Deep Graph Library (`DGL <https://www.dgl.ai/>`__) is an easy-to-use, high-performance, and scalable
Python package for deep learning on graphs. DGL is framework agnostic, meaning
that if a deep graph model is a component in an end-to-end application, the rest of
the logic can be implemented in PyTorch.

DGL provides a high-performance graph object that can reside on either CPUs or GPUs.
It bundles structural data and features for better control and provides a variety of functions
for computing with graph objects, including efficient and customizable message-passing
primitives for Graph Neural Networks.
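
As a quick orientation, the following minimal sketch builds a toy graph and runs one
round of message passing with DGL's built-in functions. The graph, feature sizes, and
field names are illustrative only, and it assumes a working DGL install with the
PyTorch backend (such as the Docker images below):

.. code-block:: python

   import torch
   import dgl
   import dgl.function as fn

   # A toy directed graph with 3 nodes and edges 0->1, 1->2, 2->0.
   g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))

   # Attach a 4-dimensional feature vector to every node.
   g.ndata["h"] = torch.randn(g.num_nodes(), 4)

   # One round of message passing: copy each source node's features onto
   # its out-edges, then sum the incoming messages at each destination.
   g.update_all(fn.copy_u("h", "m"), fn.sum("m", "h_sum"))

   print(g.ndata["h_sum"].shape)  # torch.Size([3, 4])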

Support overview
================================================================================

- The ROCm-supported version of DGL is maintained in the official `https://github.com/ROCm/dgl
  <https://github.com/ROCm/dgl>`__ repository, which differs from the
  `https://github.com/dmlc/dgl <https://github.com/dmlc/dgl>`__ upstream repository.

- To get started and install DGL on ROCm, use the prebuilt :ref:`Docker images <dgl-docker-compat>`,
  which include ROCm, DGL, and all required dependencies.

- See the :doc:`ROCm DGL installation guide <rocm-install-on-linux:install/3rd-party/dgl-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://www.dgl.ai/pages/start.html>`__
  for additional context.

.. _dgl-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `DGL images <https://hub.docker.com/r/rocm/dgl/tags>`__
with ROCm backends on Docker Hub. The following Docker image tags and associated
inventories represent the latest available DGL version from the official Docker Hub.
Click the |docker-icon| to view the image on Docker Hub.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker image
     - ROCm
     - DGL
     - PyTorch
     - Ubuntu
     - Python
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4.0.amd0_rocm7.0.0_ubuntu24.04_py3.12_pytorch_2.8.0/images/sha256-943698ddf54c22a7bcad2e5b4ff467752e29e4ba6d0c926789ae7b242cbd92dd"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.8.0 <https://github.com/pytorch/pytorch/releases/tag/v2.8.0>`__
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4.0.amd0_rocm7.0.0_ubuntu24.04_py3.12_pytorch_2.6.0/images/sha256-b2ec286a035eb7d0a6aab069561914d21a3cac462281e9c024501ba5ccedfbf7"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.6.0 <https://github.com/pytorch/pytorch/releases/tag/v2.6.0>`__
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4.0.amd0_rocm7.0.0_ubuntu22.04_py3.10_pytorch_2.7.1/images/sha256-d27aee16df922ccf0bcd9107bfcb6d20d34235445d456c637e33ca6f19d11a51"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.7.1 <https://github.com/pytorch/pytorch/releases/tag/v2.7.1>`__
     - 22.04
     - `3.10.16 <https://www.python.org/downloads/release/python-31016/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4.0.amd0_rocm6.4.3_ubuntu24.04_py3.12_pytorch_2.6.0/images/sha256-f3ba6a3c9ec9f6c1cde28449dc9780e0c4c16c4140f4b23f158565fbfd422d6b"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `6.4.3 <https://repo.radeon.com/rocm/apt/6.4.3/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.6.0 <https://github.com/pytorch/pytorch/releases/tag/v2.6.0>`__
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4_rocm6.4_ubuntu24.04_py3.12_pytorch_release_2.6.0/images/sha256-8ce2c3bcfaa137ab94a75f9e2ea711894748980f57417739138402a542dd5564"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.6.0 <https://github.com/pytorch/pytorch/releases/tag/v2.6.0>`__
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4_rocm6.4_ubuntu24.04_py3.12_pytorch_release_2.4.1/images/sha256-cf1683283b8eeda867b690229c8091c5bbf1edb9f52e8fb3da437c49a612ebe4"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.4.1 <https://github.com/pytorch/pytorch/releases/tag/v2.4.1>`__
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4_rocm6.4_ubuntu22.04_py3.10_pytorch_release_2.4.1/images/sha256-4834f178c3614e2d09e89e32041db8984c456d45dfd20286e377ca8635686554"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.4.1 <https://github.com/pytorch/pytorch/releases/tag/v2.4.1>`__
     - 22.04
     - `3.10.16 <https://www.python.org/downloads/release/python-31016/>`__
     - MI300X, MI250X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/dgl/dgl-2.4_rocm6.4_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-88740a2c8ab4084b42b10c3c6ba984cab33dd3a044f479c6d7618e2b2cb05e69"><i class="fab fa-docker fa-lg"></i> rocm/dgl</a>

     - `6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`__
     - `2.4.0 <https://github.com/dmlc/dgl/releases/tag/v2.4.0>`__
     - `2.3.0 <https://github.com/pytorch/pytorch/releases/tag/v2.3.0>`__
     - 22.04
     - `3.10.16 <https://www.python.org/downloads/release/python-31016/>`__
     - MI300X, MI250X

.. _dgl-key-rocm-libraries:

Key ROCm libraries for DGL
================================================================================

DGL on ROCm depends on specific libraries that affect its features and performance.
Using the DGL Docker container, or building DGL with the provided Dockerfile or a ROCm base image, is recommended.
If you prefer to build it yourself, ensure the following dependencies are installed
(a quick runtime check is shown after the table):

.. list-table::
   :header-rows: 1

   * - ROCm library
     - ROCm 7.0.0 Version
     - ROCm 6.4.x Version
     - Purpose
   * - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
     - 1.1.0
     - 1.1.0
     - Enables faster execution of core operations like matrix multiplication
       (GEMM), convolutions, and transformations.
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - 3.0.0
     - 2.4.0
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - 1.0.0
     - 0.12.0
     - An extension of the hipBLAS library that provides additional features
       like epilogues fused into the matrix multiplication kernel and the use
       of integer tensor cores.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - 4.0.0
     - 3.4.0
     - Provides a C++ template library of parallel algorithms for reduction,
       scan, sort, and select.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - 1.0.20
     - 1.0.18
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - 3.0.0
     - 2.12.0
     - Provides fast random number generation for GPUs.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - 3.0.0
     - 2.4.0
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
       singular value decompositions (SVD).
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - 4.0.1
     - 3.2.0
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
   * - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
     - 0.2.4
     - 0.2.3
     - Accelerates operations on structured-sparse matrices, such as sparse
       matrix-vector or matrix-matrix products.
   * - `hipTensor <https://github.com/ROCm/hipTensor>`_
     - 2.0.0
     - 1.5.0
     - Optimizes for high-performance tensor operations, such as contractions.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - 3.5.0
     - 3.4.0
     - Optimizes deep learning primitives such as convolutions, pooling,
       normalization, and activation functions.
   * - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
     - 2.13.0
     - 2.12.0
     - Adds graph-level optimizations, ONNX model and mixed-precision support,
       and enables Ahead-of-Time (AOT) compilation.
   * - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
     - 3.3.0
     - 3.2.0
     - Optimizes acceleration for computer vision and AI workloads like
       preprocessing, augmentation, and inferencing.
   * - `rocAL <https://github.com/ROCm/rocAL>`_
     - 2.3.0
     - 2.2.0
     - Accelerates the data pipeline by offloading intensive preprocessing and
       augmentation tasks. rocAL is part of MIVisionX.
   * - `RCCL <https://github.com/ROCm/rccl>`_
     - 2.26.6
     - 2.22.3
     - Optimizes multi-GPU communication for operations like AllReduce and
       Broadcast.
   * - `rocDecode <https://github.com/ROCm/rocDecode>`_
     - 1.0.0
     - 0.10.0
     - Provides hardware-accelerated data decoding capabilities, particularly
       for image, video, and other dataset formats.
   * - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
     - 1.1.0
     - 0.8.0
     - Provides hardware-accelerated JPEG image decoding and encoding.
   * - `RPP <https://github.com/ROCm/RPP>`_
     - 2.0.0
     - 1.9.10
     - Speeds up data augmentation, transformation, and other preprocessing steps.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - 4.0.0
     - 3.3.0
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
   * - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
     - 2.0.0
     - 1.7.0
     - Accelerates warp-level matrix-multiply and matrix-accumulate operations
       to speed up matrix multiplication (GEMM) and accumulation with
       mixed-precision support.
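
Before training, it can help to confirm that the PyTorch backend underneath DGL
actually sees the ROCm stack. The following is a minimal smoke test; it assumes a
ROCm build of PyTorch, as shipped in the Docker images above (on ROCm, PyTorch
reuses the ``torch.cuda`` namespace):

.. code-block:: python

   import torch

   # On a ROCm build of PyTorch, torch.version.hip is a version string;
   # on CUDA or CPU-only builds it is None.
   print("HIP runtime:", torch.version.hip)

   # ROCm devices are exposed through the torch.cuda namespace.
   print("GPU visible:", torch.cuda.is_available())
   if torch.cuda.is_available():
       print("Device name:", torch.cuda.get_device_name(0))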
|
||||
|
||||

.. _dgl-supported-features-latest:

Supported features with ROCm 7.0.0
================================================================================

Many functions and methods available upstream are also supported in DGL on ROCm.
Instead of listing them all, support is grouped into the following categories to provide a general overview.

* DGL Base
* DGL Backend
* DGL Data
* DGL Dataloading
* DGL Graph
* DGL Function
* DGL Ops
* DGL Sampling
* DGL Transforms
* DGL Utils
* DGL Distributed
* DGL Geometry
* DGL Mpops
* DGL NN
* DGL Optim
* DGL Sparse
* GraphBolt

.. _dgl-unsupported-features-latest:

Unsupported features with ROCm 7.0.0
================================================================================

* TF32 support (only supported for PyTorch 2.7 and above)
* Kineto/ROCTracer integration

.. _dgl-unsupported-functions:

Unsupported functions with ROCm 7.0.0
================================================================================

* ``bfs``
* ``format``
* ``multiprocess_sparse_adam_state_dict``
* ``half_spmm``
* ``segment_mm``
* ``gather_mm_idx_b``
* ``sample_labors_prob``
* ``sample_labors_noprob``
* ``sparse_admin``

.. _dgl-recommendations:

Use cases and recommendations
================================================================================

DGL can be used for graph learning and for building popular graph models like
GAT, GCN, and GraphSAGE (see the sketch after this list). Using these models, a
variety of use cases are supported:

- Recommender systems
- Network optimization and analysis
- 1D (temporal) and 2D (image) classification
- Drug discovery
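
The following minimal sketch shows the pattern these use cases share: build a
graph, attach node features, and apply a graph convolution on an AMD GPU. The
two-layer dimensions and random features here are illustrative, not taken from
the blog posts below:

.. code-block:: python

   import torch
   import dgl
   import dgl.nn as dglnn

   # A small directed graph with 4 nodes and 4 edges (a cycle).
   g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
   g.ndata["feat"] = torch.randn(4, 16)

   # On ROCm builds of PyTorch, the "cuda" device string targets AMD GPUs.
   device = "cuda" if torch.cuda.is_available() else "cpu"
   g = g.to(device)

   # A single GCN layer; GAT or GraphSAGE layers follow the same pattern.
   conv = dglnn.GraphConv(16, 8).to(device)
   h = conv(g, g.ndata["feat"])  # message passing + aggregation
   print(h.shape)  # torch.Size([4, 8])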

For use cases and recommendations, refer to the `AMD ROCm blog <https://rocm.blogs.amd.com/>`__,
where you can search for DGL examples and best practices to optimize your workloads on AMD GPUs.

* Although multiple use cases of DGL have been tested and verified, a few are
  outlined in the `DGL in the Real World: Running GNNs on Real Use Cases
  <https://rocm.blogs.amd.com/artificial-intelligence/dgl_blog2/README.html>`__ blog
  post, which walks through four real-world graph neural network (GNN) workloads
  implemented with the Deep Graph Library on ROCm. It covers tasks ranging from
  heterogeneous e-commerce graphs and multiplex networks (GATNE) to molecular graph
  regression (GNN-FiLM) and EEG-based neurological diagnosis (EEG-GCNN). For each use
  case, the authors detail the dataset and task, how DGL is used, and their experience
  porting to ROCm. The post shows that DGL codebases often run without modification, with
  seamless integration of graph operations, message passing, sampling, and convolution.

* The `Graph Neural Networks (GNNs) at Scale: DGL with ROCm on AMD Hardware
  <https://rocm.blogs.amd.com/artificial-intelligence/why-graph-neural/README.html>`__
  blog post introduces the Deep Graph Library (DGL) and its enablement on the AMD ROCm platform,
  bringing high-performance graph neural network (GNN) training to AMD GPUs. DGL bridges
  the gap between dense tensor frameworks and the irregular nature of graph data through a
  graph-first, message-passing abstraction. Its design ensures scalability, flexibility, and
  interoperability across frameworks like PyTorch and TensorFlow. AMD's ROCm integration
  enables DGL to run efficiently on HIP-based GPUs, supported by prebuilt Docker containers
  and open-source repositories. This marks a major step in AMD's mission to advance open,
  scalable AI ecosystems beyond traditional architectures.

You can pre-process datasets and begin training on AMD GPUs through:

* Single-GPU training/inference
* Multi-GPU training

Previous versions
===============================================================================
See :doc:`rocm-install-on-linux:install/3rd-party/previous-versions/dgl-history` to find documentation for previous releases
of the ``ROCm/dgl`` Docker image.

@@ -1,113 +0,0 @@

:orphan:

.. meta::
   :description: FlashInfer compatibility
   :keywords: GPU, LLM, FlashInfer, deep learning, framework compatibility

.. version-set:: rocm_version latest

********************************************************************************
FlashInfer compatibility
********************************************************************************

`FlashInfer <https://docs.flashinfer.ai/index.html>`__ is a library and kernel generator
for Large Language Models (LLMs) that provides high-performance graphics
processing unit (GPU) kernel implementations. FlashInfer focuses on LLM serving
and inference, delivering high performance across diverse scenarios.

FlashInfer features highly efficient attention kernels, load-balanced scheduling, and memory-optimized
techniques, while supporting customized attention variants. It is compatible with ``torch.compile`` and
offers high-performance LLM-specific operators, with easy integration through PyTorch and C++ APIs.

.. note::

   The ROCm port of FlashInfer is under active development, and some features are not yet available.
   For the latest feature compatibility matrix, refer to the ``README`` of the
   `https://github.com/ROCm/flashinfer <https://github.com/ROCm/flashinfer>`__ repository.

Support overview
================================================================================

- The ROCm-supported version of FlashInfer is maintained in the official `https://github.com/ROCm/flashinfer
  <https://github.com/ROCm/flashinfer>`__ repository, which differs from the
  `https://github.com/flashinfer-ai/flashinfer <https://github.com/flashinfer-ai/flashinfer>`__
  upstream repository.

- To get started and install FlashInfer on ROCm, use the prebuilt :ref:`Docker images <flashinfer-docker-compat>`,
  which include ROCm, FlashInfer, and all required dependencies.

- See the :doc:`ROCm FlashInfer installation guide <rocm-install-on-linux:install/3rd-party/flashinfer-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://docs.flashinfer.ai/installation.html>`__
  for additional context.

.. _flashinfer-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `FlashInfer images <https://hub.docker.com/r/rocm/flashinfer/tags>`__
with ROCm backends on Docker Hub. The following Docker image tag and associated
inventories represent the latest available FlashInfer version from the official Docker Hub.
Click |docker-icon| to view the image on Docker Hub.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker image
     - ROCm
     - FlashInfer
     - PyTorch
     - Ubuntu
     - Python
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/flashinfer/flashinfer-0.2.5.amd2_rocm7.1.1_ubuntu24.04_py3.12_pytorch2.8/images/sha256-9ab6426750a11dbab9bcddeaccaf492683bfd96a1d60b21dd9fc3a609a98175b"><i class="fab fa-docker fa-lg"></i> rocm/flashinfer</a>
     - `7.1.1 <https://repo.radeon.com/rocm/apt/7.1.1/>`__
     - `v0.2.5 <https://github.com/flashinfer-ai/flashinfer/releases/tag/v0.2.5>`__
     - `2.8.0 <https://github.com/ROCm/pytorch/releases/tag/v2.8.0>`__
     - 24.04
     - `3.12 <https://www.python.org/downloads/release/python-3129/>`__
     - MI325X, MI300X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/flashinfer/flashinfer-0.2.5_rocm6.4_ubuntu24.04_py3.12_pytorch2.7/images/sha256-558914838821c88c557fb6d42cfbc1bdb67d79d19759f37c764a9ee801f93313"><i class="fab fa-docker fa-lg"></i> rocm/flashinfer</a>
     - `6.4.1 <https://repo.radeon.com/rocm/apt/6.4.1/>`__
     - `v0.2.5 <https://github.com/flashinfer-ai/flashinfer/releases/tag/v0.2.5>`__
     - `2.7.1 <https://github.com/ROCm/pytorch/releases/tag/v2.7.1>`__
     - 24.04
     - `3.12 <https://www.python.org/downloads/release/python-3129/>`__
     - MI300X

.. _flashinfer-recommendations:

Use cases and recommendations
================================================================================

FlashInfer on ROCm enables you to perform LLM inference for both prefill and decode:
during prefill, your model efficiently processes input prompts to build KV caches
and internal activations; during decode, it generates tokens sequentially based on
prior outputs and context. Use the attention mode supported upstream (Multi-Head
Attention, Grouped-Query Attention, or Multi-Query Attention) that matches your
model configuration.
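
As a minimal illustration of the decode phase, the sketch below uses the
single-request decode operator. The function name and tensor layout follow the
upstream Python API; check the ``README`` of the ROCm fork for the operators
currently supported on ROCm:

.. code-block:: python

   import torch
   import flashinfer

   num_qo_heads, num_kv_heads, head_dim, kv_len = 32, 8, 128, 4096

   # One query vector per head, attending over a prebuilt KV cache
   # (GQA: 32 query heads share 8 KV heads).
   q = torch.randn(num_qo_heads, head_dim, dtype=torch.float16, device="cuda")
   k = torch.randn(kv_len, num_kv_heads, head_dim, dtype=torch.float16, device="cuda")
   v = torch.randn(kv_len, num_kv_heads, head_dim, dtype=torch.float16, device="cuda")

   # Attention output for the next token: shape [num_qo_heads, head_dim].
   out = flashinfer.single_decode_with_kv_cache(q, k, v)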

FlashInfer on ROCm also includes capabilities such as load balancing,
sparse and dense attention optimizations, and single and batch decode, alongside
prefill for high-performance execution on MI300X GPUs.

For currently supported use cases and recommendations, refer to the `AMD ROCm blog <https://rocm.blogs.amd.com/search.html?q=flashinfer>`__,
where you can search for examples and best practices to optimize your workloads on AMD GPUs.

Previous versions
===============================================================================
See :doc:`rocm-install-on-linux:install/3rd-party/previous-versions/flashinfer-history` to find documentation for previous releases
of the ``ROCm/flashinfer`` Docker image.

@@ -1,349 +0,0 @@

:orphan:

.. meta::
   :description: JAX compatibility
   :keywords: GPU, JAX, deep learning, framework compatibility

.. version-set:: rocm_version latest

*******************************************************************************
JAX compatibility
*******************************************************************************

`JAX <https://docs.jax.dev/en/latest/notebooks/thinking_in_jax.html>`__ is a library
for array-oriented numerical computation (similar to NumPy), with automatic differentiation
and just-in-time (JIT) compilation to enable high-performance machine learning research.

JAX provides an API that combines automatic differentiation and the
Accelerated Linear Algebra (XLA) compiler to achieve high-performance machine
learning at scale. JAX uses composable transformations of Python and NumPy through
JIT compilation, automatic vectorization, and parallelization, as in the following sketch.
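
A minimal, illustrative sketch of these composable transformations:

.. code-block:: python

   import jax
   import jax.numpy as jnp

   def predict(w, x):
       return jnp.tanh(x @ w)

   loss = lambda w, x: jnp.sum(predict(w, x) ** 2)

   fast_loss = jax.jit(loss)                       # JIT compilation through XLA
   grad_loss = jax.grad(loss)                      # automatic differentiation
   batched = jax.vmap(predict, in_axes=(None, 0))  # automatic vectorization

   w = jnp.ones((3, 2))
   x = jnp.ones((4, 3))  # a batch of 4 inputs
   print(fast_loss(w, x), grad_loss(w, x).shape, batched(w, x).shape)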

Support overview
================================================================================

- The ROCm-supported version of JAX is maintained in the official `https://github.com/ROCm/rocm-jax
  <https://github.com/ROCm/rocm-jax>`__ repository, which differs from the
  `https://github.com/jax-ml/jax <https://github.com/jax-ml/jax>`__ upstream repository.

- To get started and install JAX on ROCm, use the prebuilt :ref:`Docker images <jax-docker-compat>`,
  which include ROCm, JAX, and all required dependencies.

- See the :doc:`ROCm JAX installation guide <rocm-install-on-linux:install/3rd-party/jax-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://jax.readthedocs.io/en/latest/installation.html#amd-gpu-linux>`__
  for additional context.

Version support
--------------------------------------------------------------------------------

AMD releases official `ROCm JAX Docker images <https://hub.docker.com/r/rocm/jax/tags>`_
quarterly alongside new ROCm releases. These images undergo full AMD testing.
`Community ROCm JAX Docker images <https://hub.docker.com/r/rocm/jax-community/tags>`_
follow upstream JAX releases and use the latest available ROCm version.

JAX Plugin-PJRT with JAX/JAXLIB compatibility
================================================================================

Portable JIT Runtime (PJRT) is an open, stable interface for device runtime and
compiler. The following table details the ROCm version compatibility matrix
between JAX Plugin-PJRT and JAX/JAXLIB.

.. list-table::
   :header-rows: 1

   * - JAX Plugin-PJRT
     - JAX/JAXLIB
     - ROCm
   * - 0.8.0
     - 0.8.0
     - 7.2.0
   * - 0.7.1
     - 0.7.1
     - 7.1.1, 7.1.0
   * - 0.6.0
     - 0.6.2, 0.6.0
     - 7.0.2, 7.0.1, 7.0.0
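
With a matching plugin installed, you can confirm that JAX sees the ROCm
backend (the exact device ``repr`` varies by plugin version):

.. code-block:: python

   import jax

   # AMD GPUs enumerate as ordinary JAX devices through the ROCm PJRT plugin.
   print(jax.devices())
   print(jax.default_backend())  # reports "gpu" when the plugin is active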

Use cases and recommendations
================================================================================

* The `nanoGPT in JAX <https://rocm.blogs.amd.com/artificial-intelligence/nanoGPT-JAX/README.html>`_
  blog explores the implementation and training of a Generative Pre-trained
  Transformer (GPT) model in JAX, inspired by Andrej Karpathy's PyTorch-based
  nanoGPT. Comparing how essential GPT components, such as self-attention
  mechanisms and optimizers, are realized in PyTorch and JAX also highlights
  JAX's unique features.

* The `Optimize GPT Training: Enabling Mixed Precision Training in JAX using
  ROCm on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-mixed-precision/README.html>`_
  blog post provides a comprehensive guide on enhancing the training efficiency
  of GPT models by implementing mixed precision techniques in JAX, specifically
  tailored for AMD GPUs utilizing the ROCm platform.

* The `Supercharging JAX with Triton Kernels on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-triton/README.html>`_
  blog demonstrates how to develop a custom fused dropout-activation kernel for
  matrices using Triton, integrate it with JAX, and benchmark its performance
  using ROCm.

* The `Distributed fine-tuning with JAX on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
  blog post outlines the process of fine-tuning a Bidirectional Encoder Representations
  from Transformers (BERT)-based large language model (LLM) using JAX for a text
  classification task. The blog post discusses techniques for parallelizing the
  fine-tuning across multiple AMD GPUs and assesses the model's performance on a
  holdout dataset. During the fine-tuning, a BERT-base-cased transformer model
  and the General Language Understanding Evaluation (GLUE) benchmark dataset were
  used on a multi-GPU setup.

* The `MI300X workload optimization guide <https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html>`_
  provides detailed guidance on optimizing workloads for the AMD Instinct MI300X
  GPU using ROCm. The page is aimed at helping users achieve optimal
  performance for deep learning and other high-performance computing tasks on
  the MI300X GPU.

For more use cases and recommendations, see `ROCm JAX blog posts <https://rocm.blogs.amd.com/blog/tag/jax.html>`_.

.. _jax-docker-compat:

Docker image compatibility
================================================================================

AMD validates and publishes `JAX images <https://hub.docker.com/r/rocm/jax/tags>`__
with ROCm backends on Docker Hub.

For ``jax-community`` images, see `rocm/jax-community
<https://hub.docker.com/r/rocm/jax-community/tags>`__ on Docker Hub.

To find the right image tag, see the :ref:`JAX on ROCm installation
documentation <rocm-install-on-linux:jax-docker-support>` for a list of
available ``rocm/jax`` images.

.. _key_rocm_libraries:

Key ROCm libraries for JAX
================================================================================

The following ROCm libraries represent potential targets that could be utilized
by JAX on ROCm for various computational tasks. The actual libraries used will
depend on the specific implementation and operations performed.

.. list-table::
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - :version-ref:`hipBLAS rocm_version`
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - :version-ref:`hipBLASLt rocm_version`
     - hipBLASLt is an extension of hipBLAS, providing additional
       features like epilogues fused into the matrix multiplication kernel or
       use of integer tensor cores.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - :version-ref:`hipCUB rocm_version`
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort and select.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - :version-ref:`hipFFT rocm_version`
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - :version-ref:`hipRAND rocm_version`
     - Provides fast random number generation for GPUs.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - :version-ref:`hipSOLVER rocm_version`
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
       singular value decompositions (SVD).
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - :version-ref:`hipSPARSE rocm_version`
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
   * - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
     - :version-ref:`hipSPARSELt rocm_version`
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - :version-ref:`MIOpen rocm_version`
     - Optimized for deep learning primitives such as convolutions, pooling,
       normalization, and activation functions.
   * - `RCCL <https://github.com/ROCm/rccl>`_
     - :version-ref:`RCCL rocm_version`
     - Optimized for multi-GPU communication for operations like all-reduce,
       broadcast, and scatter.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - :version-ref:`rocThrust rocm_version`
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.

.. note::

   This table shows ROCm libraries that could potentially be utilized by JAX. Not
   all libraries may be used in every configuration, and the actual library usage
   will depend on the specific operations and implementation details.

Supported data types and modules
===============================================================================

The following tables list the supported public JAX API data types and modules.

Supported data types
--------------------------------------------------------------------------------

ROCm supports all the JAX data types of the `jax.dtypes <https://docs.jax.dev/en/latest/jax.dtypes.html>`_
module, `jax.numpy.dtype <https://docs.jax.dev/en/latest/_autosummary/jax.numpy.dtype.html>`_,
and `default_dtype <https://docs.jax.dev/en/latest/default_dtypes.html>`_.
The ROCm-supported data types in JAX are collected in the following table.

.. list-table::
   :header-rows: 1

   * - Data type
     - Description

   * - ``bfloat16``
     - 16-bit bfloat (brain floating point).

   * - ``bool``
     - Boolean.

   * - ``complex128``
     - 128-bit complex.

   * - ``complex64``
     - 64-bit complex.

   * - ``float16``
     - 16-bit (half precision) floating-point.

   * - ``float32``
     - 32-bit (single precision) floating-point.

   * - ``float64``
     - 64-bit (double precision) floating-point.

   * - ``half``
     - 16-bit (half precision) floating-point.

   * - ``int16``
     - Signed 16-bit integer.

   * - ``int32``
     - Signed 32-bit integer.

   * - ``int64``
     - Signed 64-bit integer.

   * - ``int8``
     - Signed 8-bit integer.

   * - ``uint16``
     - Unsigned 16-bit (word) integer.

   * - ``uint32``
     - Unsigned 32-bit (dword) integer.

   * - ``uint64``
     - Unsigned 64-bit (qword) integer.

   * - ``uint8``
     - Unsigned 8-bit (byte) integer.

.. note::

   JAX data type support is affected by the :ref:`key_rocm_libraries` and is
   collected on the :doc:`ROCm data types and precision support <rocm:reference/precision-support>`
   page.
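
A short sketch of working with these data types (note that, as in upstream
JAX, 64-bit types require explicitly enabling x64 mode):

.. code-block:: python

   import jax
   import jax.numpy as jnp

   # JAX defaults to 32-bit types; opt in to float64/int64 support.
   jax.config.update("jax_enable_x64", True)

   a = jnp.arange(4, dtype=jnp.float64)
   b = jnp.ones(4, dtype=jnp.bfloat16)
   c = jnp.zeros(4, dtype=jnp.uint8)
   print(a.dtype, b.dtype, c.dtype)  # float64 bfloat16 uint8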

Supported modules
--------------------------------------------------------------------------------

For a complete and up-to-date list of JAX public modules (for example, ``jax.numpy``,
``jax.scipy``, ``jax.lax``), their descriptions, and usage, refer directly to the
`official JAX API documentation <https://jax.readthedocs.io/en/latest/jax.html>`_.

.. note::

   Since version 0.1.56, JAX has full support for ROCm, and the
   :ref:`Known issues and important notes <jax_comp_known_issues>` section
   contains details about limitations specific to the ROCm backend. The list of
   JAX API modules is maintained by the JAX project and is subject to change.
   Refer to the official JAX documentation for the most up-to-date information.

Key features and enhancements for ROCm 7.1
===============================================================================

- Enabled compilation of multihost HLO runner Python bindings.

- Backported multihost HLO runner bindings and some related changes to
  :code:`FunctionalHloRunner`.

- Added :code:`requirements_lock_3_12` to enable building for Python 3.12.

- Removed the hardcoded NHWC convolution layout for ``fp16`` precision to
  address the performance drops for ``fp16`` precision on gfx12xx GPUs.

- ROCprofiler-SDK integration:

  - Integrated ROCprofiler-SDK (v3) into XLA to improve profiling of GPU events,
    supporting both time-based and step-based profiling.

  - Added unit tests for :code:`rocm_collector` and :code:`rocm_tracer`.

- Added the previously unsupported Triton conversion from ``f8E4M3FNUZ`` to
  ``fp16`` with a rounding mode.

- Introduced :code:`CudnnFusedConvDecomposer` to revert fused convolutions
  when :code:`ConvAlgorithmPicker` fails to find a fused algorithm, and removed
  unfused fallback paths from :code:`RocmFusedConvRunner`.

Key features and enhancements for ROCm 7.0
===============================================================================

- Upgraded XLA backend: Integrates a newer XLA version, enabling better
  optimizations, broader operator support, and potential performance gains.

- RNN support: Native RNN support (including LSTMs via ``jax.experimental.rnn``)
  is now available on ROCm, aiding sequence model development.

- Comprehensive linear algebra capabilities: Offers robust ``jax.linalg``
  operations, essential for scientific and machine learning tasks.

- Expanded AMD GPU architecture support: Provides ongoing support for gfx1101
  GPUs and introduces support for gfx950 and gfx12xx GPUs.

- Mixed FP8 precision support: Enables ``lax.dot_general`` operations with mixed FP8
  types, offering pathways for memory and compute efficiency (see the sketch
  after this list).

- Streamlined PyPI packaging: Provides reliable PyPI wheels for JAX on ROCm,
  simplifying the installation process.

- Pallas experimental kernel development: Continued Pallas framework
  enhancements for custom GPU kernels, including new intrinsics (specific
  kernel behaviors under review).

- Improved build system and CI: Enhanced ROCm build system and CI for greater
  reliability and maintainability.

- Enhanced distributed computing setup: Improved JAX setup in multi-GPU
  distributed environments.
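
A minimal sketch of a mixed FP8 ``lax.dot_general`` call (the FP8 dtype names
follow ``jax.numpy``; which FP8 variants perform best depends on the GPU
architecture):

.. code-block:: python

   import jax.numpy as jnp
   from jax import lax

   a = jnp.ones((8, 16), dtype=jnp.float8_e4m3fnuz)
   b = jnp.ones((16, 4), dtype=jnp.float8_e5m2fnuz)

   # Contract a's dim 1 with b's dim 0 (a plain matmul), accumulating in f32.
   out = lax.dot_general(a, b,
                         dimension_numbers=(((1,), (0,)), ((), ())),
                         preferred_element_type=jnp.float32)
   print(out.shape, out.dtype)  # (8, 4) float32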

.. _jax_comp_known_issues:

Known issues and notes for ROCm 7.0
===============================================================================

- ``nn.dot_product_attention``: Certain configurations of ``jax.nn.dot_product_attention``
  may cause segmentation faults, though the majority of use cases work correctly.

- SVD with dynamic shapes: SVD on inputs with dynamic/symbolic shapes might result in an error.
  SVD with static shapes is unaffected.

- QR decomposition with symbolic shapes: QR decomposition operations may fail when using
  symbolic/dynamic shapes in shape polymorphic contexts.

- Pallas kernels: Specific advanced Pallas kernels may exhibit variations in
  numerical output or resource usage. These are actively reviewed as part of
  Pallas's experimental development.

@@ -1,275 +0,0 @@

:orphan:

.. meta::
   :description: llama.cpp compatibility
   :keywords: GPU, GGML, llama.cpp, deep learning, framework compatibility

.. version-set:: rocm_version latest

********************************************************************************
llama.cpp compatibility
********************************************************************************

`llama.cpp <https://github.com/ggml-org/llama.cpp>`__ is an open-source framework
for Large Language Model (LLM) inference that runs on both central processing units
(CPUs) and graphics processing units (GPUs). It is written in plain C/C++, providing
a simple, dependency-free setup.

The framework supports multiple quantization options, from 1.5-bit to 8-bit integers,
to accelerate inference and reduce memory usage. Originally built as a CPU-first library,
llama.cpp is easy to integrate with other programming environments and is widely
adopted across diverse platforms, including consumer devices.

Support overview
================================================================================

- The ROCm-supported version of llama.cpp is maintained in the official `https://github.com/ROCm/llama.cpp
  <https://github.com/ROCm/llama.cpp>`__ repository, which differs from the
  `https://github.com/ggml-org/llama.cpp <https://github.com/ggml-org/llama.cpp>`__ upstream repository.

- To get started and install llama.cpp on ROCm, use the prebuilt :ref:`Docker images <llama-cpp-docker-compat>`,
  which include ROCm, llama.cpp, and all required dependencies.

- See the :doc:`ROCm llama.cpp installation guide <rocm-install-on-linux:install/3rd-party/llama-cpp-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md>`__
  for additional context.

.. _llama-cpp-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `llama.cpp images <https://hub.docker.com/r/rocm/llama.cpp/tags>`__
with ROCm backends on Docker Hub. The following Docker image tags and associated
inventories represent the latest available llama.cpp versions from the official Docker Hub.
Click |docker-icon| to view the image on Docker Hub.

.. important::

   Tag endings of ``_full``, ``_server``, and ``_light`` serve different purposes for entrypoints as follows:

   - Full: This image includes both the main executable file and the tools to convert ``LLaMA`` models into ``ggml`` format and quantize them to 4-bit.
   - Server: This image only includes the server executable file.
   - Light: This image only includes the main executable file.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Full Docker
     - Server Docker
     - Light Docker
     - llama.cpp
     - ROCm
     - Ubuntu
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6652.amd0_rocm7.0.0_ubuntu24.04_full/images/sha256-a94f0c7a598cc6504ff9e8371c016d7a2f93e69bf54a36c870f9522567201f10g"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6652.amd0_rocm7.0.0_ubuntu24.04_server/images/sha256-be175932c3c96e882dfbc7e20e0e834f58c89c2925f48b222837ee929dfc47ee"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6652.amd0_rocm7.0.0_ubuntu24.04_light/images/sha256-d8ba0c70603da502c879b1f8010b439c8e7fa9f6cbdac8bbbbbba97cb41ebc9e"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6652 <https://github.com/ROCm/llama.cpp/tree/release/b6652>`__
     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - 24.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6652.amd0_rocm7.0.0_ubuntu22.04_full/images/sha256-37582168984f25dce636cc7288298e06d94472ea35f65346b3541e6422b678ee"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6652.amd0_rocm7.0.0_ubuntu22.04_server/images/sha256-7e70578e6c3530c6591cc2c26da24a9ee68a20d318e12241de93c83224f83720"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6652.amd0_rocm7.0.0_ubuntu22.04_light/images/sha256-9a5231acf88b4a229677bc2c636ea3fe78a7a80f558bd80910b919855de93ad5"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6652 <https://github.com/ROCm/llama.cpp/tree/release/b6652>`__
     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - 22.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.3_ubuntu24.04_full/images/sha256-5960fc850024a8a76451f9eaadd89b7e59981ae9f393b407310c1ddf18892577"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.3_ubuntu24.04_server/images/sha256-1b79775d9f546065a6aaf9ca426e1dd4ed4de0b8f6ee83687758cc05af6538e6"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.3_ubuntu24.04_light/images/sha256-8f863c4c2857ae42bebd64e4f1a0a1e7cc3ec4503f243e32b4a4dcad070ec361"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6356 <https://github.com/ROCm/llama.cpp/tree/release/b6356>`__
     - `6.4.3 <https://repo.radeon.com/rocm/apt/6.4.3/>`__
     - 24.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.3_ubuntu22.04_full/images/sha256-888879b3ee208f9247076d7984524b8d1701ac72611689e89854a1588bec9867"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.3_ubuntu22.04_server/images/sha256-90e4ff99a66743e33fd00728cd71a768588e5f5ef355aaa196669fe65ac70672"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.3_ubuntu22.04_light/images/sha256-bd447a049939cb99054f8fbf3f2352870fe906a75e2dc3339c845c08b9c53f9b"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6356 <https://github.com/ROCm/llama.cpp/tree/release/b6356>`__
     - `6.4.3 <https://repo.radeon.com/rocm/apt/6.4.3/>`__
     - 22.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.2_ubuntu24.04_full/images/sha256-5b3a1bc4889c1fcade434b937fbf9cc1c22ff7dc0317c130339b0c9238bc88c4"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.2_ubuntu24.04_server/images/sha256-5228ff99d0f627a9032d668f4381b2e80dc1e301adc3e0821f26d8354b175271"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.2_ubuntu24.04_light/images/sha256-b12723b332a826a89b7252dddf868cbe4d1a869562fc4aa4032f59e1a683b968"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6356 <https://github.com/ROCm/llama.cpp/tree/release/b6356>`__
     - `6.4.2 <https://repo.radeon.com/rocm/apt/6.4.2/>`__
     - 24.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.2_ubuntu22.04_full/images/sha256-cd6e21a6a73f59b35dd5309b09dd77654a94d783bf13a55c14eb8dbf8e9c2615"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.2_ubuntu22.04_server/images/sha256-c2b4689ab2c47e6626e8fea22d7a63eb03d47c0fde9f5ef8c9f158d15c423e58"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.2_ubuntu22.04_light/images/sha256-1acc28f29ed87db9cbda629cb29e1989b8219884afe05f9105522be929e94da4"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6356 <https://github.com/ROCm/llama.cpp/tree/release/b6356>`__
     - `6.4.2 <https://repo.radeon.com/rocm/apt/6.4.2/>`__
     - 22.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.1_ubuntu24.04_full/images/sha256-2f8ae8a44510d96d52dea6cb398b224f7edeb7802df7ec488c6f63d206b3cdc9"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.1_ubuntu24.04_server/images/sha256-fece497ff9f4a28b12f645de52766941da8ead8471aa1ea84b61d4b4568e51f2"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.1_ubuntu24.04_light/images/sha256-3e14352fa6f8c6128b23cf9342531c20dbfb522550b626e09d83b260a1947022"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6356 <https://github.com/ROCm/llama.cpp/tree/release/b6356>`__
     - `6.4.1 <https://repo.radeon.com/rocm/apt/6.4.1/>`__
     - 24.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.1_ubuntu22.04_full/images/sha256-80763062ef0bec15038c35fd01267f1fc99a5dd171d4b48583cc668b15efad69"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.1_ubuntu22.04_server/images/sha256-db2a6c957555ed83b819bbc54aea884a93192da0fb512dae63d32e0dc4e8ab8f"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b6356_rocm6.4.1_ubuntu22.04_light/images/sha256-c6dbb07cc655fb079d5216e4b77451cb64a9daa0585d23b6fb8b32cb22021197"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b6356 <https://github.com/ROCm/llama.cpp/tree/release/b6356>`__
     - `6.4.1 <https://repo.radeon.com/rocm/apt/6.4.1/>`__
     - 22.04
     - MI325X, MI300X, MI210

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b5997_rocm6.4.0_ubuntu24.04_full/images/sha256-f78f6c81ab2f8e957469415fe2370a1334fe969c381d1fe46050c85effaee9d5"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b5997_rocm6.4.0_ubuntu24.04_server/images/sha256-275ad9e18f292c26a00a2de840c37917e98737a88a3520bdc35fd3fc5c9a6a9b"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/llama.cpp/llama.cpp-b5997_rocm6.4.0_ubuntu24.04_light/images/sha256-cc324e6faeedf0e400011f07b49d2dc41a16bae257b2b7befa0f4e2e97231320"><i class="fab fa-docker fa-lg"></i> rocm/llama.cpp</a>
     - `b5997 <https://github.com/ROCm/llama.cpp/tree/release/b5997>`__
     - `6.4.0 <https://repo.radeon.com/rocm/apt/6.4/>`__
     - 24.04
     - MI300X, MI210

.. _llama-cpp-key-rocm-libraries:

Key ROCm libraries for llama.cpp
================================================================================

llama.cpp functionality on ROCm is determined by its underlying library
dependencies. These ROCm components affect the capabilities, performance, and
feature set available to developers. Ensure you have the required libraries for
your corresponding ROCm version.

.. list-table::
   :header-rows: 1

   * - ROCm library
     - ROCm 7.0.0 version
     - ROCm 6.4.x version
     - Purpose
     - Usage
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`__
     - 3.0.0
     - 2.4.0
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Supports operations such as matrix multiplication, matrix-vector
       products, and tensor contractions. Utilized in both dense and batched
       linear algebra operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`__
     - 1.0.0
     - 0.12.0
     - hipBLASLt is an extension of the hipBLAS library, providing additional
       features like epilogues fused into the matrix multiplication kernel or
       use of integer tensor cores.
     - By setting the flag ``ROCBLAS_USE_HIPBLASLT``, you can dispatch hipBLASLt
       kernels where possible.
   * - `rocWMMA <https://github.com/ROCm/rocWMMA>`__
     - 2.0.0
     - 1.7.0
     - Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix
       multiplication (GEMM) and accumulation operations with mixed precision
       support.
     - Can be used to enhance flash attention performance on AMD GPUs by enabling
       the corresponding flag at compile time.

.. _llama-cpp-uses-recommendations:

Use cases and recommendations
================================================================================

llama.cpp can be applied in a variety of scenarios, particularly when you need to meet one or more of the following requirements:

- Plain C/C++ implementation with no external dependencies
- Support for 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory usage
- Custom HIP (Heterogeneous-compute Interface for Portability) kernels for running large language models (LLMs) on AMD GPUs (graphics processing units)
- CPU (central processing unit) + GPU (graphics processing unit) hybrid inference for partially accelerating models larger than the total available VRAM (video random-access memory)

llama.cpp is also used in a range of real-world applications, including:

- Games such as `Lucy's Labyrinth <https://github.com/MorganRO8/Lucys_Labyrinth>`__:
  A simple maze game where AI-controlled agents attempt to trick the player.
- Tools such as `Styled Lines <https://marketplace.unity.com/packages/tools/ai-ml-integration/style-text-webgl-ios-stand-alone-llm-llama-cpp-wrapper-292902>`__:
  A proprietary, asynchronous inference wrapper for Unity3D game development, including pre-built mobile and web platform wrappers and a model example.
- Various other AI applications use llama.cpp as their inference engine;
  for a detailed list, see the `user interfaces (UIs) section <https://github.com/ggml-org/llama.cpp?tab=readme-ov-file#description>`__.

For more use cases and recommendations, refer to the `AMD ROCm blog <https://rocm.blogs.amd.com/>`__,
where you can search for llama.cpp examples and best practices to optimize your workloads on AMD GPUs.

- The `Llama.cpp Meets Instinct: A New Era of Open-Source AI Acceleration <https://rocm.blogs.amd.com/ecosystems-and-partners/llama-cpp/README.html>`__
  blog post outlines how the open-source llama.cpp framework enables efficient LLM inference, including interactive inference with ``llama-cli``,
  server deployment with ``llama-server``, GGUF model preparation and quantization, performance benchmarking, and optimizations tailored for
  AMD Instinct GPUs within the ROCm ecosystem.

Previous versions
===============================================================================
See :doc:`rocm-install-on-linux:install/3rd-party/previous-versions/llama-cpp-history` to find documentation for previous releases
of the ``ROCm/llama.cpp`` Docker image.

@@ -1,104 +0,0 @@

:orphan:

.. meta::
   :description: Megablocks compatibility
   :keywords: GPU, megablocks, deep learning, framework compatibility

.. version-set:: rocm_version latest

********************************************************************************
Megablocks compatibility
********************************************************************************

`Megablocks <https://github.com/databricks/megablocks>`__ is a lightweight library
for mixture-of-experts `(MoE) <https://huggingface.co/blog/moe>`__ training.
The core of the system consists of efficient "dropless-MoE" and standard MoE
layers, as the sketch below illustrates.
Megablocks is integrated with `https://github.com/stanford-futuredata/Megatron-LM
<https://github.com/stanford-futuredata/Megatron-LM>`__,
where data and pipeline parallel training of MoEs is supported.
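
A minimal sketch of constructing a dropless-MoE layer. The module and argument
names follow the upstream ``megablocks.layers`` API and may differ between
releases, so treat this as illustrative rather than definitive:

.. code-block:: python

   import torch
   from megablocks.layers.arguments import Arguments
   from megablocks.layers.dmoe import dMoE

   # Hypothetical configuration: 8 experts with top-2 routing.
   args = Arguments(
       hidden_size=1024,
       ffn_hidden_size=4096,
       moe_num_experts=8,
       moe_top_k=2,
   )
   layer = dMoE(args).to("cuda")  # "cuda" targets AMD GPUs on ROCm PyTorch

   x = torch.randn(4, 16, 1024, device="cuda", dtype=torch.float16)
   out = layer(x)  # tokens routed through experts with no dropped tokens;
                   # the exact return signature may vary by release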

Support overview
================================================================================

- The ROCm-supported version of Megablocks is maintained in the official `https://github.com/ROCm/megablocks
  <https://github.com/ROCm/megablocks>`__ repository, which differs from the
  `https://github.com/databricks/megablocks <https://github.com/databricks/megablocks>`__ upstream repository.

- To get started and install Megablocks on ROCm, use the prebuilt :ref:`Docker image <megablocks-docker-compat>`,
  which includes ROCm, Megablocks, and all required dependencies.

- See the :doc:`ROCm Megablocks installation guide <rocm-install-on-linux:install/3rd-party/megablocks-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://github.com/databricks/megablocks>`__
  for additional context.

.. _megablocks-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `Megablocks images <https://hub.docker.com/r/rocm/megablocks/tags>`__
with ROCm backends on Docker Hub. The following Docker image tag and associated
inventories represent the latest available Megablocks version from the official Docker Hub.
Click |docker-icon| to view the image on Docker Hub.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker image
     - ROCm
     - Megablocks
     - PyTorch
     - Ubuntu
     - Python
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/megablocks/megablocks-0.7.0_rocm6.3.0_ubuntu24.04_py3.12_pytorch2.4.0/images/sha256-372ff89b96599019b8f5f9db469c84add2529b713456781fa62eb9a148659ab4"><i class="fab fa-docker fa-lg"></i> rocm/megablocks</a>
     - `6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_
     - `0.7.0 <https://github.com/databricks/megablocks/releases/tag/v0.7.0>`_
     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`_
     - MI300X

Supported models and features with ROCm 6.3.0
================================================================================

This section summarizes the Megablocks features supported by ROCm.

* Distributed pre-training
* Activation checkpointing and recomputation
* Distributed optimizer
* Mixture-of-Experts
* dropless-Mixture-of-Experts

.. _megablocks-recommendations:

Use cases and recommendations
================================================================================

* The `Efficient MoE training on AMD ROCm: How-to use Megablocks on AMD GPUs
  <https://rocm.blogs.amd.com/artificial-intelligence/megablocks/README.html>`__
  blog post explains how to leverage the ROCm platform for pre-training using the
  Megablocks framework. It introduces a streamlined approach for training Mixture-of-Experts
  (MoE) models using the Megablocks library on AMD hardware. Focusing on GPT-2, it
  demonstrates how block-sparse computations can enhance scalability and efficiency in MoE
  training. The guide provides step-by-step instructions for setting up the environment,
  including cloning the repository, building the Docker image, and running the training container.
  Additionally, it offers insights into utilizing the ``oscar-1GB.json`` dataset for pre-training
  language models. By leveraging Megablocks and the ROCm platform, you can optimize your MoE
  training workflows for large-scale transformer models.

  It shows how to pre-process datasets and how to begin pre-training on AMD GPUs through:

  * Single-GPU pre-training
  * Multi-GPU pre-training

@@ -1,498 +0,0 @@

:orphan:

.. meta::
   :description: PyTorch compatibility
   :keywords: GPU, PyTorch, deep learning, framework compatibility

.. version-set:: rocm_version latest

********************************************************************************
PyTorch compatibility
********************************************************************************

`PyTorch <https://pytorch.org/>`__ is an open-source tensor library designed for
deep learning. PyTorch on ROCm provides mixed-precision and large-scale training
using the `MIOpen <https://github.com/ROCm/MIOpen>`__ and
`RCCL <https://github.com/ROCm/rccl>`__ libraries.

PyTorch provides two high-level features (see the sketch after this list):

- Tensor computation (like NumPy) with strong GPU acceleration

- Deep neural networks built on a tape-based autograd system (rapid computation
  of multiple partial derivatives or gradients)
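
A minimal, illustrative sketch of both features together:

.. code-block:: python

   import torch

   # On ROCm builds of PyTorch, the familiar "cuda" device string targets AMD GPUs.
   device = "cuda" if torch.cuda.is_available() else "cpu"

   x = torch.randn(4, 3, device=device, requires_grad=True)
   w = torch.randn(3, 2, device=device, requires_grad=True)

   loss = (x @ w).pow(2).sum()   # tensor computation with GPU acceleration
   loss.backward()               # tape-based autograd computes the gradients
   print(x.grad.shape, w.grad.shape)  # torch.Size([4, 3]) torch.Size([3, 2])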

Support overview
================================================================================

ROCm support for PyTorch is upstreamed into the official PyTorch repository.
ROCm development is aligned with the stable release of PyTorch, while upstream
PyTorch testing uses the stable release of ROCm to maintain consistency:

- The ROCm-supported version of PyTorch is maintained in the official `https://github.com/ROCm/pytorch
  <https://github.com/ROCm/pytorch>`__ repository, which differs from the
  `https://github.com/pytorch/pytorch <https://github.com/pytorch/pytorch>`__ upstream repository.

- To get started and install PyTorch on ROCm, use the prebuilt :ref:`Docker images <pytorch-docker-compat>`,
  which include ROCm, PyTorch, and all required dependencies.

- See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://pytorch.org/get-started/locally/>`__ or
  `Previous versions <https://pytorch.org/get-started/previous-versions/>`__ for additional context.

PyTorch includes tooling that generates HIP source code from the CUDA backend.
This approach allows PyTorch to support ROCm without requiring manual code
modifications. For more information, see :doc:`HIPIFY <hipify:index>`.

Version support
--------------------------------------------------------------------------------

AMD releases official `ROCm PyTorch Docker images <https://hub.docker.com/r/rocm/pytorch/tags>`_
quarterly alongside new ROCm releases. These images undergo full AMD testing.

.. _pytorch-recommendations:

Use cases and recommendations
================================================================================

* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/training/benchmark-docker/pytorch-training>`
  explains how to leverage the ROCm platform for training AI models. It covers the
  steps, tools, and best practices for optimizing training workflows on AMD GPUs
  using PyTorch features.

* :doc:`Single-GPU fine-tuning and inference </how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference>`
  describes and demonstrates how to use the ROCm platform for the fine-tuning
  and inference of machine learning models, particularly large language models
  (LLMs), on systems with a single GPU. This topic provides a detailed guide for
  setting up, optimizing, and executing fine-tuning and inference workflows in
  such environments.

* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference>`
  describes and demonstrates the fine-tuning and inference of machine learning
  models on systems with multiple GPUs.

* The :doc:`Instinct MI300X workload optimization guide </how-to/rocm-for-ai/inference-optimization/workload>`
  provides detailed guidance on optimizing workloads for the AMD Instinct MI300X
  GPU using ROCm. This guide helps users achieve optimal performance for
  deep learning and other high-performance computing tasks on the MI300X
  GPU.

* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
  describes how PyTorch integrates with ROCm for AI workloads. It outlines the
  use of PyTorch on the ROCm platform and focuses on efficiently leveraging AMD
  GPU hardware for training and inference tasks in AI applications.

For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`__.

.. _pytorch-docker-compat:

Docker image compatibility
================================================================================

AMD validates and publishes `PyTorch images <https://hub.docker.com/r/rocm/pytorch/tags>`__
with ROCm backends on Docker Hub.

To find the right image tag, see the :ref:`PyTorch on ROCm installation
documentation <rocm-install-on-linux:pytorch-docker-support>` for a list of
available ``rocm/pytorch`` images.
Key ROCm libraries for PyTorch
|
||||
================================================================================
|
||||
|
||||
PyTorch functionality on ROCm is determined by its underlying library
|
||||
dependencies. These ROCm components affect the capabilities, performance, and
|
||||
feature set available to developers.

.. list-table::
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
     - Used in
   * - `Composable Kernel <https://github.com/ROCm/composable_kernel>`__
     - :version-ref:`"Composable Kernel" rocm_version`
     - Enables faster execution of core operations like matrix multiplication
       (GEMM), convolutions and transformations.
     - Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
       ``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
       and ``torch.nn.MultiheadAttention``.
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`__
     - :version-ref:`hipBLAS rocm_version`
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Supports operations such as matrix multiplication, matrix-vector
       products, and tensor contractions. Utilized in both dense and batched
       linear algebra operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`__
     - :version-ref:`hipBLASLt rocm_version`
     - hipBLASLt is an extension of the hipBLAS library, providing additional
       features like epilogues fused into the matrix multiplication kernel or
       use of integer tensor cores.
     - Accelerates operations such as ``torch.matmul``, ``torch.mm``, and the
       matrix multiplications used in convolutional and linear layers.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`__
     - :version-ref:`hipCUB rocm_version`
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort and select.
     - Supports operations such as ``torch.sum``, ``torch.cumsum`` and
       ``torch.sort``. Irregular shapes often involve scanning, sorting, and
       filtering, which hipCUB handles efficiently.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`__
     - :version-ref:`hipFFT rocm_version`
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
     - Used in functions like the ``torch.fft`` module.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`__
     - :version-ref:`hipRAND rocm_version`
     - Provides fast random number generation for GPUs.
     - The ``torch.rand``, ``torch.randn``, and stochastic layers like
       ``torch.nn.Dropout`` rely on hipRAND.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`__
     - :version-ref:`hipSOLVER rocm_version`
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
       singular value decompositions (SVD).
     - Supports functions like ``torch.linalg.solve``,
       ``torch.linalg.eig``, and ``torch.linalg.svd``.
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`__
     - :version-ref:`hipSPARSE rocm_version`
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse tensor operations with ``torch.sparse``.
   * - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`__
     - :version-ref:`hipSPARSELt rocm_version`
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse tensor operations with ``torch.sparse``.
   * - `hipTensor <https://github.com/ROCm/hipTensor>`__
     - :version-ref:`hipTensor rocm_version`
     - Optimizes for high-performance tensor operations, such as contractions.
     - Accelerates tensor algebra, especially in deep learning and scientific
       computing.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`__
     - :version-ref:`MIOpen rocm_version`
     - Optimizes deep learning primitives such as convolutions, pooling,
       normalization, and activation functions.
     - Speeds up convolutional neural networks (CNNs), recurrent neural
       networks (RNNs), and other layers. Used in operations like
       ``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
   * - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`__
     - :version-ref:`MIGraphX rocm_version`
     - Adds graph-level optimizations, ONNX model and mixed-precision support,
       and enables Ahead-of-Time (AOT) compilation.
     - Speeds up inference models and executes ONNX models for
       compatibility with other frameworks.
   * - `MIVisionX <https://github.com/ROCm/MIVisionX>`__
     - :version-ref:`MIVisionX rocm_version`
     - Optimizes acceleration for computer vision and AI workloads like
       preprocessing, augmentation, and inferencing.
     - Faster data preprocessing and augmentation pipelines for datasets like
       ImageNet or COCO; easy to integrate into PyTorch's ``torch.utils.data``
       and ``torchvision`` workflows.
   * - `rocAL <https://github.com/ROCm/rocAL>`__
     - :version-ref:`rocAL rocm_version`
     - Accelerates the data pipeline by offloading intensive preprocessing and
       augmentation tasks. rocAL is part of MIVisionX.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data loading workflows.
   * - `RCCL <https://github.com/ROCm/rccl>`__
     - :version-ref:`RCCL rocm_version`
     - Optimizes multi-GPU communication for operations like AllReduce and
       Broadcast.
     - Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
       Handles communication in multi-GPU setups.
   * - `rocDecode <https://github.com/ROCm/rocDecode>`__
     - :version-ref:`rocDecode rocm_version`
     - Provides hardware-accelerated data decoding capabilities, particularly
       for image, video, and other dataset formats.
     - Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
       and ``torch.distributed``.
   * - `rocJPEG <https://github.com/ROCm/rocJPEG>`__
     - :version-ref:`rocJPEG rocm_version`
     - Provides hardware-accelerated JPEG image decoding and encoding.
     - GPU-accelerated ``torchvision.io.decode_jpeg`` and
       ``torchvision.io.encode_jpeg``; can be integrated in
       ``torch.utils.data`` and ``torchvision``.
   * - `RPP <https://github.com/ROCm/RPP>`__
     - :version-ref:`RPP rocm_version`
     - Speeds up data augmentation, transformation, and other preprocessing steps.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data loading workflows to speed up data processing.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`__
     - :version-ref:`rocThrust rocm_version`
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
     - Utilized in backend operations for tensor computations requiring
       parallel processing.
   * - `rocWMMA <https://github.com/ROCm/rocWMMA>`__
     - :version-ref:`rocWMMA rocm_version`
     - Accelerates warp-level matrix-multiply and matrix-accumulate to speed up matrix
       multiplication (GEMM) and accumulation operations with mixed-precision
       support.
     - Linear layers (``torch.nn.Linear``), convolutional layers
       (``torch.nn.Conv2d``), attention layers, and general tensor operations
       that involve matrix products, such as ``torch.matmul`` and ``torch.bmm``.

Supported modules and data types
================================================================================

The following section outlines the supported data types, modules, and domain
libraries available in PyTorch on ROCm.

Supported data types
--------------------------------------------------------------------------------

The tensor data type is specified using the ``dtype`` attribute or argument.
PyTorch supports many data types for different use cases.

The following table lists `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`__
single data types:

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
   * - ``torch.float8_e4m3fn``
     - 8-bit floating point, e4m3
   * - ``torch.float8_e5m2``
     - 8-bit floating point, e5m2
   * - ``torch.float16`` or ``torch.half``
     - 16-bit floating point
   * - ``torch.bfloat16``
     - 16-bit floating point
   * - ``torch.float32`` or ``torch.float``
     - 32-bit floating point
   * - ``torch.float64`` or ``torch.double``
     - 64-bit floating point
   * - ``torch.complex32`` or ``torch.chalf``
     - 32-bit complex numbers
   * - ``torch.complex64`` or ``torch.cfloat``
     - 64-bit complex numbers
   * - ``torch.complex128`` or ``torch.cdouble``
     - 128-bit complex numbers
   * - ``torch.uint8``
     - 8-bit integer (unsigned)
   * - ``torch.uint16``
     - 16-bit integer (unsigned);
       not natively supported in ROCm
   * - ``torch.uint32``
     - 32-bit integer (unsigned);
       not natively supported in ROCm
   * - ``torch.uint64``
     - 64-bit integer (unsigned);
       not natively supported in ROCm
   * - ``torch.int8``
     - 8-bit integer (signed)
   * - ``torch.int16`` or ``torch.short``
     - 16-bit integer (signed)
   * - ``torch.int32`` or ``torch.int``
     - 32-bit integer (signed)
   * - ``torch.int64`` or ``torch.long``
     - 64-bit integer (signed)
   * - ``torch.bool``
     - Boolean
   * - ``torch.quint8``
     - Quantized 8-bit integer (unsigned)
   * - ``torch.qint8``
     - Quantized 8-bit integer (signed)
   * - ``torch.qint32``
     - Quantized 32-bit integer (signed)
   * - ``torch.quint4x2``
     - Quantized 4-bit integer (unsigned)

.. note::

   Unsigned types, except ``uint8``, have limited support in eager mode. They
   primarily exist to assist usage with ``torch.compile``.

See :doc:`ROCm precision support <rocm:reference/precision-support>` for the
native hardware support of data types.
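
As a short illustration of selecting data types (a minimal sketch; the shapes
and values are arbitrary), the ``dtype`` argument and ``Tensor.to`` can be
used as follows:

.. code-block:: python

   import torch

   # On ROCm, the HIP backend is exposed through the familiar torch.cuda API.
   device = "cuda" if torch.cuda.is_available() else "cpu"

   x = torch.randn(4, 4, dtype=torch.float32, device=device)
   y = x.to(torch.bfloat16)   # cast to a lower-precision type
   print(y.dtype, y.device)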

Supported modules
--------------------------------------------------------------------------------

For a complete and up-to-date list of PyTorch core modules (for example, ``torch``,
``torch.nn``, ``torch.cuda``, ``torch.backends.cuda`` and
``torch.backends.cudnn``), their descriptions, and usage, refer directly
to the `official PyTorch documentation <https://pytorch.org/docs/stable/index.html>`_.

Core PyTorch functionality on ROCm includes tensor operations, neural network
layers, automatic differentiation, distributed training, mixed-precision
training, compilation features, and domain-specific libraries for audio, vision,
text processing, and more.
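
The sketch below exercises a few of these pieces together on the GPU (a
minimal example; the layer sizes are arbitrary):

.. code-block:: python

   import torch
   import torch.nn as nn

   device = "cuda" if torch.cuda.is_available() else "cpu"
   model = nn.Linear(16, 4).to(device)                # neural network layer
   x = torch.randn(8, 16, device=device, requires_grad=True)
   loss = model(x).sum()
   loss.backward()                                    # automatic differentiation
   print(x.grad.shape)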

Supported domain libraries
--------------------------------------------------------------------------------

PyTorch offers specialized `domain libraries <https://pytorch.org/domains/>`_ with
GPU acceleration that build on its core features to support specific application
areas. The table below lists the PyTorch domain libraries that are compatible
with ROCm.

.. list-table::
   :header-rows: 1

   * - Library
     - Description

   * - `torchaudio <https://docs.pytorch.org/audio/stable/index.html>`_
     - Audio and signal processing library for PyTorch. Provides utilities for
       audio I/O, signal and data processing functions, datasets, model
       implementations, and application components for audio and speech
       processing tasks.

       **Note:** To ensure GPU acceleration with ``torchaudio.transforms``,
       you need to explicitly move the audio data (waveform tensor) to the GPU
       using ``.to('cuda')``, as shown in the sketch after this table.

   * - `torchtune <https://meta-pytorch.org/torchtune/stable/index.html>`_
     - PyTorch-native library designed for fine-tuning large language models
       (LLMs). Supports the full fine-tuning workflow and offers
       compatibility with popular production inference systems.

       **Note:** Only the official upstream release exists.

   * - `torchvision <https://docs.pytorch.org/vision/stable/index.html>`_
     - Computer vision library that is part of the PyTorch project. Provides
       popular datasets, model architectures, and common image transformations
       for computer vision applications.

   * - `torchdata <https://meta-pytorch.org/data/beta/index.html#torchdata>`_
     - Beta library of common modular data loading primitives for easily
       constructing flexible and performant data pipelines, with features still
       in the prototype stage.

   * - `torchrec <https://meta-pytorch.org/torchrec/>`_
     - PyTorch domain library for common sparsity and parallelism primitives
       needed for large-scale recommender systems, enabling authors to train
       models with large embedding tables shared across many GPUs.

       **Note:** ``torchrec`` does not implement ROCm-specific kernels. ROCm
       acceleration is provided through the underlying PyTorch framework and
       ROCm library integration.

   * - `torchserve <https://docs.pytorch.org/serve/>`_
     - Performant, flexible and easy-to-use tool for serving PyTorch models in
       production, providing features for model management, batch processing,
       and scalable deployment.

       **Note:** `torchserve <https://docs.pytorch.org/serve/>`_ is no longer
       actively maintained. The last official release shipped with PyTorch 2.4.

   * - `torchrl <https://docs.pytorch.org/rl/stable/index.html>`_
     - Open-source, Python-first reinforcement learning library for PyTorch
       with a focus on high modularity and good runtime performance, providing
       low- and high-level RL abstractions and reusable functionals for cost
       functions, returns, and data processing.

       **Note:** Only the official upstream release exists.

   * - `tensordict <https://docs.pytorch.org/tensordict/stable/index.html>`_
     - Dictionary-like class that simplifies operations on batches of tensors,
       enhancing code readability, compactness, and modularity by abstracting
       tailored operations and reducing errors through automatic operation
       dispatching.

       **Note:** Only the official upstream release exists.
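
The following minimal sketch shows the explicit device move mentioned in the
``torchaudio`` note above; ``"sample.wav"`` is a placeholder path:

.. code-block:: python

   import torch
   import torchaudio

   waveform, sample_rate = torchaudio.load("sample.wav")
   transform = torchaudio.transforms.Spectrogram().to("cuda")
   spec = transform(waveform.to("cuda"))   # move the waveform to the GPU
   print(spec.shape, spec.device)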

Key features and enhancements for PyTorch 2.9 with ROCm 7.1.1
================================================================================

- Scaled Dot Product Attention (SDPA) upgraded to use AOTriton version 0.11b.

- Default hipBLASLt support enabled for gfx908 architecture on ROCm 6.3 and later.

- MIOpen now supports channels last memory format for 3D convolutions and batch normalization.

- NHWC convolution operations in MIOpen optimized by eliminating unnecessary transpose operations.

- Improved ``tensor.item()`` performance by removing redundant synchronization.

- Enhanced performance for element-wise operations and reduction kernels.

- Added support for grouped GEMM operations through ``fbgemm_gpu`` generative AI components.

- Resolved device error in Inductor when using CUDA graph trees with HIP.

- Corrected logsumexp scaling in the AOTriton-based SDPA implementation.

- Added stream graph capture status validation in memory copy synchronization functions.

Key features and enhancements for PyTorch 2.8 with ROCm 7.1
================================================================================

- MIOpen deep learning optimizations: Further optimized the NHWC BatchNorm feature.

- Added float8 support for the DeepSpeed extension, allowing for a decreased
  memory footprint and increased throughput in training and inference workloads.

- ``torch.nn.functional.scaled_dot_product_attention`` now calls the optimized
  Flash Attention kernel automatically.
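
A hedged sketch of calling SDPA directly (the shapes are arbitrary; on ROCm
the backend picks the optimized kernel when the inputs are eligible):

.. code-block:: python

   import torch
   import torch.nn.functional as F

   q, k, v = (torch.randn(2, 8, 128, 64, device="cuda", dtype=torch.float16)
              for _ in range(3))
   out = F.scaled_dot_product_attention(q, k, v)   # fused attention kernel
   print(out.shape)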

Key features and enhancements for PyTorch 2.7/2.8 with ROCm 7.0
================================================================================

- Enhanced TunableOp framework: Introduces ``tensorfloat32`` support for
  TunableOp operations, improved offline tuning for ScaledGEMM operations,
  submatrix offline tuning capabilities, and better logging for BLAS operations
  without bias vectors.

- Expanded GPU architecture support: Provides optimized support for newer GPU
  architectures, including gfx1200 and gfx1201 with preferred hipBLASLt backend
  selection, along with improvements for gfx950 and gfx1100 Series GPUs.

- Advanced Triton integration: AOTriton 0.10b introduces official support for
  gfx950 and gfx1201, along with experimental support for gfx1101, gfx1151,
  gfx1150, and gfx1200.

- Improved element-wise kernel performance: Delivers enhanced vectorized
  element-wise kernels with better support for heterogeneous tensor types and
  optimized input vectorization for tensors with mixed data types.

- MIOpen deep learning optimizations: Enables NHWC BatchNorm by default on
  ROCm 7.0+, provides ``maxpool`` forward and backward performance improvements
  targeting ResNet scenarios, and includes updated launch configurations for
  better performance.

- Enhanced memory and tensor operations: Features fixes for in-place ``aten``
  sum operations with specialized templated kernels, improved 3D tensor
  performance with NHWC format, and better handling of memory-bound matrix
  multiplication operations.

- Robust testing and quality improvements: Includes comprehensive test suite
  updates with improved tolerance handling for Navi3x architectures, generalized
  ROCm-specific test conditions, and enhanced unit test coverage for Flash
  Attention and Memory Efficient operations.

- Composable Kernel (CK) updates: Features updated CK submodule integration with
  the latest optimizations and performance improvements for core mathematical
  operations.

- Development and debugging enhancements: Includes improved source handling for
  dynamic compilation, better error handling for atomic operations, and enhanced
  state checking for trace operations.

- Integrated APEX fused layer normalization, which can have a positive impact on
  text-to-video models.

- Integrated APEX distributed fused LAMB and distributed fused ADAM, which can
  have a positive impact on BERT-L and Llama2-SFT.

- FlashAttention v3 has been integrated for AMD GPUs.

- `PyTorch C++ extensions <https://pytorch.org/tutorials/advanced/cpp_extension.html>`_
  provide a mechanism for compiling custom operations that can be used during
  network training or inference. For AMD platforms, ``amdclang++`` has been
  validated as the supported compiler for building these extensions.
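
As an illustration, a custom C++ operator can be built with the standard
``torch.utils.cpp_extension`` machinery. This is a minimal, hypothetical
``setup.py`` sketch; the extension and source file names are placeholders:

.. code-block:: python

   # setup.py -- hypothetical example for building a C++ extension on ROCm.
   from setuptools import setup
   from torch.utils.cpp_extension import BuildExtension, CppExtension

   setup(
       name="my_rocm_ext",
       ext_modules=[CppExtension("my_rocm_ext", ["my_ext.cpp"])],
       cmdclass={"build_ext": BuildExtension},
   )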

Known issues and notes for PyTorch 2.7/2.8 with ROCm 7.0 and ROCm 7.1
================================================================================

- The ``matmul.allow_fp16_reduced_precision_reduction`` and
  ``matmul.allow_bf16_reduced_precision_reduction`` options under
  ``torch.backends.cuda`` are not supported. As a result,
  reduced-precision reductions using FP16 or BF16 accumulation types are not
  available.

@@ -1,114 +0,0 @@

:orphan:

.. meta::
   :description: Ray compatibility
   :keywords: GPU, Ray, deep learning, framework compatibility

.. version-set:: rocm_version latest

*******************************************************************************
Ray compatibility
*******************************************************************************

Ray is a unified framework for scaling AI and Python applications from your laptop
to a full cluster, without changing your code. Ray consists of `a core distributed
runtime <https://docs.ray.io/en/latest/ray-core/walkthrough.html>`__ and a set of
`AI libraries <https://docs.ray.io/en/latest/ray-air/getting-started.html>`__ for
simplifying machine learning computations.

Ray is a general-purpose framework that runs many types of workloads efficiently.
Any Python application can be scaled with Ray, without extra infrastructure.
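
For instance, a plain Python function becomes a distributed task with a single
decorator (a minimal sketch; ``num_gpus=1`` assumes a visible AMD GPU):

.. code-block:: python

   import ray

   ray.init()  # start a local Ray runtime

   @ray.remote(num_gpus=1)   # reserve one GPU for each task invocation
   def square(x):
       return x * x

   futures = [square.remote(i) for i in range(4)]
   print(ray.get(futures))   # [0, 1, 4, 9]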

Support overview
================================================================================

- The ROCm-supported version of Ray is maintained in the official `https://github.com/ROCm/ray
  <https://github.com/ROCm/ray>`__ repository, which differs from the
  `https://github.com/ray-project/ray <https://github.com/ray-project/ray>`__ upstream repository.

- To get started and install Ray on ROCm, use the prebuilt :ref:`Docker image <ray-docker-compat>`,
  which includes ROCm, Ray, and all required dependencies.

- See the :doc:`ROCm Ray installation guide <rocm-install-on-linux:install/3rd-party/ray-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://docs.ray.io/en/latest/ray-overview/installation.html>`__
  for additional context.

.. _ray-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `ROCm Ray Docker images <https://hub.docker.com/r/rocm/ray/tags>`__
with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories represent the latest Ray version from the official Docker Hub.
Click |docker-icon| to view the image on Docker Hub.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker image
     - ROCm
     - Ray
     - PyTorch
     - Ubuntu
     - Python
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/ray/ray-2.51.1_rocm7.0.0_ubuntu22.04_py3.12_pytorch2.9.0/images/sha256-a02f6766b4ba406f88fd7e85707ec86c04b569834d869a08043ec9bcbd672168"><i class="fab fa-docker fa-lg"></i> rocm/ray</a>

     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - `2.51.1 <https://github.com/ROCm/ray/tree/release/2.51.1>`__
     - 2.9.0a0+git1c57644
     - 22.04
     - `3.12.12 <https://www.python.org/downloads/release/python-31212/>`__
     - MI300X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/ray/ray-2.48.0.post0_rocm6.4.1_ubuntu24.04_py3.12_pytorch2.6.0/images/sha256-0d166fe6bdced38338c78eedfb96eff92655fb797da3478a62dd636365133cc0"><i class="fab fa-docker fa-lg"></i> rocm/ray</a>

     - `6.4.1 <https://repo.radeon.com/rocm/apt/6.4.1/>`__
     - `2.48.0.post0 <https://github.com/ROCm/ray/tree/release/2.48.0.post0>`__
     - 2.6.0+git684f6f2
     - 24.04
     - `3.12.10 <https://www.python.org/downloads/release/python-31210/>`__
     - MI300X, MI210

Use cases and recommendations
================================================================================

* The `Reinforcement Learning from Human Feedback on AMD GPUs with verl and ROCm
  Integration <https://rocm.blogs.amd.com/artificial-intelligence/verl-large-scale/README.html>`__
  blog provides an overview of Volcano Engine Reinforcement Learning (verl)
  for large language models (LLMs) and discusses its benefits in large-scale
  reinforcement learning from human feedback (RLHF). It uses Ray as part of a
  hybrid orchestration engine to schedule and coordinate training and inference
  tasks in parallel, enabling optimized resource utilization and potential overlap
  between these phases. This dynamic resource allocation strategy significantly
  improves overall system efficiency. The blog presents verl’s performance results,
  focusing on throughput and convergence accuracy achieved on AMD Instinct™ MI300X
  GPUs. Follow this guide to get started with verl on AMD Instinct GPUs and
  accelerate your RLHF training with ROCm-optimized performance.

* The `Exploring Use Cases for Scalable AI: Implementing Ray with ROCm Support for Efficient ML Workflows
  <https://rocm.blogs.amd.com/artificial-intelligence/rocm-ray/README.html>`__
  blog post describes key use cases such as training and inference for large language models (LLMs),
  model serving, hyperparameter tuning, reinforcement learning, and the orchestration of large-scale
  workloads using Ray in the ROCm environment.

For more use cases and recommendations, see the AMD GPU tabs in the `Accelerator Support
topic <https://docs.ray.io/en/latest/ray-core/scheduling/accelerators.html#accelerator-support>`__
of the Ray core documentation and refer to the `AMD ROCm blog <https://rocm.blogs.amd.com/>`__,
where you can search for Ray examples and best practices to optimize your workloads on AMD GPUs.

Previous versions
===============================================================================

See :doc:`rocm-install-on-linux:install/3rd-party/previous-versions/ray-history` to find documentation for previous releases
of the ``ROCm/ray`` Docker image.

@@ -1,116 +0,0 @@

:orphan:

.. meta::
   :description: Stanford Megatron-LM compatibility
   :keywords: Stanford, Megatron-LM, deep learning, framework compatibility

.. version-set:: rocm_version latest

********************************************************************************
Stanford Megatron-LM compatibility
********************************************************************************

Stanford Megatron-LM is a large-scale language model training framework developed
by NVIDIA at `https://github.com/NVIDIA/Megatron-LM <https://github.com/NVIDIA/Megatron-LM>`_.
It is designed to train massive transformer-based language models efficiently
using model and data parallelism.

It provides efficient tensor, pipeline, and sequence-based model parallelism for
pre-training transformer-based language models such as GPT (Decoder Only), BERT
(Encoder Only), and T5 (Encoder-Decoder).

Support overview
================================================================================

- The ROCm-supported version of Stanford Megatron-LM is maintained in the official `https://github.com/ROCm/Stanford-Megatron-LM
  <https://github.com/ROCm/Stanford-Megatron-LM>`__ repository, which differs from the
  `https://github.com/stanford-futuredata/Megatron-LM <https://github.com/stanford-futuredata/Megatron-LM>`__ upstream repository.

- To get started and install Stanford Megatron-LM on ROCm, use the prebuilt :ref:`Docker image <megatron-lm-docker-compat>`,
  which includes ROCm, Stanford Megatron-LM, and all required dependencies.

- See the :doc:`ROCm Stanford Megatron-LM installation guide <rocm-install-on-linux:install/3rd-party/stanford-megatron-lm-install>`
  for installation and setup instructions.

- You can also consult the upstream `Installation guide <https://github.com/NVIDIA/Megatron-LM>`__
  for additional context.

.. _megatron-lm-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `Stanford Megatron-LM images <https://hub.docker.com/r/rocm/stanford-megatron-lm/tags>`_
with ROCm and PyTorch backends on Docker Hub. The following Docker image tags and associated
inventories represent the latest Stanford Megatron-LM version from the official Docker Hub.
Click |docker-icon| to view the image on Docker Hub.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker image
     - ROCm
     - Stanford Megatron-LM
     - PyTorch
     - Ubuntu
     - Python
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/stanford-megatron-lm/stanford-megatron-lm85f95ae_rocm6.3.0_ubuntu24.04_py3.12_pytorch2.4.0/images/sha256-070556f078be10888a1421a2cb4f48c29f28b02bfeddae02588d1f7fc02a96a6"><i class="fab fa-docker fa-lg"></i> rocm/stanford-megatron-lm</a>

     - `6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_
     - `85f95ae <https://github.com/stanford-futuredata/Megatron-LM/commit/85f95aef3b648075fe6f291c86714fdcbd9cd1f5>`_
     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 24.04
     - `3.12.9 <https://www.python.org/downloads/release/python-3129/>`_
     - MI300X

Supported models and features with ROCm 6.3.0
================================================================================

This section details the models and features supported by the ROCm version of Stanford Megatron-LM.

Models:

* BERT
* GPT
* T5
* ICT

Features:

* Distributed Pre-training
* Activation Checkpointing and Recomputation
* Distributed Optimizer
* Mixture-of-Experts

.. _megatron-lm-recommendations:

Use cases and recommendations
================================================================================

The following blog post mentions Megablocks, but you can run Stanford Megatron-LM with the same steps to pre-process datasets on AMD GPUs:

* The `Efficient MoE training on AMD ROCm: How-to use Megablocks on AMD GPUs
  <https://rocm.blogs.amd.com/artificial-intelligence/megablocks/README.html>`__
  blog post shows how to leverage the ROCm platform for pre-training using the
  Megablocks framework. It introduces a streamlined approach for training Mixture-of-Experts
  (MoE) models using the Megablocks library on AMD hardware. Focusing on GPT-2, it
  demonstrates how block-sparse computations can enhance scalability and efficiency in MoE
  training. The guide provides step-by-step instructions for setting up the environment,
  including cloning the repository, building the Docker image, and running the training container.
  Additionally, it offers insights into utilizing the ``oscar-1GB.json`` dataset for pre-training
  language models. By leveraging Megablocks and the ROCm platform, you can optimize your MoE
  training workflows for large-scale transformer models.

  It shows how to pre-process datasets and how to begin pre-training on AMD GPUs through:

  * Single-GPU pre-training
  * Multi-GPU pre-training

@@ -1,435 +0,0 @@

:orphan:

.. meta::
   :description: TensorFlow compatibility
   :keywords: GPU, TensorFlow, deep learning, framework compatibility

.. version-set:: rocm_version latest

*******************************************************************************
TensorFlow compatibility
*******************************************************************************

`TensorFlow <https://www.tensorflow.org/>`__ is an open-source library for
solving machine learning, deep learning, and AI problems. It can solve many
problems across different sectors and industries, but primarily focuses on
neural network training and inference. It is one of the most popular deep
learning frameworks and is very active in open-source development.

Support overview
================================================================================

- The ROCm-supported version of TensorFlow is maintained in the official `https://github.com/ROCm/tensorflow-upstream
  <https://github.com/ROCm/tensorflow-upstream>`__ repository, which differs from the
  `https://github.com/tensorflow/tensorflow <https://github.com/tensorflow/tensorflow>`__ upstream repository.

- To get started and install TensorFlow on ROCm, use the prebuilt :ref:`Docker images <tensorflow-docker-compat>`,
  which include ROCm, TensorFlow, and all required dependencies.

- See the :doc:`ROCm TensorFlow installation guide <rocm-install-on-linux:install/3rd-party/tensorflow-install>`
  for installation and setup instructions.

- You can also consult the `TensorFlow API versions <https://www.tensorflow.org/versions>`__ list
  for additional context.

Version support
--------------------------------------------------------------------------------

The `official TensorFlow repository <http://github.com/tensorflow/tensorflow>`__
includes full ROCm support. AMD maintains a TensorFlow `ROCm repository
<http://github.com/rocm/tensorflow-upstream>`__ to quickly add bug
fixes, updates, and support for the latest ROCm versions.

.. _tensorflow-docker-compat:

Docker image compatibility
================================================================================

AMD provides preconfigured Docker images with TensorFlow and the ROCm backend.
These images are published on `Docker Hub <https://hub.docker.com/r/rocm/tensorflow>`__ and are the
recommended way to get started with deep learning with TensorFlow on ROCm.

To find the right image tag, see the :ref:`TensorFlow on ROCm installation
documentation <rocm-install-on-linux:tensorflow-docker-support>` for a list of
available ``rocm/tensorflow`` images.


Critical ROCm libraries for TensorFlow
===============================================================================

TensorFlow depends on multiple components, and the supported features of those
components can affect the TensorFlow ROCm supported feature set. The versions
in the following table refer to the first TensorFlow version where the ROCm
library was introduced as a dependency. The versions described
are available in ROCm :version:`rocm_version`.

.. list-table::
   :widths: 25, 10, 35, 30
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
     - Used in
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`__
     - :version-ref:`hipBLAS rocm_version`
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Accelerates operations like ``tf.matmul``, ``tf.linalg.matmul``, and
       other matrix multiplications commonly used in neural network layers.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`__
     - :version-ref:`hipBLASLt rocm_version`
     - Extends hipBLAS with additional optimizations like fused kernels and
       integer tensor cores.
     - Optimizes matrix multiplications and linear algebra operations used in
       dense, convolutional, and recurrent layers in TensorFlow.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`__
     - :version-ref:`hipCUB rocm_version`
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort and select.
     - Supports operations like ``tf.reduce_sum``, ``tf.cumsum``, ``tf.sort``
       and other tensor operations in TensorFlow, especially those involving
       scanning, sorting, and filtering.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`__
     - :version-ref:`hipFFT rocm_version`
     - Accelerates Fast Fourier Transforms (FFT) for signal processing tasks.
     - Used for operations like signal processing, image filtering, and
       certain types of neural networks requiring FFT-based transformations.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`__
     - :version-ref:`hipSOLVER rocm_version`
     - Provides GPU-accelerated direct linear solvers for dense and sparse
       systems.
     - Optimizes linear algebra functions such as solving systems of linear
       equations, often used in optimization and training tasks.
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`__
     - :version-ref:`hipSPARSE rocm_version`
     - Optimizes sparse matrix operations for efficient computations on sparse
       data.
     - Accelerates sparse matrix operations in models with sparse weight
       matrices or activations, commonly used in neural networks.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`__
     - :version-ref:`MIOpen rocm_version`
     - Provides optimized deep learning primitives such as convolutions,
       pooling, normalization, and activation functions.
     - Speeds up convolutional neural networks (CNNs) and other layers. Used
       in TensorFlow for layers like ``tf.nn.conv2d``, ``tf.nn.relu``, and
       ``tf.nn.lstm_cell``.
   * - `RCCL <https://github.com/ROCm/rccl>`__
     - :version-ref:`RCCL rocm_version`
     - Optimizes multi-GPU communication for operations like AllReduce and
       Broadcast.
     - Distributed data parallel training (``tf.distribute.MirroredStrategy``).
       Handles communication in multi-GPU setups.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`__
     - :version-ref:`rocThrust rocm_version`
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
     - Reduction operations like ``tf.reduce_sum``, ``tf.cumsum`` for computing
       the cumulative sum of elements along a given axis, or ``tf.unique`` to
       find unique elements in a tensor, can use rocThrust.

Supported and unsupported features
===============================================================================

The following section maps supported data types and GPU-accelerated TensorFlow
features to their minimum supported ROCm and TensorFlow versions.

Data types
---------------

The data type of a tensor is specified using the ``dtype`` attribute or
argument, and TensorFlow supports a wide range of data types for different use
cases.

The basic, single data types of `tf.dtypes <https://www.tensorflow.org/api_docs/python/tf/dtypes>`__
are as follows:

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
     - Since TensorFlow
     - Since ROCm
   * - ``bfloat16``
     - 16-bit bfloat (brain floating point).
     - 1.0.0
     - 1.7
   * - ``bool``
     - Boolean.
     - 1.0.0
     - 1.7
   * - ``complex128``
     - 128-bit complex.
     - 1.0.0
     - 1.7
   * - ``complex64``
     - 64-bit complex.
     - 1.0.0
     - 1.7
   * - ``double``
     - 64-bit (double precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``float16``
     - 16-bit (half precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``float32``
     - 32-bit (single precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``float64``
     - 64-bit (double precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``half``
     - 16-bit (half precision) floating-point.
     - 2.0.0
     - 2.0
   * - ``int16``
     - Signed 16-bit integer.
     - 1.0.0
     - 1.7
   * - ``int32``
     - Signed 32-bit integer.
     - 1.0.0
     - 1.7
   * - ``int64``
     - Signed 64-bit integer.
     - 1.0.0
     - 1.7
   * - ``int8``
     - Signed 8-bit integer.
     - 1.0.0
     - 1.7
   * - ``qint16``
     - Signed quantized 16-bit integer.
     - 1.0.0
     - 1.7
   * - ``qint32``
     - Signed quantized 32-bit integer.
     - 1.0.0
     - 1.7
   * - ``qint8``
     - Signed quantized 8-bit integer.
     - 1.0.0
     - 1.7
   * - ``quint16``
     - Unsigned quantized 16-bit integer.
     - 1.0.0
     - 1.7
   * - ``quint8``
     - Unsigned quantized 8-bit integer.
     - 1.0.0
     - 1.7
   * - ``resource``
     - Handle to a mutable, dynamically allocated resource.
     - 1.0.0
     - 1.7
   * - ``string``
     - Variable-length string, represented as byte array.
     - 1.0.0
     - 1.7
   * - ``uint16``
     - Unsigned 16-bit (word) integer.
     - 1.0.0
     - 1.7
   * - ``uint32``
     - Unsigned 32-bit (dword) integer.
     - 1.5.0
     - 1.7
   * - ``uint64``
     - Unsigned 64-bit (qword) integer.
     - 1.5.0
     - 1.7
   * - ``uint8``
     - Unsigned 8-bit (byte) integer.
     - 1.0.0
     - 1.7
   * - ``variant``
     - Data of arbitrary type (known at runtime).
     - 1.4.0
     - 1.7
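
A brief sketch of working with these data types (the values are arbitrary):

.. code-block:: python

   import tensorflow as tf

   a = tf.constant([1.0, 2.0], dtype=tf.bfloat16)   # explicit dtype
   b = tf.cast(a, tf.float32)                       # cast between types
   print(a.dtype, b.dtype)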

Features
---------------

This table provides an overview of key features in TensorFlow and their
availability in ROCm.

.. list-table::
   :header-rows: 1

   * - Module
     - Description
     - Since TensorFlow
     - Since ROCm
   * - ``tf.linalg`` (Linear Algebra)
     - Operations for matrix and tensor computations, such as
       ``tf.linalg.matmul`` (matrix multiplication), ``tf.linalg.inv``
       (matrix inversion) and ``tf.linalg.cholesky`` (Cholesky decomposition).
       These leverage GPUs for high-performance linear algebra operations.
     - 1.4
     - 1.8.2
   * - ``tf.nn`` (Neural Network Operations)
     - GPU-accelerated building blocks for deep learning models, such as 2D
       convolutions with ``tf.nn.conv2d``, max pooling operations with
       ``tf.nn.max_pool``, activation functions like ``tf.nn.relu`` or softmax
       for output layers with ``tf.nn.softmax``.
     - 1.0
     - 1.8.2
   * - ``tf.image`` (Image Processing)
     - GPU-accelerated functions for image preprocessing and augmentations,
       such as resizing images with ``tf.image.resize``, flipping images
       horizontally with ``tf.image.flip_left_right`` and adjusting image
       brightness randomly with ``tf.image.random_brightness``.
     - 1.1
     - 1.8.2
   * - ``tf.keras`` (High-Level API)
     - GPU acceleration for Keras layers and models, including dense layers
       (``tf.keras.layers.Dense``), convolutional layers
       (``tf.keras.layers.Conv2D``) and recurrent layers
       (``tf.keras.layers.LSTM``).
     - 1.4
     - 1.8.2
   * - ``tf.math`` (Mathematical Operations)
     - GPU-accelerated mathematical operations, such as summing across
       dimensions with ``tf.math.reduce_sum``, elementwise exponentiation with
       ``tf.math.exp`` and sigmoid activation (``tf.math.sigmoid``).
     - 1.5
     - 1.8.2
   * - ``tf.signal`` (Signal Processing)
     - Functions for spectral analysis and signal transformations.
     - 1.13
     - 2.1
   * - ``tf.data`` (Data Input Pipeline)
     - GPU-accelerated data preprocessing for efficient input pipelines,
       prefetching with ``tf.data.experimental.AUTOTUNE``, and GPU-enabled
       transformations like ``map`` and ``batch``.
     - 1.4
     - 1.8.2
   * - ``tf.distribute`` (Distributed Training)
     - Enables scaling computations across multiple devices on a single
       machine or across multiple machines.
     - 1.13
     - 2.1
   * - ``tf.random`` (Random Number Generation)
     - GPU-accelerated random number generation.
     - 1.12
     - 1.9.2
   * - ``tf.TensorArray`` (Dynamic Array Operations)
     - Enables dynamic tensor manipulation on GPUs.
     - 1.0
     - 1.8.2
   * - ``tf.sparse`` (Sparse Tensor Operations)
     - GPU-accelerated sparse matrix manipulations.
     - 1.9
     - 1.9.0
   * - ``tf.experimental.numpy``
     - GPU-accelerated NumPy-like API for numerical computations.
     - 2.4
     - 4.1.1
   * - ``tf.RaggedTensor``
     - Handling of variable-length sequences and ragged tensors with GPU
       support.
     - 1.13
     - 2.1
   * - ``tf.function`` with XLA (Accelerated Linear Algebra)
     - Enables compiling functions for GPU-accelerated execution with XLA.
     - 1.14
     - 2.4
   * - ``tf.quantization``
     - Quantized operations for inference, accelerated on GPUs.
     - 1.12
     - 1.9.2
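
For example, several of these modules compose naturally on the GPU (a minimal
sketch; the shapes are arbitrary):

.. code-block:: python

   import tensorflow as tf

   x = tf.random.normal([64, 128])        # tf.random on the GPU
   w = tf.random.normal([128, 10])
   logits = tf.linalg.matmul(x, w)        # tf.linalg matrix multiply
   probs = tf.nn.softmax(logits)          # tf.nn activation
   print(probs.shape)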

Distributed library features
-----------------------------------

Enables developers to scale computations across multiple devices on a single
machine or across multiple machines.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since TensorFlow
     - Since ROCm
   * - ``MultiWorkerMirroredStrategy``
     - Synchronous training across multiple workers using mirrored variables.
     - 2.0
     - 3.0
   * - ``MirroredStrategy``
     - Synchronous training across multiple GPUs on one machine.
     - 1.5
     - 2.5
   * - ``TPUStrategy``
     - Efficiently trains models on Google TPUs.
     - 1.9
     - ❌
   * - ``ParameterServerStrategy``
     - Asynchronous training using parameter servers for variable management.
     - 2.1
     - 4.0
   * - ``CentralStorageStrategy``
     - Keeps variables on a single device and performs computation on multiple
       devices.
     - 2.3
     - 4.1
   * - ``CollectiveAllReduceStrategy``
     - Synchronous training across multiple devices and hosts.
     - 1.14
     - 3.5
   * - Distribution Strategies API
     - High-level API to simplify distributed training configuration and
       execution.
     - 1.10
     - 3.0
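
A hedged sketch of the most common strategy on a single multi-GPU node:

.. code-block:: python

   import tensorflow as tf

   strategy = tf.distribute.MirroredStrategy()   # all visible local GPUs
   print("Replicas in sync:", strategy.num_replicas_in_sync)

   with strategy.scope():                        # variables are mirrored
       model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
       model.compile(optimizer="sgd", loss="mse")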

Unsupported TensorFlow features
===============================================================================

The following are GPU-accelerated TensorFlow features not currently supported by
ROCm.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since TensorFlow
   * - Mixed Precision with TF32
     - Mixed precision with TF32 is used for matrix multiplications,
       convolutions, and other linear algebra operations, particularly in
       deep learning workloads like CNNs and transformers.
     - 2.4
   * - ``tf.distribute.TPUStrategy``
     - Efficiently trains models on Google TPUs.
     - 1.9

Use cases and recommendations
===============================================================================

* The `Training a Neural Collaborative Filtering (NCF) Recommender on an AMD
  GPU <https://rocm.blogs.amd.com/artificial-intelligence/ncf/README.html>`__
  blog post discusses training an NCF recommender system using TensorFlow. It
  explains how NCF improves traditional collaborative filtering methods by
  leveraging neural networks to model non-linear user-item interactions. The
  post outlines the implementation using the recommenders library, focusing on
  the use of implicit data (for example, user interactions like viewing or
  purchasing) and how it addresses challenges like the lack of negative values.

* The `Creating a PyTorch/TensorFlow code environment on AMD GPUs
  <https://rocm.blogs.amd.com/software-tools-optimization/pytorch-tensorflow-env/README.html>`__
  blog post provides instructions for creating a machine learning environment
  for PyTorch and TensorFlow on AMD GPUs using ROCm. It covers steps like
  installing the libraries, cloning code repositories, installing dependencies,
  and troubleshooting potential issues with CUDA-based code. Additionally, it
  explains how to HIPify code (port CUDA code to HIP) and manage Docker images
  for a better experience on AMD GPUs. This guide aims to help data scientists
  and ML practitioners adapt their code for AMD GPUs.

For more use cases and recommendations, see the `ROCm TensorFlow blog posts <https://rocm.blogs.amd.com/blog/tag/tensorflow.html>`__.

@@ -1,118 +0,0 @@

:orphan:

.. meta::
   :description: verl compatibility
   :keywords: GPU, verl, deep learning, framework compatibility

.. version-set:: rocm_version latest

*******************************************************************************
verl compatibility
*******************************************************************************

Volcano Engine Reinforcement Learning for LLMs (`verl <https://verl.readthedocs.io/en/latest/>`__)
is a reinforcement learning framework designed for large language models (LLMs).
verl offers a scalable, open-source fine-tuning solution by using a hybrid programming model
that makes it easy to define and run complex post-training dataflows efficiently.

Its modular APIs separate computation from data, allowing smooth integration with other frameworks.
It also supports flexible model placement across GPUs for efficient scaling on different cluster sizes.
verl achieves high training and generation throughput by building on existing LLM frameworks.
Its 3D-HybridEngine reduces memory use and communication overhead when switching between training
and inference, improving overall performance.

Support overview
================================================================================

- The ROCm-supported version of verl is maintained in the official `https://github.com/ROCm/verl
  <https://github.com/ROCm/verl>`__ repository, which differs from the
  `https://github.com/volcengine/verl <https://github.com/volcengine/verl>`__ upstream repository.

- To get started and install verl on ROCm, use the prebuilt :ref:`Docker image <verl-docker-compat>`,
  which includes ROCm, verl, and all required dependencies.

- See the :doc:`ROCm verl installation guide <rocm-install-on-linux:install/3rd-party/verl-install>`
  for installation and setup instructions.

- You can also consult the upstream `verl documentation <https://verl.readthedocs.io/en/latest/>`__
  for additional context.

.. _verl-docker-compat:

Compatibility matrix
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes `verl Docker images <https://hub.docker.com/r/rocm/verl/tags>`_
with ROCm backends on Docker Hub. The following Docker image tags and associated inventories
represent the latest verl version from the official Docker Hub.
Click |docker-icon| to view the image on Docker Hub.

.. list-table::
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker image
     - ROCm
     - verl
     - Ubuntu
     - PyTorch
     - Python
     - vllm
     - GPU

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/verl/verl-0.6.0.amd0_rocm7.0_vllm0.11.0.dev/images/sha256-f70a3ebc94c1f66de42a2fcc3f8a6a8d6d0881eb0e65b6958d7d6d24b3eecb0d"><i class="fab fa-docker fa-lg"></i> rocm/verl</a>

     - `7.0.0 <https://repo.radeon.com/rocm/apt/7.0/>`__
     - `0.6.0 <https://github.com/volcengine/verl/releases/tag/v0.6.0>`__
     - 22.04
     - `2.9.0 <https://github.com/ROCm/pytorch/tree/release/2.9-rocm7.x-gfx115x>`__
     - `3.12.11 <https://www.python.org/downloads/release/python-31211/>`__
     - `0.11.0 <https://github.com/vllm-project/vllm/releases/tag/v0.11.0>`__
     - MI300X

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/verl/verl-0.3.0.post0_rocm6.2_vllm0.6.3/images/sha256-cbe423803fd7850448b22444176bee06f4dcf22cd3c94c27732752d3a39b04b2"><i class="fab fa-docker fa-lg"></i> rocm/verl</a>

     - `6.2.0 <https://repo.radeon.com/rocm/apt/6.2/>`__
     - `0.3.0.post0 <https://github.com/volcengine/verl/releases/tag/v0.3.0.post0>`__
     - 20.04
     - `2.5.0 <https://github.com/ROCm/pytorch/tree/release/2.5>`__
     - `3.9.19 <https://www.python.org/downloads/release/python-3919/>`__
     - `0.6.3 <https://github.com/vllm-project/vllm/releases/tag/v0.6.3>`__
     - MI300X

.. _verl-supported_features:

Supported modules with verl on ROCm
===============================================================================

The following GPU-accelerated modules are supported with verl on ROCm (a brief
FSDP sketch follows the list):

- ``FSDP``: Training engine
- ``vllm``: Inference engine
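
A minimal, hedged FSDP sketch (assumes a ``torchrun`` launch so the usual
distributed environment variables are set; the layer sizes are arbitrary):

.. code-block:: python

   import torch
   import torch.distributed as dist
   from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

   # On ROCm, the "nccl" backend is backed by RCCL.
   dist.init_process_group("nccl")
   torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())

   model = FSDP(torch.nn.Linear(1024, 1024).cuda())  # shard parameters
   out = model(torch.randn(8, 1024, device="cuda"))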
|
||||
|
||||
.. _verl-recommendations:
|
||||
|
||||
Use cases and recommendations
|
||||
================================================================================
|
||||
|
||||
* The benefits of verl in large-scale reinforcement learning from human feedback
|
||||
(RLHF) are discussed in the `Reinforcement Learning from Human Feedback on AMD
|
||||
GPUs with verl and ROCm Integration <https://rocm.blogs.amd.com/artificial-intelligence/verl-large-scale/README.html>`__
|
||||
blog. The blog post outlines how the Volcano Engine Reinforcement Learning
|
||||
(verl) framework integrates with the AMD ROCm platform to optimize training on
|
||||
AMD Instinct™ GPUs. The guide details the process of building a Docker image,
|
||||
setting up single-node and multi-node training environments, and highlights
|
||||
performance benchmarks demonstrating improved throughput and convergence accuracy.
|
||||
This resource serves as a comprehensive starting point for deploying verl on AMD GPUs,
|
||||
facilitating efficient RLHF training workflows.
|
||||
|
||||
Previous versions
|
||||
===============================================================================
|
||||
See :doc:`rocm-install-on-linux:install/3rd-party/previous-versions/verl-history` to find documentation for previous releases
|
||||
of the ``ROCm/verl`` Docker image.
|
||||
145
docs/conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst
Normal file
@@ -0,0 +1,145 @@
|
||||
===========================
|
||||
How ROCm uses PCIe atomics
|
||||
===========================
|
||||
|
||||
|
||||
ROCm PCIe feature and overview of BAR memory
|
||||
======================================================================
|
||||
|
||||
|
||||
ROCm is an extension of HSA platform architecture, so it shares the queueing model, memory model, signaling and synchronization protocols. Platform atomics are integral to perform queuing and signaling memory operations where there may be multiple-writers across CPU and GPU agents.
|
||||
|
||||
The full list of HSA system architecture platform requirements are here: `HSA Sys Arch Features <http://hsafoundation.com/wp-content/uploads/2021/02/HSA-SysArch-1.2.pdf>`_.
|
||||
|
||||
The ROCm Platform uses the new PCI Express 3.0 (PCIe 3.0) features for Atomic Read-Modify-Write Transactions which extends inter-processor synchronization mechanisms to IO to support the defined set of HSA capabilities needed for queuing and signaling memory operations.
|
||||
|
||||
The new PCIe AtomicOps operate as completers for ``CAS`` (Compare and Swap), ``FetchADD``, ``SWAP`` atomics. The AtomicsOps are initiated by the
|
||||
I/O device which support 32-bit, 64-bit and 128-bit operand which target address have to be naturally aligned to operation sizes.
|
||||
|
||||
For ROCm the Platform atomics are used in ROCm in the following ways:
|
||||
|
||||
* Update HSA queue’s read_dispatch_id: 64 bit atomic add used by the command processor on the GPU agent to update the packet ID it processed.
|
||||
* Update HSA queue’s write_dispatch_id: 64 bit atomic add used by the CPU and GPU agent to support multi-writer queue insertions.
|
||||
* Update HSA Signals – 64bit atomic ops are used for CPU & GPU synchronization.
|
||||
|
||||
The PCIe 3.0 AtomicOp feature allows atomic transactions to be requested by, routed through and completed by PCIe components. Routing and completion does not require software support. Component support for each is detectable via the DEVCAP2 register. Upstream bridges need to have AtomicOp routing enabled or the Atomic Operations will fail even though PCIe endpoint and PCIe I/O devices has the capability to Atomics Operations.
|
||||
|
||||
To do AtomicOp routing capability between two or more Root Ports, each associated Root Port must indicate that capability via the AtomicOp routing supported bit in the Device Capabilities 2 register.
|
||||
|
||||
If your system has a PCIe Express Switch it needs to support AtomicsOp routing. AtomicOp requests are permitted only if a component’s ``DEVCTL2.ATOMICOP_REQUESTER_ENABLE`` field is set. These requests can only be serviced if the upstream components support AtomicOp completion and/or routing to a component which does. AtomicOp Routing Support=1 Routing is supported, AtomicOp Routing Support=0 routing is not supported.
|
||||
|
||||
An atomic operation is a non-posted transaction supporting 32-bit and 64-bit address formats, there must be a response for Completion containing the result of the operation. Errors associated with the operation (uncorrectable error accessing the target location or carrying out the Atomic operation) are signaled to the requester by setting the Completion Status field in the completion descriptor, they are set to to Completer Abort (CA) or Unsupported Request (UR).
To understand more about how PCIe atomic operations work, see the `PCIe atomics ECN <https://pcisig.com/specifications/pciexpress/specifications/ECN_Atomic_Ops_080417.pdf>`_.

See also the `Linux kernel patch to pci_enable_atomic_request <https://patchwork.kernel.org/project/linux-pci/patch/1443110390-4080-1-git-send-email-jay@jcornwall.me/>`_.

There are also a number of papers that discuss these new capabilities:

* `Atomic Read Modify Write Primitives by Intel <https://www.intel.es/content/dam/doc/white-paper/atomic-read-modify-write-primitives-i-o-devices-paper.pdf>`_
* `PCI Express 3 Accelerator White Paper by Intel <https://www.intel.sg/content/dam/doc/white-paper/pci-express3-accelerator-white-paper.pdf>`_
* `Intel PCIe Generation 3 Hotchips Paper <https://www.hotchips.org/wp-content/uploads/hc_archives/hc21/1_sun/HC21.23.1.SystemInterconnectTutorial-Epub/HC21.23.131.Ajanovic-Intel-PCIeGen3.pdf>`_
* `PCIe Generation 4 Base Specification, which includes AtomicOps <https://astralvx.com/storage/2020/11/PCI_Express_Base_4.0_Rev0.3_February19-2014.pdf>`_

Other I/O devices with PCIe atomics support:

* `Mellanox ConnectX-5 InfiniBand Card <http://www.mellanox.com/related-docs/prod_adapter_cards/PB_ConnectX-5_VPI_Card.pdf>`_
* `Cray Aries Interconnect <http://www.hoti.org/hoti20/slides/Bob_Alverson.pdf>`_
* `Xilinx PCIe Ultrascale White Paper <https://docs.xilinx.com/v/u/8OZSA2V1b1LLU2rRCDVGQw>`_
* `Xilinx 7 Series Devices <https://docs.xilinx.com/v/u/1nfXeFNnGpA0ywyykvWHWQ>`_

Future bus technologies with richer I/O atomic operation support:

* Gen-Z

New PCIe endpoints with AtomicOp support, beyond the AMD Ryzen and EPYC CPUs and Intel Haswell or newer CPUs with PCIe 3.0 support:

* `Mellanox Bluefield SOC <https://docs.nvidia.com/networking/display/BlueFieldSWv25111213/BlueField+Software+Overview>`_
* `Cavium Thunder X2 <https://en.wikichip.org/wiki/cavium/thunderx2>`_
|
||||
|
||||
In ROCm, we also take advantage of PCIe ID-based ordering technology for P2P when the GPU originates two writes to two different targets:

1. a write to another GPU's memory,

2. then a write to system memory to indicate that the transfer is complete.

Although the writes are routed to different ends of the system, the write to system memory that signals completion must occur AFTER the P2P write to the GPU has completed.
|
||||
|
||||
BAR memory overview
|
||||
***************************************************************************************************
|
||||
On a Xeon E5-based system, you can enable above-4GB PCIe addressing in the BIOS; if you do so, you need to set the MMIO base address (MMIOH Base) and range (MMIO High Size) in the BIOS.

On a SuperMicro system, you need to set the following in the system BIOS:

* Advanced->PCIe/PCI/PnP Configuration->Above 4G Decoding = Enabled

* Advanced->PCIe/PCI/PnP Configuration->MMIOH Base = 512G

* Advanced->PCIe/PCI/PnP Configuration->MMIO High Size = 256G

When we support the Large BAR capability, there is a Large BAR VBIOS, which also disables the I/O BAR.

GFX9 and Vega10 have a 44-bit physical address and a 48-bit virtual address, and their BARs are laid out as follows:

* BAR0-1 registers: 64-bit, prefetchable, GPU memory. 8GB or 16GB depending on the Vega10 SKU. Must be placed < 2^44 to support P2P access from other Vega10 GPUs.

* BAR2-3 registers: 64-bit, prefetchable, doorbell. Must be placed < 2^44 to support P2P access from other Vega10 GPUs.

* BAR4 register: optional, not a boot device.

* BAR5 register: 32-bit, non-prefetchable, MMIO. Must be placed < 4GB.

Here is how the base address registers (BARs) look on a GFX8 GPU with a 40-bit physical address limit::

  11:00.0 Display controller: Advanced Micro Devices, Inc. [AMD/ATI] Fiji [Radeon R9 FURY / NANO Series] (rev c1)

  Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device 0b35

  Flags: bus master, fast devsel, latency 0, IRQ 119

  Memory at bf40000000 (64-bit, prefetchable) [size=256M]

  Memory at bf50000000 (64-bit, prefetchable) [size=2M]

  I/O ports at 3000 [size=256]

  Memory at c7400000 (32-bit, non-prefetchable) [size=256K]

  Expansion ROM at c7440000 [disabled] [size=128K]
|
||||
|
||||
Legend:

1 : GPU frame buffer BAR - in this example it happens to be 256M, but typically this will be the size of the GPU memory (typically 4GB+). This BAR has to be placed < 2^40 to allow peer-to-peer access from other GFX8 AMD GPUs. For GFX9 (Vega) GPUs, the BAR has to be placed < 2^44 to allow peer-to-peer access from other GFX9 AMD GPUs.

2 : Doorbell BAR - the size of this BAR is typically < 10MB (currently fixed at 2MB) for this generation of GPUs. This BAR has to be placed < 2^40 to allow peer-to-peer access from other current-generation AMD GPUs.

3 : I/O BAR - this is for legacy VGA and boot device support; since the GPUs in this project are not VGA devices (they are headless), this is not a concern even if the SBIOS does not set it up.

4 : MMIO BAR - this is required for the AMD driver software to access the configuration registers. Since the remainder of the available BAR space is only 1 DWORD (32-bit), this is placed < 4GB. This is fixed at 256KB.

5 : Expansion ROM - this is required for the AMD driver software to access the GPU's video BIOS. This is currently fixed at 128KB.
|
||||
|
||||
Excerpts from 'Overview of Changes to PCI Express 3.0'
|
||||
================================================================
|
||||
By Mike Jackson, Senior Staff Architect, MindShare, Inc.
|
||||
***************************************************************************************************
|
||||
Atomic operations – goal:
|
||||
***************************************************************************************************
|
||||
Support SMP-type operations across a PCIe network to allow for things like offloading tasks between CPU cores and accelerators like a GPU. The spec says this enables advanced synchronization mechanisms that are particularly useful with multiple producers or consumers that need to be synchronized in a non-blocking fashion. Three new atomic non-posted requests were added, plus the corresponding completion (the address must be naturally aligned with the operand size or the TLP is malformed):
|
||||
|
||||
* Fetch and Add – uses one operand as the “add” value. Reads the target location, adds the operand, and then writes the result back to the original location.
|
||||
|
||||
* Unconditional Swap – uses one operand as the “swap” value. Reads the target location and then writes the swap value to it.
|
||||
|
||||
* Compare and Swap – uses two operands: the first is the compare value, the second is the swap value. Reads the target location, checks it against the compare value and, if equal, writes the swap value to the target location.
|
||||
|
||||
* AtomicOpCompletion – a new completion that returns the result of an atomic request and indicates that the atomicity of the transaction has been maintained.
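The following minimal Python sketch (illustrative only; the real operations are executed by the completer hardware on the target address) models the semantics of the three request types, each returning the prior value that the AtomicOpCompletion carries back to the requester:

.. code-block:: python

   # Software model of the three PCIe AtomicOp request semantics.
   def fetch_and_add(mem, addr, operand):
       old = mem[addr]
       mem[addr] = old + operand
       return old  # prior value, returned in the AtomicOpCompletion

   def unconditional_swap(mem, addr, operand):
       old = mem[addr]
       mem[addr] = operand
       return old

   def compare_and_swap(mem, addr, compare, swap_value):
       old = mem[addr]
       if old == compare:
           mem[addr] = swap_value
       return old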
|
||||
|
||||
Since atomic operations are not locked, they don't have the performance downsides of the PCI locked protocol. Compared to locked cycles, they provide “lower latency, higher scalability, advanced synchronization algorithms, and dramatically lower impact on other PCIe traffic.” The lock mechanism can still be used across a bridge to PCI or PCI-X to achieve the desired operation.
|
||||
|
||||
Atomic operations can go from device to device, device to host, or host to device. Each completer indicates whether it supports this capability and guarantees atomic access if it does. The ability to route atomic operations is also indicated in the registers for a given port.
|
||||
|
||||
ID-based ordering – goal:
|
||||
***************************************************************************************************
|
||||
Improve performance by avoiding stalls caused by ordering rules. For example, posted writes are never normally allowed to pass each other in a queue, but if they are requested by different functions, we can have some confidence that the requests are not dependent on each other. The previously reserved Attribute bit [2] is now combined with the RO bit to indicate ID ordering with or without relaxed ordering.
|
||||
|
||||
This only has meaning for memory requests, and is reserved for Configuration or IO requests. Completers are not required to copy this bit into a completion, and only use the bit if their enable bit is set for this operation.
|
||||
|
||||
To read more about the new PCIe Gen 3 options, see `MindShare's PCIe 3.0 overview <https://www.mindshare.com/files/resources/PCIe%203-0.pdf>`_.
|
||||
326
docs/conceptual/ai-migraphx-optimization.md
Normal file
@@ -0,0 +1,326 @@
|
||||
# Inference optimization with MIGraphX
|
||||
|
||||
The following sections cover inferencing and introduce [MIGraphX](https://rocm.docs.amd.com/projects/AMDMIGraphX/en/latest/).
|
||||
|
||||
## Inference
|
||||
|
||||
Inference is where the capabilities learned during deep-learning training are put to work. It refers to using a fully trained neural network to make conclusions (predictions) on new data that the model has never seen before. Deep-learning inference is achieved by feeding new data, such as new images, to the network, giving the deep neural network (DNN) a chance to classify the image.
|
||||
|
||||
Taking our previous example of MNIST, the DNN can be fed new images of handwritten digits, allowing the neural network to classify them. A fully trained DNN should make accurate predictions about what an image represents; inference cannot happen without training.
|
||||
|
||||
## MIGraphX introduction
|
||||
|
||||
MIGraphX is a graph compiler focused on accelerating machine-learning inference that can target AMD GPUs and CPUs. MIGraphX accelerates machine-learning models by leveraging several graph-level transformations and optimizations. These optimizations include:
|
||||
|
||||
* Operator fusion
|
||||
* Arithmetic simplifications
|
||||
* Dead-code elimination
|
||||
* Common subexpression elimination (CSE)
|
||||
* Constant propagation
|
||||
|
||||
After applying all these transformations, MIGraphX emits code for the AMD GPU by calling MIOpen or rocBLAS, or by creating HIP kernels for a particular operator. MIGraphX can also target CPUs using the DNNL or ZenDNN libraries.
|
||||
|
||||
MIGraphX provides easy-to-use APIs in C++ and Python to import machine-learning models in ONNX or TensorFlow format. Users can compile, save, load, and run these models using the MIGraphX C++ and Python APIs. Internally, MIGraphX parses ONNX or TensorFlow models into an internal graph representation where each operator in the model gets mapped to an operator within MIGraphX. Each of these operators defines various attributes, such as:
|
||||
|
||||
* Number of arguments
|
||||
* Type of arguments
|
||||
* Shape of arguments
|
||||
|
||||
After optimization passes, all these operators get mapped to different kernels on GPUs or CPUs.
|
||||
|
||||
After importing a model into MIGraphX, the model is represented as a `migraphx::program`. A `migraphx::program` is made up of `migraphx::module` objects. The program can consist of several modules, but it always has one main module. Modules are made up of instructions, referenced by `migraphx::instruction_ref`. Instructions contain the `migraphx::op` and the arguments to the operator.
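As a small illustration, the Python API can be used to inspect a parsed program before compiling it. This is a minimal sketch; it assumes the `inceptioni1.onnx` file created later in this article and the `get_parameter_names`/`get_parameter_shapes`/`get_output_shapes` query helpers of the Python API:

```py
import migraphx

# parse an ONNX file into a migraphx.program
prog = migraphx.parse_onnx("inceptioni1.onnx")

# query the program for its input parameters and their shapes
print(prog.get_parameter_names())   # e.g. ['x.1']
print(prog.get_parameter_shapes())  # maps parameter name -> shape
print(prog.get_output_shapes())     # shapes of the program outputs

# print the instruction stream of the main module
prog.print()
```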
|
||||
|
||||
## Installing MIGraphX
|
||||
|
||||
There are three options to get started with MIGraphX installation. MIGraphX depends on ROCm libraries; the following instructions assume that the machine already has ROCm installed.
|
||||
|
||||
### Option 1: installing binaries
|
||||
|
||||
To install MIGraphX on Debian-based systems like Ubuntu, use the following command:
|
||||
|
||||
```bash
sudo apt update && sudo apt install -y migraphx
```
|
||||
|
||||
The header files and libraries are installed under `/opt/rocm-\<version\>`, where \<version\> is the ROCm version.
|
||||
|
||||
### Option 2: building from source
|
||||
|
||||
There are two ways to build the MIGraphX sources.
|
||||
|
||||
* [Use the ROCm build tool](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#use-the-rocm-build-tool-rbuild) - This approach uses [rbuild](https://github.com/RadeonOpenCompute/rbuild) to install the prerequisites and build the libraries with just one command.
|
||||
|
||||
or
|
||||
|
||||
* [Use CMake](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#use-cmake-to-build-migraphx) - This approach uses a script to install the prerequisites, then uses CMake to build the source.
|
||||
|
||||
For detailed steps on building from source and installing dependencies, refer to the following `README` file:
|
||||
|
||||
[https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#building-from-source](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#building-from-source)
|
||||
|
||||
### Option 3: use docker
|
||||
|
||||
To use Docker, follow these steps:
|
||||
|
||||
1. The easiest way to set up the development environment is to use Docker. To build the Docker image from scratch, first clone the MIGraphX repository by running:
|
||||
|
||||
```bash
git clone --recursive https://github.com/ROCmSoftwarePlatform/AMDMIGraphX
```
|
||||
|
||||
2. The repository contains a Dockerfile from which you can build a Docker image as follows:
|
||||
|
||||
```bash
docker build -t migraphx .
```
|
||||
|
||||
3. Then to enter the development environment, use Docker run:
|
||||
|
||||
```bash
docker run --device='/dev/kfd' --device='/dev/dri' -v=`pwd`:/code/AMDMIGraphX -w /code/AMDMIGraphX --group-add video -it migraphx
```
|
||||
|
||||
The Docker image contains all the prerequisites required for the installation, so users can go to the folder `/code/AMDMIGraphX` and follow the steps mentioned in [Option 2: Building from Source](#option-2-building-from-source).
|
||||
|
||||
## MIGraphX example
|
||||
|
||||
MIGraphX provides both C++ and Python APIs. The following sections show examples of both using the Inception v3 model. To walk through the examples, fetch the Inception v3 ONNX model by running the following:
|
||||
|
||||
```py
import torch
import torchvision.models as models

inception = models.inception_v3(pretrained=True)
torch.onnx.export(inception, torch.randn(1, 3, 299, 299), "inceptioni1.onnx")
```
|
||||
|
||||
This will create `inceptioni1.onnx`, which can be imported in MIGraphX using C++ or Python API.
|
||||
|
||||
### MIGraphX Python API
|
||||
|
||||
Follow these steps:
|
||||
|
||||
1. To import the MIGraphX module in Python script, set `PYTHONPATH` to the MIGraphX libraries installation. If binaries are installed using steps mentioned in [Option 1: Installing Binaries](#option-1-installing-binaries), perform the following action:
|
||||
|
||||
```bash
export PYTHONPATH=$PYTHONPATH:/opt/rocm/
```
|
||||
|
||||
2. The following script shows the usage of Python API to import the ONNX model, compile it, and run inference on it. Set `LD_LIBRARY_PATH` to `/opt/rocm/` if required.
|
||||
|
||||
```py
# import migraphx and numpy
import migraphx
import numpy as np

# import and parse inception model
model = migraphx.parse_onnx("inceptioni1.onnx")

# compile model for the GPU target
model.compile(migraphx.get_target("gpu"))

# optionally print compiled model
model.print()

# create random input image
input_image = np.random.rand(1, 3, 299, 299).astype('float32')

# feed image to model; 'x.1' is the input parameter name
results = model.run({'x.1': input_image})

# get the results back
result_np = np.array(results[0])

# print the inferred class of the input image
print(np.argmax(result_np))
```
|
||||
|
||||
Find additional examples of Python API in the `/examples` directory of the MIGraphX repository.
|
||||
|
||||
### MIGraphX C++ API
|
||||
|
||||
Follow these steps:
|
||||
|
||||
1. The following is a minimal example that shows how to use the MIGraphX C++ API to load an ONNX file, compile it for the GPU, and run inference on it. To use the MIGraphX C++ API, you only need to include the `migraphx/migraphx.hpp` header. This example runs inference on the Inception v3 model.
|
||||
|
||||
```c++
#include <algorithm>
#include <ctime>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include <migraphx/migraphx.hpp>

int main(int argc, char** argv)
{
    migraphx::program prog;
    migraphx::onnx_options onnx_opts;
    // import and parse the onnx file into a migraphx::program
    prog = migraphx::parse_onnx("inceptioni1.onnx", onnx_opts);
    // print the imported model
    prog.print();
    migraphx::target targ = migraphx::target("gpu");
    migraphx::compile_options comp_opts;
    comp_opts.set_offload_copy();
    // compile for the GPU
    prog.compile(targ, comp_opts);
    // print the compiled program
    prog.print();
    // randomly generate an input image
    // of shape (1, 3, 299, 299)
    std::srand(unsigned(std::time(nullptr)));
    std::vector<float> input_image(1 * 299 * 299 * 3);
    std::generate(input_image.begin(), input_image.end(), std::rand);
    // users need to provide data for the input
    // parameters in order to run inference;
    // you can query the migraphx program for the parameters
    migraphx::program_parameters prog_params;
    auto param_shapes = prog.get_parameter_shapes();
    auto input        = param_shapes.names().front();
    // create an argument for the parameter
    prog_params.add(input, migraphx::argument(param_shapes[input], input_image.data()));
    // run inference
    auto outputs = prog.eval(prog_params);
    // read back the output
    float* results = reinterpret_cast<float*>(outputs[0].data());
    float* max     = std::max_element(results, results + 1000);
    int answer     = max - results;
    std::cout << "answer: " << answer << std::endl;
}
```
|
||||
|
||||
2. To compile this program, you can use CMake; you only need to link the `migraphx::c` library to use MIGraphX's C++ API. The following `CMakeLists.txt` file can build the earlier example:
|
||||
|
||||
```cmake
cmake_minimum_required(VERSION 3.5)
project(CAI)

set(CMAKE_CXX_STANDARD 14)
set(EXAMPLE inception_inference)

list(APPEND CMAKE_PREFIX_PATH /opt/rocm/hip /opt/rocm)
find_package(migraphx)

message("source file: " ${EXAMPLE}.cpp " ---> bin: " ${EXAMPLE})
add_executable(${EXAMPLE} ${EXAMPLE}.cpp)

target_link_libraries(${EXAMPLE} migraphx::c)
```
|
||||
|
||||
3. To build the executable file, run the following from the directory containing the `inception_inference.cpp` file:
|
||||
|
||||
```bash
mkdir build
cd build
cmake ..
make -j$(nproc)
./inception_inference
```
|
||||
|
||||
```{note}
Set `LD_LIBRARY_PATH` to `/opt/rocm/lib` if required during the build. Additional examples can be found in the MIGraphX repository under the `/examples/` directory.
```
|
||||
|
||||
## Tuning MIGraphX
|
||||
|
||||
MIGraphX uses MIOpen kernels to target AMD GPUs. For a model compiled with MIGraphX, tune MIOpen to pick the best possible kernel implementation. MIOpen tuning results in a significant performance boost. Tuning can be done by setting the environment variable `MIOPEN_FIND_ENFORCE=3`.
|
||||
|
||||
```{note}
The tuning process can take a long time to finish.
```
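As a minimal sketch, tuning can be enabled from Python by setting the variable before the model is compiled (the model file name is illustrative, and this assumes MIOpen's find step runs during MIGraphX compilation):

```py
import os

# enable exhaustive MIOpen tuning; results are cached in the user database,
# so subsequent compilations of the same model reuse the tuned kernels
os.environ["MIOPEN_FIND_ENFORCE"] = "3"

import migraphx

model = migraphx.parse_onnx("inceptioni1.onnx")
model.compile(migraphx.get_target("gpu"))
```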
|
||||
|
||||
**Example:** The average inference time of the inception model example shown previously over 100 iterations using untuned kernels is 0.01383ms. After tuning, it reduces to 0.00459ms, which is a 3x improvement. This result is from ROCm v4.5 on a MI100 GPU.
|
||||
|
||||
```{note}
The results may vary depending on the system configuration.
```
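As a reference for how such numbers can be collected, the following is a minimal sketch of a timing loop using the Python API (the warm-up handling and iteration count are illustrative):

```py
import time
import numpy as np
import migraphx

model = migraphx.parse_onnx("inceptioni1.onnx")
model.compile(migraphx.get_target("gpu"))

input_image = np.random.rand(1, 3, 299, 299).astype('float32')
model.run({'x.1': input_image})  # warm-up run, excluded from the measurement

iterations = 100
start = time.perf_counter()
for _ in range(iterations):
    model.run({'x.1': input_image})
elapsed_ms = (time.perf_counter() - start) / iterations * 1000
print(f"average inference time: {elapsed_ms:.5f} ms")
```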
|
||||
|
||||
For reference, the following output shows inference runs for only the first 10 iterations, for both untuned and tuned kernels:
|
||||
|
||||
```console
### UNTUNED ###
iterator : 0
Inference complete
Inference time: 0.063ms
iterator : 1
Inference complete
Inference time: 0.008ms
iterator : 2
Inference complete
Inference time: 0.007ms
iterator : 3
Inference complete
Inference time: 0.007ms
iterator : 4
Inference complete
Inference time: 0.007ms
iterator : 5
Inference complete
Inference time: 0.008ms
iterator : 6
Inference complete
Inference time: 0.007ms
iterator : 7
Inference complete
Inference time: 0.028ms
iterator : 8
Inference complete
Inference time: 0.029ms
iterator : 9
Inference complete
Inference time: 0.029ms

### TUNED ###
iterator : 0
Inference complete
Inference time: 0.063ms
iterator : 1
Inference complete
Inference time: 0.004ms
iterator : 2
Inference complete
Inference time: 0.004ms
iterator : 3
Inference complete
Inference time: 0.004ms
iterator : 4
Inference complete
Inference time: 0.004ms
iterator : 5
Inference complete
Inference time: 0.004ms
iterator : 6
Inference complete
Inference time: 0.004ms
iterator : 7
Inference complete
Inference time: 0.004ms
iterator : 8
Inference complete
Inference time: 0.004ms
iterator : 9
Inference complete
Inference time: 0.004ms
```
|
||||
|
||||
### YModel
|
||||
|
||||
The best inference performance through MIGraphX is conditioned upon having tuned kernel configurations stored in a local user database (DB) under `/home`. If a user moves their model to a different server, or allows a different user to run it, the MIOpen tuning process has to be run again to populate the new user DB with the best kernel configurations and corresponding solvers.

Tuning is time-consuming, and users who have not performed tuning will see discrepancies between expected (or claimed) inference performance and actual inference performance. This has led to repetitive, time-consuming tuning tasks for each user.

MIGraphX introduces a feature, known as YModel, that stores the kernel configuration parameters found during tuning in a `.mxr` file. This ensures the same level of expected performance, even when a model is copied to a different user or system.
|
||||
|
||||
The YModel feature is available starting from ROCm 5.4.1 and UIF 1.1.
|
||||
|
||||
#### YModel example
|
||||
|
||||
Through the `migraphx-driver` functionality, you can generate `.mxr` files with the tuning information stored inside them by passing the additional flags `--binary --output model.mxr` to `migraphx-driver`, along with the rest of the necessary flags.
|
||||
|
||||
For example, to generate a `.mxr` file from an ONNX model, use the following:
|
||||
|
||||
```bash
./path/to/migraphx-driver compile --onnx resnet50.onnx --enable-offload-copy --binary --output resnet50.mxr
```
|
||||
|
||||
To run generated `.mxr` files through `migraphx-driver`, use the following:
|
||||
|
||||
```bash
./path/to/migraphx-driver run --migraphx resnet50.mxr --enable-offload-copy
```
|
||||
|
||||
Alternatively, you can use the MIGraphX C++ or Python API to generate `.mxr` files.
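A minimal sketch of the Python route follows (this assumes the `migraphx.save`/`migraphx.load` helpers and the `offload_copy` compile option available in recent MIGraphX releases; the model file name is illustrative):

```py
import migraphx

# compile once; the tuned kernel configurations are captured
# in the compiled program
model = migraphx.parse_onnx("resnet50.onnx")
model.compile(migraphx.get_target("gpu"), offload_copy=True)
migraphx.save(model, "resnet50.mxr")

# later, or on another machine: load and run without re-tuning
model = migraphx.load("resnet50.mxr")
```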
|
||||
|
||||

|
||||
@@ -1,10 +1,3 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="description" content="Inception V3 with PyTorch">
|
||||
<meta name="keywords" content="PyTorch, Inception V3, deep-learning, training data, optimization
|
||||
algorithm, AMD, ROCm">
|
||||
</head>
|
||||
|
||||
# Deep learning: Inception V3 with PyTorch
|
||||
|
||||
## Deep learning training
|
||||
@@ -22,7 +15,6 @@ Training occurs in multiple phases for every batch of training data. the followi
|
||||
:::{table} Types of Training Phases
|
||||
:name: training-phases
|
||||
:widths: auto
|
||||
|
||||
| Types of Phases | |
|
||||
| ----------------- | --- |
|
||||
| Forward Pass | The input features are fed into the model, whose parameters may be randomly initialized initially. Activations (outputs) of each layer are retained during this pass to help in the loss gradient computation during the backward pass. |
|
||||
@@ -36,7 +28,6 @@ Training is different from inference, particularly from the hardware perspective
|
||||
:::{table} Training vs. Inference
|
||||
:name: training-inference
|
||||
:widths: auto
|
||||
|
||||
| Training | Inference |
|
||||
| ----------- | ----------- |
|
||||
| Training is measured in hours/days. | The inference is measured in minutes. |
|
||||
@@ -45,7 +36,7 @@ Training is different from inference, particularly from the hardware perspective
|
||||
| Data for training is available on the disk before the training process and is generally significant. The training performance is measured by how fast the data batches can be processed. | Inference data usually arrive stochastically, which may be batched to improve performance. Inference performance is generally measured in throughput speed to process the batch of data and the delay in responding to the input (latency). |
|
||||
:::
|
||||
|
||||
Different quantization data types are typically chosen between training (FP32, BF16) and inference (FP16, INT8). The computation hardware has different specializations from other data types, leading to improvement in performance if a faster datatype can be selected for the corresponding task.
|
||||
Different quantization data types are typically chosen between training (FP32, BF16) and inference (FP16, INT8). The computation hardware has different specializations from other datatypes, leading to improvement in performance if a faster datatype can be selected for the corresponding task.
|
||||
|
||||
## Case studies
|
||||
|
||||
@@ -53,7 +44,7 @@ The following sections contain case studies for the Inception V3 model.
|
||||
|
||||
### Inception V3 with PyTorch
|
||||
|
||||
Convolution Neural Networks are forms of artificial neural networks commonly used for image processing. One of the core layers of such a network is the convolutional layer, which convolves the input with a weight tensor and passes the result to the next layer. Inception V3 is an architectural development over the ImageNet competition-winning entry, AlexNet, using more profound and broader networks while attempting to meet computational and memory budgets.
|
||||
Convolution Neural Networks are forms of artificial neural networks commonly used for image processing. One of the core layers of such a network is the convolutional layer, which convolves the input with a weight tensor and passes the result to the next layer. Inception V3[^inception_arch] is an architectural development over the ImageNet competition-winning entry, AlexNet, using more profound and broader networks while attempting to meet computational and memory budgets.
|
||||
|
||||
The implementation uses PyTorch as a framework. This case study utilizes [TorchVision](https://pytorch.org/vision/stable/index.html), a repository of popular datasets and model architectures, for obtaining the model. TorchVision also provides pre-trained weights as a starting point to develop new models or fine-tune the model for a new task.
|
||||
|
||||
@@ -65,7 +56,7 @@ This example is adapted from the PyTorch research hub page on [Inception V3](htt
|
||||
|
||||
Follow these steps:
|
||||
|
||||
1. Run the PyTorch ROCm-based Docker image or refer to the section {doc}`Installing PyTorch <rocm-install-on-linux:install/3rd-party/pytorch-install>` for setting up a PyTorch environment on ROCm.
|
||||
1. Run the PyTorch ROCm-based Docker image or refer to the section [Installing PyTorch](../install/pytorch-install.md) for setting up a PyTorch environment on ROCm.
|
||||
|
||||
```dockerfile
|
||||
docker run -it -v $HOME:/data --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 8G rocm/pytorch:latest
|
||||
@@ -155,14 +146,14 @@ The previous section focused on downloading and using the Inception V3 model for
|
||||
|
||||
Follow these steps:
|
||||
|
||||
1. Run the PyTorch ROCm Docker image or refer to the section {doc}`Installing PyTorch <rocm-install-on-linux:install/3rd-party/pytorch-install>` for setting up a PyTorch environment on ROCm.
|
||||
1. Run the PyTorch ROCm Docker image or refer to the section [Installing PyTorch](../install/pytorch-install.md) for setting up a PyTorch environment on ROCm.
|
||||
|
||||
```dockerfile
|
||||
docker pull rocm/pytorch:latest
|
||||
docker run -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 8G rocm/pytorch:latest
|
||||
```
|
||||
|
||||
2. Download an ImageNet database. For this example, the `tiny-imagenet-200`, a smaller ImageNet variant with 200 image classes and a training dataset with 100,000 images, was downsized to 64x64 color images.
|
||||
2. Download an ImageNet database. For this example, the `tiny-imagenet-200`[^Stanford_deep_learning], a smaller ImageNet variant with 200 image classes and a training dataset with 100,000 images, was downsized to 64x64 color images.
|
||||
|
||||
```bash
|
||||
wget http://cs231n.stanford.edu/tiny-imagenet-200.zip
|
||||
@@ -217,9 +208,9 @@ Follow these steps:
|
||||
|
||||
7. Set parameters to guide the training process.
|
||||
|
||||
:::{note}
|
||||
```{note}
|
||||
The device is set to `"cuda"`. In PyTorch, `"cuda"` is a generic keyword to denote a GPU.
|
||||
:::
|
||||
```
|
||||
|
||||
```py
|
||||
device = "cuda"
|
||||
@@ -279,9 +270,9 @@ Follow these steps:
|
||||
lr_gamma = 0.1
|
||||
```
|
||||
|
||||
:::{note}
|
||||
```{note}
|
||||
One training epoch is when the neural network passes an entire dataset forward and backward.
|
||||
:::
|
||||
```
|
||||
|
||||
```py
|
||||
epochs = 90
|
||||
@@ -342,9 +333,9 @@ Follow these steps:
|
||||
)
|
||||
```
|
||||
|
||||
:::{note}
|
||||
```{note}
|
||||
Use torchvision to obtain the Inception V3 model. Use the pre-trained model weights to speed up training.
|
||||
:::
|
||||
```
|
||||
|
||||
```py
|
||||
print("Creating model")
|
||||
@@ -366,7 +357,7 @@ Follow these steps:
|
||||
model.to(device)
|
||||
```
|
||||
|
||||
13. Set the loss criteria. For this example, Cross Entropy Loss is used.
|
||||
13. Set the loss criteria. For this example, Cross Entropy Loss[^cross_entropy] is used.
|
||||
|
||||
```py
|
||||
criterion = torch.nn.CrossEntropyLoss()
|
||||
@@ -586,7 +577,7 @@ Follow these steps:
|
||||
import torch.optim as optim
|
||||
```
|
||||
|
||||
10. Set the loss criteria. For this example, Cross Entropy Loss is used.
|
||||
10. Set the loss criteria. For this example, Cross Entropy Loss[^cross_entropy] is used.
|
||||
|
||||
```py
|
||||
criterion = nn.CrossEntropyLoss()
|
||||
@@ -681,7 +672,7 @@ The dataset has 60,000 images you will use to train the network and 10,000 to ev
|
||||
|
||||
Access the source code from the following repository:
|
||||
|
||||
[https://github.com/ROCm/tensorflow_fashionmnist/blob/main/fashion_mnist.py](https://github.com/ROCm/tensorflow_fashionmnist/blob/main/fashion_mnist.py)
|
||||
[https://github.com/ROCmSoftwarePlatform/tensorflow_fashionmnist/blob/main/fashion_mnist.py](https://github.com/ROCmSoftwarePlatform/tensorflow_fashionmnist/blob/main/fashion_mnist.py)
|
||||
|
||||
To understand the code step by step, follow these steps:
|
||||
|
||||
@@ -878,7 +869,7 @@ To understand the code step by step, follow these steps:
|
||||
thisplot[true_label].set_color('blue')
|
||||
```
|
||||
|
||||
9. With the model trained, you can use it to make predictions about some images. Review the 0<sup>th</sup> image predictions and the prediction array. Correct prediction labels are blue, and incorrect prediction labels are red. The number gives the percentage (out of 100) for the predicted label.
|
||||
9. With the model trained, you can use it to make predictions about some images. Review the 0-th image predictions and the prediction array. Correct prediction labels are blue, and incorrect prediction labels are red. The number gives the percentage (out of 100) for the predicted label.
|
||||
|
||||
```py
|
||||
i = 0
|
||||
@@ -1164,10 +1155,9 @@ To prepare the data for training, follow these steps:
|
||||
print("Accuracy: ", accuracy)
|
||||
```
|
||||
|
||||
:::{note}
|
||||
`model.fit()` returns a History object that contains a dictionary with everything that happened during
|
||||
training.
|
||||
:::
|
||||
```{note}
|
||||
model.fit() returns a History object that contains a dictionary with everything that happened during training.
|
||||
```
|
||||
|
||||
```py
|
||||
history_dict = history.history
|
||||
|
||||
@@ -1,40 +1,34 @@
|
||||
.. meta::
|
||||
:description: Using CMake
|
||||
:keywords: CMake, dependencies, HIP, C++, AMD, ROCm
|
||||
|
||||
*********************************
|
||||
***********
|
||||
Using CMake
|
||||
*********************************
|
||||
***********
|
||||
|
||||
Most components in ROCm support CMake. Projects depending on header-only or
|
||||
library components typically require CMake 3.5 or higher whereas those wanting
|
||||
to make use of the CMake HIP language support will require CMake 3.21 or higher.
|
||||
to make use of CMake's HIP language support will require CMake 3.21 or higher.
|
||||
|
||||
Finding dependencies
|
||||
====================
|
||||
|
||||
.. note::
|
||||
|
||||
For a complete
|
||||
reference on how to deal with dependencies in CMake, refer to the CMake docs
|
||||
on `find_package
|
||||
<https://cmake.org/cmake/help/latest/command/find_package.html>`_ and the
|
||||
`Using Dependencies Guide
|
||||
<https://cmake.org/cmake/help/latest/guide/using-dependencies/index.html>`_
|
||||
to get an overview of CMake related facilities.
|
||||
For a complete
|
||||
reference on how to deal with dependencies in CMake, refer to the CMake docs
|
||||
on `find_package
|
||||
<https://cmake.org/cmake/help/latest/command/find_package.html>`_ and the
|
||||
`Using Dependencies Guide
|
||||
<https://cmake.org/cmake/help/latest/guide/using-dependencies/index.html>`_
|
||||
to get an overview of CMake's related facilities.
|
||||
|
||||
In short, CMake supports finding dependencies in two ways:
|
||||
|
||||
* In Module mode, it consults a file ``Find<PackageName>.cmake`` which tries to find the component
|
||||
in typical install locations and layouts. CMake ships a few dozen such scripts, but users and projects
|
||||
may ship them as well.
|
||||
|
||||
* In Config mode, it locates a file named ``<packagename>-config.cmake`` or
|
||||
``<PackageName>Config.cmake`` which describes the installed component in all regards needed to
|
||||
consume it.
|
||||
* In Module mode, it consults a file ``Find<PackageName>.cmake`` which tries to
|
||||
find the component in typical install locations and layouts. CMake ships a
|
||||
few dozen such scripts, but users and projects may ship them as well.
|
||||
* In Config mode, it locates a file named ``<packagename>-config.cmake`` or
|
||||
``<PackageName>Config.cmake`` which describes the installed component in all
|
||||
regards needed to consume it.
|
||||
|
||||
ROCm predominantly relies on Config mode, one notable exception being the Module
|
||||
driving the compilation of HIP programs on NVIDIA runtimes. As such, when
|
||||
driving the compilation of HIP programs on Nvidia runtimes. As such, when
|
||||
dependencies are not found in standard system locations, one either has to
|
||||
instruct CMake to search for package config files in additional folders using
|
||||
the ``CMAKE_PREFIX_PATH`` variable (a semi-colon separated list of file system
|
||||
@@ -46,9 +40,9 @@ it to your CMake configuration command on the command line via
|
||||
``-D CMAKE_PREFIX_PATH=....`` . AMD packaged ROCm installs can typically be
|
||||
added to the config file search paths such as:
|
||||
|
||||
* Windows: ``-D CMAKE_PREFIX_PATH=${env:HIP_PATH}``
|
||||
- Windows: ``-D CMAKE_PREFIX_PATH=${env:HIP_PATH}``
|
||||
|
||||
* Linux: ``-D CMAKE_PREFIX_PATH=/opt/rocm``
|
||||
- Linux: ``-D CMAKE_PREFIX_PATH=/opt/rocm``
|
||||
|
||||
ROCm provides the respective *config-file* packages, and this enables
|
||||
``find_package`` to be used directly. ROCm does not require any Find module as
|
||||
@@ -56,16 +50,14 @@ the *config-file* packages are shipped with the upstream projects, such as
|
||||
rocPRIM and other ROCm libraries.
|
||||
|
||||
For a complete guide on where and how ROCm may be installed on a system, refer
|
||||
to the installation guides for
|
||||
`Linux <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html>`_
|
||||
and
|
||||
`Windows <https://rocm.docs.amd.com/projects/install-on-windows/en/latest/index.html>`_.
|
||||
to the installation guides for `Linux <../install/linux/install.html>`_ and
|
||||
`Windows <../install/windows/install.html>`_.
|
||||
|
||||
Using HIP in CMake
|
||||
==================
|
||||
|
||||
ROCm components providing a C/C++ interface support consumption via any
|
||||
C/C++ toolchain that CMake knows how to drive. ROCm also supports the CMake HIP
|
||||
C/C++ toolchain that CMake knows how to drive. ROCm also supports CMake's HIP
|
||||
language features, allowing users to program using the HIP single-source
|
||||
programming model. When a program (or translation-unit) uses the HIP API without
|
||||
compiling any GPU device code, HIP can be treated in CMake as a simple C/C++
|
||||
@@ -78,22 +70,22 @@ Source code written in the HIP dialect of C++ typically uses the `.hip`
|
||||
extension. When the HIP CMake language is enabled, it will automatically
|
||||
associate such source files with the HIP toolchain being used.
|
||||
|
||||
.. code-block:: cmake
|
||||
::
|
||||
|
||||
cmake_minimum_required(VERSION 3.21) # HIP language support requires 3.21
|
||||
cmake_policy(VERSION 3.21.3...3.27)
|
||||
project(MyProj LANGUAGES HIP)
|
||||
add_executable(MyApp Main.hip)
|
||||
cmake_minimum_required(VERSION 3.21) # HIP language support requires 3.21
|
||||
cmake_policy(VERSION 3.21.3...3.27)
|
||||
project(MyProj LANGUAGES HIP)
|
||||
add_executable(MyApp Main.hip)
|
||||
|
||||
Should you have existing CUDA code that is from the source compatible subset of
|
||||
HIP, you can tell CMake that despite their `.cu` extension, they're HIP sources.
|
||||
Do note that this mostly facilitates compiling kernel code-only source files,
|
||||
as host-side CUDA API won't compile in this fashion.
|
||||
|
||||
.. code-block:: cmake
|
||||
::
|
||||
|
||||
add_library(MyLib MyLib.cu)
|
||||
set_source_files_properties(MyLib.cu PROPERTIES LANGUAGE HIP)
|
||||
add_library(MyLib MyLib.cu)
|
||||
set_source_files_properties(MyLib.cu PROPERTIES LANGUAGE HIP)
|
||||
|
||||
CMake itself only hosts part of the HIP language support, such as defining
|
||||
HIP-specific properties, etc. while the other half ships with the HIP
|
||||
@@ -105,10 +97,6 @@ there's a catch-all, last resort variable consulted locating this file,
|
||||
``-D CMAKE_HIP_COMPILER_ROCM_ROOT:PATH=`` which should be set the root of the
|
||||
ROCm installation.
|
||||
|
||||
.. note::
|
||||
Imported targets defined by `hip-lang-config.cmake` are for internal use
|
||||
only.
|
||||
|
||||
If the user doesn't provide a semi-colon delimited list of device architectures
|
||||
via ``CMAKE_HIP_ARCHITECTURES``, CMake will select some sensible default. It is
|
||||
advised though that if a user knows what devices they wish to target, then set
|
||||
@@ -122,57 +110,45 @@ Illustrated in the example below is a C++ application using MIOpen from CMake.
|
||||
It calls ``find_package(miopen)``, which provides the ``MIOpen`` imported
|
||||
target. This can be linked with ``target_link_libraries``
|
||||
|
||||
.. code-block:: cmake
|
||||
::
|
||||
|
||||
cmake_minimum_required(VERSION 3.5) # find_package(miopen) requires 3.5
|
||||
cmake_policy(VERSION 3.5...3.27)
|
||||
project(MyProj LANGUAGES CXX)
|
||||
find_package(miopen)
|
||||
add_library(MyLib ...)
|
||||
target_link_libraries(MyLib PUBLIC MIOpen)
|
||||
cmake_minimum_required(VERSION 3.5) # find_package(miopen) requires 3.5
|
||||
cmake_policy(VERSION 3.5...3.27)
|
||||
project(MyProj LANGUAGES CXX)
|
||||
find_package(miopen)
|
||||
add_library(MyLib ...)
|
||||
target_link_libraries(MyLib PUBLIC MIOpen)
|
||||
|
||||
.. note::
|
||||
|
||||
Most libraries are designed as host-only API, so using a GPU device
|
||||
compiler is not necessary for downstream projects unless they use GPU device
|
||||
code.
|
||||
Most libraries are designed as host-only API, so using a GPU device
|
||||
compiler is not necessary for downstream projects unless they use GPU device
|
||||
code.
|
||||
|
||||
Consuming the HIP API in C++ code
|
||||
---------------------------------
|
||||
|
||||
Consuming the HIP API without compiling single-source GPU device code can be
|
||||
done using any C++ compiler. The ``find_package(hip)`` provides the
|
||||
``hip::host`` imported target to use HIP in this scenario.
|
||||
Use the HIP API without compiling the GPU device code. As there is no GPU code,
|
||||
any C or C++ compiler can be used. The ``find_package(hip)`` provides the
|
||||
``hip::host`` imported target to use HIP in this context.
|
||||
|
||||
.. code-block:: cmake
|
||||
::
|
||||
|
||||
cmake_minimum_required(VERSION 3.5) # find_package(hip) requires 3.5
|
||||
cmake_policy(VERSION 3.5...3.27)
|
||||
project(MyProj LANGUAGES CXX)
|
||||
find_package(hip REQUIRED)
|
||||
add_executable(MyApp ...)
|
||||
target_link_libraries(MyApp PRIVATE hip::host)
|
||||
|
||||
When mixing such ``CXX`` sources with ``HIP`` sources holding device-code, link
|
||||
only to `hip::host`. If HIP sources don't have `.hip` as their extension, use
|
||||
`set_source_files_properties(<hip_sources>... PROPERTIES LANGUAGE HIP)` on them.
|
||||
Linking to `hip::host` will set all the necessary flags for the ``CXX`` sources
|
||||
while ``HIP`` sources inherit all flags from the built-in language support.
|
||||
Having HIP sources in a target will turn the |LINK_LANG|_ into ``HIP``.
|
||||
|
||||
.. |LINK_LANG| replace:: ``LINKER_LANGUAGE``
|
||||
.. _LINK_LANG: https://cmake.org/cmake/help/latest/prop_tgt/LINKER_LANGUAGE.html
|
||||
cmake_minimum_required(VERSION 3.5) # find_package(hip) requires 3.5
|
||||
cmake_policy(VERSION 3.5...3.27)
|
||||
project(MyProj LANGUAGES CXX)
|
||||
find_package(hip REQUIRED)
|
||||
add_executable(MyApp ...)
|
||||
target_link_libraries(MyApp PRIVATE hip::host)
|
||||
|
||||
Compiling device code in C++ language mode
|
||||
------------------------------------------
|
||||
|
||||
.. attention::
|
||||
|
||||
The workflow detailed here is considered legacy and is shown for
|
||||
understanding's sake. It pre-dates the existence of HIP language support in
|
||||
CMake. If source code has HIP device code in it, it is a HIP source file
|
||||
and should be compiled as such. Only resort to the method below if your
|
||||
HIP-enabled CMake code path can't mandate CMake version 3.21.
|
||||
The workflow detailed here is considered legacy and is shown for
|
||||
understanding's sake. It pre-dates the existence of HIP language support in
|
||||
CMake. If source code has HIP device code in it, it is a HIP source file
|
||||
and should be compiled as such. Only resort to the method below if your
|
||||
HIP-enabled CMake codepath can't mandate CMake version 3.21.
|
||||
|
||||
If code uses the HIP API and compiles GPU device code, it requires using a
|
||||
device compiler. The compiler for CMake can be set using either the
|
||||
@@ -184,34 +160,34 @@ compiler that supports AMD GPU targets, which is usually Clang.
|
||||
The ``find_package(hip)`` provides the ``hip::device`` imported target to add
|
||||
all the flags necessary for device compilation.
|
||||
|
||||
.. code-block:: cmake
|
||||
::
|
||||
|
||||
cmake_minimum_required(VERSION 3.8) # cxx_std_11 requires 3.8
|
||||
cmake_policy(VERSION 3.8...3.27)
|
||||
project(MyProj LANGUAGES CXX)
|
||||
find_package(hip REQUIRED)
|
||||
add_library(MyLib ...)
|
||||
target_link_libraries(MyLib PRIVATE hip::device)
|
||||
target_compile_features(MyLib PRIVATE cxx_std_11)
|
||||
cmake_minimum_required(VERSION 3.8) # cxx_std_11 requires 3.8
|
||||
cmake_policy(VERSION 3.8...3.27)
|
||||
project(MyProj LANGUAGES CXX)
|
||||
find_package(hip REQUIRED)
|
||||
add_library(MyLib ...)
|
||||
target_link_libraries(MyLib PRIVATE hip::device)
|
||||
target_compile_features(MyLib PRIVATE cxx_std_11)
|
||||
|
||||
.. note::
|
||||
Compiling for the GPU device requires at least C++11.
|
||||
|
||||
Compiling for the GPU device requires at least C++11.
|
||||
This project can then be configured with for eg.
|
||||
|
||||
This project can then be configured with the following CMake commands:
|
||||
- Windows: ``cmake -D CMAKE_CXX_COMPILER:PATH=${env:HIP_PATH}\bin\clang++.exe``
|
||||
|
||||
* Windows: ``cmake -D CMAKE_CXX_COMPILER:PATH=${env:HIP_PATH}\bin\clang++.exe``
|
||||
* Linux: ``cmake -D CMAKE_CXX_COMPILER:PATH=/opt/rocm/bin/amdclang++``
|
||||
- Linux: ``cmake -D CMAKE_CXX_COMPILER:PATH=/opt/rocm/bin/amdclang++``
|
||||
|
||||
Which use the device compiler provided from the binary packages of
|
||||
`ROCm HIP SDK <https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html>`_ and
|
||||
`ROCm HIP SDK <https://www.amd.com/en/developer/rocm-hub.html>`_ and
|
||||
`repo.radeon.com <https://repo.radeon.com>`_ respectively.
|
||||
|
||||
When using the ``CXX`` language support to compile HIP device code, selecting the
|
||||
When using the CXX language support to compile HIP device code, selecting the
|
||||
target GPU architectures is done via setting the ``GPU_TARGETS`` variable.
|
||||
``CMAKE_HIP_ARCHITECTURES`` only exists when the HIP language is enabled. By
|
||||
default, this is set to some subset of the currently supported architectures of
|
||||
AMD ROCm. It can be set to the CMake option ``-D GPU_TARGETS="gfx1032;gfx1035"``.
|
||||
AMD ROCm. It can be set to e.g. ``-D GPU_TARGETS="gfx1032;gfx1035"``.
|
||||
|
||||
ROCm CMake packages
|
||||
-------------------
|
||||
@@ -276,12 +252,13 @@ options.
|
||||
|
||||
IDEs supporting CMake (Visual Studio, Visual Studio Code, CLion, etc.) all came
|
||||
up with their own way to register command-line fragments of different purpose in
|
||||
a setup-and-forget fashion for quick assembly using graphical front-ends. This is
|
||||
a setup'n'forget fashion for quick assembly using graphical front-ends. This is
|
||||
all nice, but configurations aren't portable, nor can they be reused in
|
||||
Continuous Integration (CI) pipelines. CMake has condensed existing practice
|
||||
Continuous Integration (CI) pipelines. CMake has condensed existing practice
|
||||
into a portable JSON format that works in all IDEs and can be invoked from any
|
||||
command line. This is
|
||||
`CMake Presets <https://cmake.org/cmake/help/latest/manual/cmake-presets.7.html>`_.
|
||||
`CMake Presets <https://cmake.org/cmake/help/latest/manual/cmake-presets.7.html>`_
|
||||
.
|
||||
|
||||
There are two types of preset files: one supplied by the project, called
|
||||
``CMakePresets.json`` which is meant to be committed to version control,
|
||||
@@ -298,110 +275,109 @@ Following is an example ``CMakeUserPresets.json`` file which actually compiles
|
||||
the `amd/rocm-examples <https://github.com/amd/rocm-examples>`_ suite of sample
|
||||
applications on a typical ROCm installation:
|
||||
|
||||
.. code-block:: json
|
||||
::
|
||||
|
||||
{
|
||||
"version": 3,
|
||||
"cmakeMinimumRequired": {
|
||||
"major": 3,
|
||||
"minor": 21,
|
||||
"patch": 0
|
||||
},
|
||||
"configurePresets": [
|
||||
{
|
||||
"name": "layout",
|
||||
"hidden": true,
|
||||
"binaryDir": "${sourceDir}/build/${presetName}",
|
||||
"installDir": "${sourceDir}/install/${presetName}"
|
||||
{
|
||||
"version": 3,
|
||||
"cmakeMinimumRequired": {
|
||||
"major": 3,
|
||||
"minor": 21,
|
||||
"patch": 0
|
||||
},
|
||||
{
|
||||
"name": "generator-ninja-multi-config",
|
||||
"hidden": true,
|
||||
"generator": "Ninja Multi-Config"
|
||||
},
|
||||
{
|
||||
"name": "toolchain-makefiles-c/c++-amdclang",
|
||||
"hidden": true,
|
||||
"cacheVariables": {
|
||||
"CMAKE_C_COMPILER": "/opt/rocm/bin/amdclang",
|
||||
"CMAKE_CXX_COMPILER": "/opt/rocm/bin/amdclang++",
|
||||
"CMAKE_HIP_COMPILER": "/opt/rocm/bin/amdclang++"
|
||||
"configurePresets": [
|
||||
{
|
||||
"name": "layout",
|
||||
"hidden": true,
|
||||
"binaryDir": "${sourceDir}/build/${presetName}",
|
||||
"installDir": "${sourceDir}/install/${presetName}"
|
||||
},
|
||||
{
|
||||
"name": "generator-ninja-multi-config",
|
||||
"hidden": true,
|
||||
"generator": "Ninja Multi-Config"
|
||||
},
|
||||
{
|
||||
"name": "toolchain-makefiles-c/c++-amdclang",
|
||||
"hidden": true,
|
||||
"cacheVariables": {
|
||||
"CMAKE_C_COMPILER": "/opt/rocm/bin/amdclang",
|
||||
"CMAKE_CXX_COMPILER": "/opt/rocm/bin/amdclang++",
|
||||
"CMAKE_HIP_COMPILER": "/opt/rocm/bin/amdclang++"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "clang-strict-iso-high-warn",
|
||||
"hidden": true,
|
||||
"cacheVariables": {
|
||||
"CMAKE_C_FLAGS": "-Wall -Wextra -pedantic",
|
||||
"CMAKE_CXX_FLAGS": "-Wall -Wextra -pedantic",
|
||||
"CMAKE_HIP_FLAGS": "-Wall -Wextra -pedantic"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm",
|
||||
"displayName": "Ninja Multi-Config ROCm",
|
||||
"inherits": [
|
||||
"layout",
|
||||
"generator-ninja-multi-config",
|
||||
"toolchain-makefiles-c/c++-amdclang",
|
||||
"clang-strict-iso-high-warn"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "clang-strict-iso-high-warn",
|
||||
"hidden": true,
|
||||
"cacheVariables": {
|
||||
"CMAKE_C_FLAGS": "-Wall -Wextra -pedantic",
|
||||
"CMAKE_CXX_FLAGS": "-Wall -Wextra -pedantic",
|
||||
"CMAKE_HIP_FLAGS": "-Wall -Wextra -pedantic"
|
||||
],
|
||||
"buildPresets": [
|
||||
{
|
||||
"name": "ninja-mc-rocm-debug",
|
||||
"displayName": "Debug",
|
||||
"configuration": "Debug",
|
||||
"configurePreset": "ninja-mc-rocm"
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-release",
|
||||
"displayName": "Release",
|
||||
"configuration": "Release",
|
||||
"configurePreset": "ninja-mc-rocm"
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-debug-verbose",
|
||||
"displayName": "Debug (verbose)",
|
||||
"configuration": "Debug",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"verbose": true
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-release-verbose",
|
||||
"displayName": "Release (verbose)",
|
||||
"configuration": "Release",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"verbose": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm",
|
||||
"displayName": "Ninja Multi-Config ROCm",
|
||||
"inherits": [
|
||||
"layout",
|
||||
"generator-ninja-multi-config",
|
||||
"toolchain-makefiles-c/c++-amdclang",
|
||||
"clang-strict-iso-high-warn"
|
||||
]
|
||||
}
|
||||
],
|
||||
"buildPresets": [
|
||||
{
|
||||
"name": "ninja-mc-rocm-debug",
|
||||
"displayName": "Debug",
|
||||
"configuration": "Debug",
|
||||
"configurePreset": "ninja-mc-rocm"
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-release",
|
||||
"displayName": "Release",
|
||||
"configuration": "Release",
|
||||
"configurePreset": "ninja-mc-rocm"
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-debug-verbose",
|
||||
"displayName": "Debug (verbose)",
|
||||
"configuration": "Debug",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"verbose": true
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-release-verbose",
|
||||
"displayName": "Release (verbose)",
|
||||
"configuration": "Release",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"verbose": true
|
||||
}
|
||||
],
|
||||
"testPresets": [
|
||||
{
|
||||
"name": "ninja-mc-rocm-debug",
|
||||
"displayName": "Debug",
|
||||
"configuration": "Debug",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"execution": {
|
||||
"jobs": 0
|
||||
],
|
||||
"testPresets": [
|
||||
{
|
||||
"name": "ninja-mc-rocm-debug",
|
||||
"displayName": "Debug",
|
||||
"configuration": "Debug",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"execution": {
|
||||
"jobs": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-release",
|
||||
"displayName": "Release",
|
||||
"configuration": "Release",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"execution": {
|
||||
"jobs": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ninja-mc-rocm-release",
|
||||
"displayName": "Release",
|
||||
"configuration": "Release",
|
||||
"configurePreset": "ninja-mc-rocm",
|
||||
"execution": {
|
||||
"jobs": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. note::
|
||||
|
||||
Getting presets to work reliably on Windows requires some CMake improvements
|
||||
and/or support from compiler vendors. (Refer to
|
||||
`Add support to the Visual Studio generators <https://gitlab.kitware.com/cmake/cmake/-/issues/24245>`_
|
||||
and `Sourcing environment scripts <https://gitlab.kitware.com/cmake/cmake/-/issues/21619>`_
|
||||
.)
|
||||
Getting presets to work reliably on Windows requires some CMake improvements
|
||||
and/or support from compiler vendors. (Refer to
|
||||
`Add support to the Visual Studio generators <https://gitlab.kitware.com/cmake/cmake/-/issues/24245>`_
|
||||
and `Sourcing environment scripts <https://gitlab.kitware.com/cmake/cmake/-/issues/21619>`_
|
||||
.)
|
||||
|
||||
15
docs/conceptual/compiler-disambiguation.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# ROCm compilers disambiguation
|
||||
|
||||
ROCm ships multiple compilers of varying origins and purposes. This article
|
||||
disambiguates compiler naming used throughout the documentation.
|
||||
|
||||
## Compiler terms
|
||||
|
||||
| Term | Description |
| - | - |
| `amdclang++` | Clang/LLVM-based compiler that is part of the `rocm-llvm` package. The source code is available at <a href="https://github.com/RadeonOpenCompute/llvm-project" target="_blank">https://github.com/RadeonOpenCompute/llvm-project</a>. |
| AOCC | Closed-source Clang-based compiler that includes additional CPU optimizations. Offered as part of ROCm via the `rocm-llvm-alt` package. For details, see <a href="https://developer.amd.com/amd-aocc/" target="_blank">https://developer.amd.com/amd-aocc/</a>. |
| HIP-Clang | Informal term for the `amdclang++` compiler. |
| HIPIFY | Tools including `hipify-clang` and `hipify-perl`, used to automatically translate CUDA source code into portable HIP C++. The source code is available at <a href="https://github.com/ROCm-Developer-Tools/HIPIFY" target="_blank">https://github.com/ROCm-Developer-Tools/HIPIFY</a>. |
| `hipcc` | HIP compiler driver. A utility that invokes `clang` or `nvcc` depending on the target and passes the appropriate include and library options for the target compiler and HIP infrastructure. The source code is available at <a href="https://github.com/ROCm-Developer-Tools/HIPCC" target="_blank">https://github.com/ROCm-Developer-Tools/HIPCC</a>. |
| ROCmCC | Clang/LLVM-based compiler. ROCmCC in itself is not a binary but refers to the overall compiler. |
|
||||
@@ -1,14 +0,0 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="description" content="AMD ROCm documentation">
|
||||
<meta name="keywords" content="documentation, guides, installation, compatibility, support,
|
||||
reference, ROCm, AMD">
|
||||
</head>
|
||||
|
||||
# Using compiler features
|
||||
|
||||
The following topics describe using specific features of the compilation tools:
|
||||
|
||||
* [ROCm compiler infrastructure](https://rocm.docs.amd.com/projects/llvm-project/en/latest/index.html)
|
||||
* [Using AddressSanitizer](https://rocm.docs.amd.com/projects/llvm-project/en/latest/conceptual/using-gpu-sanitizer.html)
|
||||
* [OpenMP support](https://rocm.docs.amd.com/projects/llvm-project/en/latest/conceptual/openmp.html)
|
||||
@@ -1,15 +1,8 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="description" content="ROCm Linux Filesystem Hierarchy Standard reorganization">
|
||||
<meta name="keywords" content="FHS, Linux Filesystem Hierarchy Standard, directory structure,
|
||||
AMD, ROCm">
|
||||
</head>
|
||||
|
||||
# ROCm Linux Filesystem Hierarchy Standard reorganization
|
||||
|
||||
## Introduction
|
||||
|
||||
The ROCm Software has adopted the Linux Filesystem Hierarchy Standard (FHS) [https://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html](https://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html) in order to ensure ROCm is consistent with standard open source conventions. The following sections specify how current and future releases of ROCm adhere to FHS, how the previous ROCm file system is supported, and how improved versioning specifications are applied to ROCm.

The ROCm platform has adopted the Linux Filesystem Hierarchy Standard (FHS) [https://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html](https://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html) in order to ensure ROCm is consistent with standard open source conventions. The following sections specify how current and future releases of ROCm adhere to FHS, how the previous ROCm file system is supported, and how improved versioning specifications are applied to ROCm.
|
||||
|
||||
## Adopting the FHS
|
||||
|
||||
@@ -159,7 +152,7 @@ correct header file and use correct search paths.
|
||||
|
||||
## Changes in versioning specifications
|
||||
|
||||
In order to better manage ROCm dependencies specification and allow smoother releases of ROCm while avoiding dependency conflicts, ROCm software shall adhere to the following scheme when numbering and incrementing ROCm files versions:
|
||||
In order to better manage ROCm dependencies specification and allow smoother releases of ROCm while avoiding dependency conflicts, the ROCm platform shall adhere to the following scheme when numbering and incrementing ROCm files versions:
|
||||
|
||||
rocm-\<ver\>, where \<ver\> = \<x.y.z\>
|
||||
|
||||
|
||||
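Concretely, the FHS layout places each release under a versioned prefix with the standard `bin`/`include`/`lib`/`share` split, plus a version-agnostic symlink; the version shown here is illustrative:

```shell
# Versioned install root, plus a version-agnostic /opt/rocm symlink:
ls -d /opt/rocm*              # /opt/rocm  /opt/rocm-5.7.0
ls /opt/rocm-5.7.0            # bin  include  lib  share  ...
# Public headers live under the single include tree, for example:
ls /opt/rocm-5.7.0/include/hip/hip_runtime.h
```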
@@ -1,62 +1,40 @@

<head>
  <meta charset="UTF-8">
  <meta name="description" content="GPU architecture">
  <meta name="keywords" content="GPU architecture, architecture support, MI200, MI250, RDNA,
  MI100, AMD Instinct">
</head>

(gpu-arch-documentation)=

# GPU architecture documentation

:::::{grid} 1 1 2 2
:gutter: 1

:::{grid-item-card}
**AMD Instinct MI300 Series**
**AMD Instinct MI200 series**

Review hardware aspects of the AMD Instinct™ MI300 Series GPUs and the CDNA™ 3
architecture.

* [AMD Instinct™ MI300 microarchitecture](./gpu-arch/mi300.md)
* [AMD Instinct MI300/CDNA3 ISA](https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/instruction-set-architectures/amd-instinct-mi300-cdna3-instruction-set-architecture.pdf)
* [White paper](https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/white-papers/amd-cdna-3-white-paper.pdf)
* [MI300 performance counters](./gpu-arch/mi300-mi200-performance-counters.rst)
* [MI350 Series performance counters](./gpu-arch/mi350-performance-counters.rst)
:::

:::{grid-item-card}
**AMD Instinct MI200 Series**

Review hardware aspects of the AMD Instinct™ MI200 Series GPUs and the CDNA™ 2
architecture.
Review hardware aspects of the AMD Instinct™ MI200 series of GPU
accelerators and the CDNA™ 2 architecture.

* [AMD Instinct™ MI250 microarchitecture](./gpu-arch/mi250.md)
* [AMD Instinct MI200/CDNA2 ISA](https://www.amd.com/system/files/TechDocs/instinct-mi200-cdna2-instruction-set-architecture.pdf)
* [White paper](https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna2-white-paper.pdf)
* [Performance counters](./gpu-arch/mi300-mi200-performance-counters.rst)
* [White paper](https://www.amd.com/system/files/documents/amd-cdna2-white-paper.pdf)
* [Performance counters](./gpu-arch/mi200-performance-counters.md)

:::

:::{grid-item-card}
**AMD Instinct MI100**

Review hardware aspects of the AMD Instinct™ MI100 Series GPUs and the CDNA™ 1
architecture.
Review hardware aspects of the AMD Instinct™ MI100
accelerators and the CDNA™ 1 architecture that is the foundation of these GPUs.

* [AMD Instinct™ MI100 microarchitecture](./gpu-arch/mi100.md)
* [AMD Instinct MI100/CDNA1 ISA](https://www.amd.com/system/files/TechDocs/instinct-mi100-cdna1-shader-instruction-set-architecture%C2%A0.pdf)
* [White paper](https://www.amd.com/content/dam/amd/en/documents/instinct-business-docs/white-papers/amd-cdna-white-paper.pdf)
* [White paper](https://www.amd.com/system/files/documents/amd-cdna-whitepaper.pdf)

:::

:::{grid-item-card}
**RDNA**

* [AMD RDNA4 ISA](https://www.amd.com/content/dam/amd/en/documents/radeon-tech-docs/instruction-set-architectures/rdna4-instruction-set-architecture.pdf)
* [AMD RDNA3 ISA](https://www.amd.com/system/files/TechDocs/rdna3-shader-instruction-set-architecture-feb-2023_0.pdf)
* [AMD RDNA2 ISA](https://www.amd.com/system/files/TechDocs/rdna2-shader-instruction-set-architecture.pdf)
* [AMD RDNA ISA](https://www.amd.com/system/files/TechDocs/rdna-shader-instruction-set-architecture.pdf)
* [AMD RDNA Architecture White Paper](https://www.amd.com/system/files/documents/rdna-whitepaper.pdf)

:::
@@ -1,14 +1,7 @@

---
myst:
  html_meta:
    "description lang=en": "Learn about the AMD Instinct MI100 Series architecture."
    "keywords": "Instinct, MI100, microarchitecture, AMD, ROCm"
---

# AMD Instinct™ MI100 microarchitecture

The following image shows the node-level architecture of a system that
comprises two AMD EPYC™ processors and (up to) eight AMD Instinct™ GPUs.
comprises two AMD EPYC™ processors and (up to) eight AMD Instinct™ accelerators.
The two EPYC processors are connected to each other with the AMD Infinity™
fabric, which provides high-bandwidth (up to 18 GT/sec), coherent links such
that each processor can access the available node memory as a single
@@ -18,29 +11,29 @@ available to connect the processors plus one PCIe Gen 4 x16 link per processor
can attach additional I/O devices such as the host adapters for the network
fabric.




In a typical node configuration, each processor can host up to four AMD
Instinct™ GPUs that are attached using PCIe Gen 4 links at 16 GT/sec,
Instinct™ accelerators that are attached using PCIe Gen 4 links at 16 GT/sec,
which corresponds to a peak bidirectional link bandwidth of 32 GB/sec. Each hive
of four GPUs can participate in a fully connected, coherent AMD
Instinct™ fabric that connects the four GPUs using 23 GT/sec AMD
of four accelerators can participate in a fully connected, coherent AMD
Instinct™ fabric that connects the four accelerators using 23 GT/sec AMD
Infinity fabric links that run at a higher frequency than the inter-processor
links. This inter-GPU link can be established in certified server systems if the
GPUs are mounted in neighboring PCIe slots by installing the AMD Infinity
Fabric™ bridge for the AMD Instinct™ GPUs.
Fabric™ bridge for the AMD Instinct™ accelerators.

## Microarchitecture

The microarchitecture of the AMD Instinct GPUs is based on the AMD CDNA
The microarchitecture of the AMD Instinct accelerators is based on the AMD CDNA
architecture, which targets compute applications such as high-performance
computing (HPC) and AI & machine learning (ML) that run on everything from
individual servers to the world's largest exascale supercomputers. The overall
system architecture is designed for extreme scalability and compute performance.

*(Figure: block diagram of the AMD Instinct accelerator.)*

The above image shows the AMD Instinct GPU with its PCIe Gen 4 x16
The above image shows the AMD Instinct accelerator with its PCIe Gen 4 x16
link (16 GT/sec, at the bottom) that connects the GPU to (one of) the host
processor(s). It also shows the three AMD Infinity Fabric ports that provide
high-speed links (23 GT/sec, also at the bottom) to the other GPUs of the local
@@ -48,7 +41,7 @@ hive.

On the left and right of the floor plan, the High Bandwidth Memory (HBM)
attaches via the GPU memory controller. The MI100 generation of the AMD
Instinct GPU offers four stacks of HBM generation 2 (HBM2) for a total
Instinct accelerator offers four stacks of HBM generation 2 (HBM2) for a total
of 32 GB with a 4,096-bit-wide memory interface. The peak memory bandwidth of the
attached HBM2 is 1.228 TB/sec at a memory clock frequency of 1.2 GHz.

@@ -64,7 +57,7 @@ Therefore, the theoretical maximum FP64 peak performance is 11.5 TFLOPS



The preceding image shows the block diagram of a single CU of an AMD Instinct™
MI100 GPU and summarizes how instructions flow through the execution
MI100 accelerator and summarizes how instructions flow through the execution
engines. The CU fetches the instructions via a 32 KB instruction cache and moves
them forward to execution via a dispatcher. The CU can handle up to ten
wavefronts at a time and feed their instructions into the execution unit. The
455 docs/conceptual/gpu-arch/mi200-performance-counters.md Normal file
@@ -0,0 +1,455 @@
# MI200 performance counters and metrics
<!-- markdownlint-disable no-duplicate-header -->

This document lists and describes the hardware performance counters and the derived metrics available on the AMD Instinct™ MI200 GPU. All hardware performance monitors and derived performance metrics are accessible via the AMD ROCm™ Profiler tool.

## MI200 performance counters list

```{note}
Preliminary validation of all MI200 performance counters is in progress. Counters with “[*]” appended to their names require further evaluation.
```
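A minimal sketch of collecting a few of the counters listed below with the `rocprof` CLI; the application name is a placeholder, and the exact invocation may vary between ROCm releases:

```shell
# Request the counters in an input file, then profile the application;
# results are written to the CSV file named with -o.
cat > counters.txt << 'EOF'
pmc : grbm_gui_active sq_waves sq_insts_valu
EOF
rocprof -i counters.txt -o counters.csv ./my_hip_app
```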
### GRBM

#### GRBM counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `grbm_count` | Cycles | Free-running GPU clock |
| `grbm_gui_active` | Cycles | GPU active cycles |
| `grbm_cp_busy` | Cycles | Any of the command processor (CPC/CPF) blocks are busy. |
| `grbm_spi_busy` | Cycles | Any of the shader processor inputs (SPI) are busy in the shader engine(s). |
| `grbm_ta_busy` | Cycles | Any of the texture addressing units are busy in the shader engine(s). |
| `grbm_tc_busy` | Cycles | Any of the texture cache blocks (TCP/TCI/TCA/TCC) are busy. |
| `grbm_cpc_busy` | Cycles | The command processor - compute (CPC) is busy. |
| `grbm_cpf_busy` | Cycles | The command processor - fetcher (CPF) is busy. |
| `grbm_utcl2_busy` | Cycles | The unified translation cache - level 2 (UTCL2) block is busy. |
| `grbm_ea_busy` | Cycles | The efficiency arbiter (EA) block is busy. |
### Command processor

The command processor counters are further classified into fetcher and compute.

#### CPF

##### CPF counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `cpf_cmp_utcl1_stall_on_translation` | Cycles | One of the compute UTCL1s is stalled waiting on translation. |
| `cpf_cpf_stat_idle[∗]` | Cycles | CPF idle |
| `cpf_cpf_stat_stall` | Cycles | CPF stall |
| `cpf_cpf_tciu_busy` | Cycles | CPF TCIU interface busy |
| `cpf_cpf_tciu_idle` | Cycles | CPF TCIU interface idle |
| `cpf_cpf_tciu_stall[∗]` | Cycles | CPF TCIU interface is stalled waiting on free tags. |

#### CPC

##### CPC counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `cpc_me1_busy_for_packet_decode` | Cycles | CPC ME1 busy decoding packets |
| `cpc_utcl1_stall_on_translation` | Cycles | One of the UTCL1s is stalled waiting on translation |
| `cpc_cpc_stat_busy` | Cycles | CPC busy |
| `cpc_cpc_stat_idle` | Cycles | CPC idle |
| `cpc_cpc_stat_stall` | Cycles | CPC stalled |
| `cpc_cpc_tciu_busy` | Cycles | CPC TCIU interface busy |
| `cpc_cpc_tciu_idle` | Cycles | CPC TCIU interface idle |
| `cpc_cpc_utcl2iu_busy` | Cycles | CPC UTCL2 interface busy |
| `cpc_cpc_utcl2iu_idle` | Cycles | CPC UTCL2 interface idle |
| `cpc_cpc_utcl2iu_stall[∗]` | Cycles | CPC UTCL2 interface stalled waiting |
| `cpc_me1_dci0_spi_busy` | Cycles | CPC ME1 processor busy |
### SPI

#### SPI counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `spi_csn_busy` | Cycles | Number of clocks with outstanding waves |
| `spi_csn_window_valid` | Cycles | Clock count enabled by the `perfcounter_start` event |
| `spi_csn_num_threadgroups` | Workgroups | Total number of dispatched workgroups |
| `spi_csn_wave` | Wavefronts | Total number of dispatched wavefronts |
| `spi_ra_req_no_alloc` | Cycles | Arbiter cycles with requests but no allocation (multiply this value by 4) |
| `spi_ra_req_no_alloc_csn` | Cycles | Arbiter cycles with a CSn request but no CSn allocation (multiply this value by 4) |
| `spi_ra_res_stall_csn` | Cycles | Arbiter cycles with a CSn request that does not fit (multiply this value by 4) |
| `spi_ra_tmp_stall_csn[∗]` | Cycles | Cycles where CSn wants to issue a request but it does not fit in temp space |
| `spi_ra_wave_simd_full_csn` | SIMD-cycles | Sum of SIMDs whose wave slots cannot accept a CSn wave because it does not fit |
| `spi_ra_vgpr_simd_full_csn[∗]` | SIMD-cycles | Sum of SIMDs whose VGPRs cannot accept a CSn wave because it does not fit |
| `spi_ra_sgpr_simd_full_csn[∗]` | SIMD-cycles | Sum of SIMDs whose SGPRs cannot accept a CSn wave because it does not fit |
| `spi_ra_lds_cu_full_csn` | CUs | Sum of CUs whose LDS cannot accept a CSn wave because it does not fit |
| `spi_ra_bar_cu_full_csn[∗]` | CUs | Sum of CUs whose BARRIER resource cannot accept a CSn wave because it does not fit |
| `spi_ra_bulky_cu_full_csn[∗]` | CUs | Sum of CUs whose BULKY resource cannot accept a CSn wave because it does not fit |
| `spi_ra_tglim_cu_full_csn[∗]` | Cycles | Cycles where CSn wants to issue a request but all CUs are at `tg_limit` |
| `spi_ra_wvlim_cu_full_csn[∗]` | Cycles | Number of clocks CSn is stalled due to WAVE LIMIT |
| `spi_vwc_csc_wr` | Cycles | Number of clocks to write CSC waves to VGPRs (multiply this value by 4) |
| `spi_swc_csc_wr` | Cycles | Number of clocks to write CSC waves to SGPRs (multiply this value by 4) |
### Compute unit

The compute unit counters are further classified into instruction mix, MFMA operation counters, level counters, wavefront counters, wavefront cycle counters, local data share counters, and others.

#### Instruction mix

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_insts` | Instr | Number of instructions issued |
| `sq_insts_valu` | Instr | Number of VALU instructions issued, including MFMA |
| `sq_insts_valu_add_f16` | Instr | Number of VALU F16 Add instructions issued |
| `sq_insts_valu_mul_f16` | Instr | Number of VALU F16 Multiply instructions issued |
| `sq_insts_valu_fma_f16` | Instr | Number of VALU F16 FMA instructions issued |
| `sq_insts_valu_trans_f16` | Instr | Number of VALU F16 Transcendental instructions issued |
| `sq_insts_valu_add_f32` | Instr | Number of VALU F32 Add instructions issued |
| `sq_insts_valu_mul_f32` | Instr | Number of VALU F32 Multiply instructions issued |
| `sq_insts_valu_fma_f32` | Instr | Number of VALU F32 FMA instructions issued |
| `sq_insts_valu_trans_f32` | Instr | Number of VALU F32 Transcendental instructions issued |
| `sq_insts_valu_add_f64` | Instr | Number of VALU F64 Add instructions issued |
| `sq_insts_valu_mul_f64` | Instr | Number of VALU F64 Multiply instructions issued |
| `sq_insts_valu_fma_f64` | Instr | Number of VALU F64 FMA instructions issued |
| `sq_insts_valu_trans_f64` | Instr | Number of VALU F64 Transcendental instructions issued |
| `sq_insts_valu_int32` | Instr | Number of VALU 32-bit integer instructions issued (signed or unsigned) |
| `sq_insts_valu_int64` | Instr | Number of VALU 64-bit integer instructions issued (signed or unsigned) |
| `sq_insts_valu_cvt` | Instr | Number of VALU Conversion instructions issued |
| `sq_insts_valu_mfma_i8` | Instr | Number of 8-bit Integer MFMA instructions issued |
| `sq_insts_valu_mfma_f16` | Instr | Number of F16 MFMA instructions issued |
| `sq_insts_valu_mfma_bf16` | Instr | Number of BF16 MFMA instructions issued |
| `sq_insts_valu_mfma_f32` | Instr | Number of F32 MFMA instructions issued |
| `sq_insts_valu_mfma_f64` | Instr | Number of F64 MFMA instructions issued |
| `sq_insts_mfma` | Instr | Number of MFMA instructions issued |
| `sq_insts_vmem_wr` | Instr | Number of VMEM write instructions issued |
| `sq_insts_vmem_rd` | Instr | Number of VMEM read instructions issued |
| `sq_insts_vmem` | Instr | Number of VMEM instructions issued, including both FLAT and buffer instructions |
| `sq_insts_salu` | Instr | Number of SALU instructions issued |
| `sq_insts_smem` | Instr | Number of SMEM instructions issued |
| `sq_insts_smem_norm` | Instr | Number of SMEM instructions issued, normalized to match `smem_level`; used in measuring SMEM latency |
| `sq_insts_flat` | Instr | Number of FLAT instructions issued |
| `sq_insts_flat_lds_only` | Instr | Number of FLAT instructions issued that read/write only from/to LDS |
| `sq_insts_lds` | Instr | Number of LDS instructions issued |
| `sq_insts_gds` | Instr | Number of GDS instructions issued |
| `sq_insts_exp_gds` | Instr | Number of EXP and GDS instructions issued, excluding skipped export instructions |
| `sq_insts_branch` | Instr | Number of Branch instructions issued |
| `sq_insts_sendmsg` | Instr | Number of SENDMSG instructions issued, including `s_endpgm` |
| `sq_insts_vskipped[∗]` | Instr | Number of vector instructions skipped |
#### MFMA operation counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_insts_valu_mfma_mops_I8` | IOP | Number of 8-bit integer MFMA ops, in units of 512 |
| `sq_insts_valu_mfma_mops_F16` | FLOP | Number of F16 floating-point MFMA ops, in units of 512 |
| `sq_insts_valu_mfma_mops_BF16` | FLOP | Number of BF16 floating-point MFMA ops, in units of 512 |
| `sq_insts_valu_mfma_mops_F32` | FLOP | Number of F32 floating-point MFMA ops, in units of 512 |
| `sq_insts_valu_mfma_mops_F64` | FLOP | Number of F64 floating-point MFMA ops, in units of 512 |

#### Level counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_accum_prev` | Count | Accumulated counter sample value, where accumulation takes place once every four cycles |
| `sq_accum_prev_hires` | Count | Accumulated counter sample value, where accumulation takes place once every cycle |
| `sq_level_waves` | Waves | Number of inflight waves |
| `sq_insts_level_vmem` | Instr | Number of inflight VMEM instructions |
| `sq_insts_level_smem` | Instr | Number of inflight SMEM instructions |
| `sq_insts_level_lds` | Instr | Number of inflight LDS instructions |
| `sq_ifetch_level` | Instr | Number of inflight instruction fetches |
#### Wavefront counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_waves` | Waves | Number of wavefronts dispatched to SQs, including both new and restored wavefronts |
| `sq_waves_saved[∗]` | Waves | Number of context-saved wavefronts |
| `sq_waves_restored[∗]` | Waves | Number of context-restored wavefronts |
| `sq_waves_eq_64` | Waves | Number of wavefronts with exactly 64 active threads sent to SQs |
| `sq_waves_lt_64` | Waves | Number of wavefronts with fewer than 64 active threads sent to SQs |
| `sq_waves_lt_48` | Waves | Number of wavefronts with fewer than 48 active threads sent to SQs |
| `sq_waves_lt_32` | Waves | Number of wavefronts with fewer than 32 active threads sent to SQs |
| `sq_waves_lt_16` | Waves | Number of wavefronts with fewer than 16 active threads sent to SQs |

#### Wavefront cycle counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_cycles` | Cycles | Free-running SQ clocks |
| `sq_busy_cycles` | Cycles | Number of cycles the SQ reports being busy |
| `sq_busy_cu_cycles` | Qcycles | Number of quad cycles each CU is busy |
| `sq_valu_mfma_busy_cycles` | Cycles | Number of cycles the MFMA ALU is busy |
| `sq_wave_cycles` | Qcycles | Number of quad cycles spent by waves in the CUs |
| `sq_wait_any` | Qcycles | Number of quad cycles spent waiting for anything |
| `sq_wait_inst_any` | Qcycles | Number of quad cycles spent waiting for an issued instruction |
| `sq_active_inst_any` | Qcycles | Number of quad cycles spent by each wave to work on an instruction |
| `sq_active_inst_vmem` | Qcycles | Number of quad cycles spent by each wave to work on a non-FLAT VMEM instruction |
| `sq_active_inst_lds` | Qcycles | Number of quad cycles spent by each wave to work on an LDS instruction |
| `sq_active_inst_valu` | Qcycles | Number of quad cycles spent by each wave to work on a VALU instruction |
| `sq_active_inst_sca` | Qcycles | Number of quad cycles spent by each wave to work on an SCA instruction |
| `sq_active_inst_exp_gds` | Qcycles | Number of quad cycles spent by each wave to work on an EXP or GDS instruction |
| `sq_active_inst_misc` | Qcycles | Number of quad cycles spent by each wave to work on a MISC instruction, including branch and sendmsg |
| `sq_active_inst_flat` | Qcycles | Number of quad cycles spent by each wave to work on a FLAT instruction |
| `sq_inst_cycles_vmem_wr` | Qcycles | Number of quad cycles spent sending address and command data for VMEM write instructions, including both FLAT and buffer |
| `sq_inst_cycles_vmem_rd` | Qcycles | Number of quad cycles spent sending address and command data for VMEM read instructions, including both FLAT and buffer |
| `sq_inst_cycles_smem` | Qcycles | Number of quad cycles spent executing scalar memory reads |
| `sq_inst_cycles_salu` | Cycles | Number of cycles spent executing non-memory-read scalar operations |
| `sq_thread_cycles_valu` | Cycles | Number of thread cycles spent executing VALU operations |
#### Local data share

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_lds_atomic_return` | Cycles | Number of atomic return cycles in LDS |
| `sq_lds_bank_conflict` | Cycles | Number of cycles LDS is stalled by bank conflicts |
| `sq_lds_addr_conflict[∗]` | Cycles | Number of cycles LDS is stalled by address conflicts |
| `sq_lds_unaligned_stalls[∗]` | Cycles | Number of cycles LDS is stalled processing flat unaligned load/store ops |
| `sq_lds_mem_violations[∗]` | Count | Number of threads that have a memory violation in the LDS |

#### Miscellaneous

##### Miscellaneous counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sq_ifetch` | Count | Number of fetch requests from the L1I cache, in 32-byte width |
| `sq_items` | Threads | Number of valid threads |
### L1I and sL1D caches

#### L1I and sL1D cache counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `sqc_icache_req` | Req | Number of L1I cache requests |
| `sqc_icache_hits` | Count | Number of L1I cache lookup-hits |
| `sqc_icache_misses` | Count | Number of L1I cache non-duplicate lookup-misses |
| `sqc_icache_misses_duplicate` | Count | Number of L1I cache duplicate lookup-misses whose previous lookup-miss on the same cache line has not yet been fulfilled |
| `sqc_dcache_req` | Req | Number of sL1D cache requests |
| `sqc_dcache_input_valid_readb` | Cycles | Number of cycles while the SQ input is valid but the sL1D cache is not ready |
| `sqc_dcache_hits` | Count | Number of sL1D cache lookup-hits |
| `sqc_dcache_misses` | Count | Number of sL1D non-duplicate lookup-misses |
| `sqc_dcache_misses_duplicate` | Count | Number of sL1D duplicate lookup-misses |
| `sqc_dcache_req_read_1` | Req | Number of read requests of a single 32-bit data word, DWORD (DW) |
| `sqc_dcache_req_read_2` | Req | Number of read requests of 2 DW |
| `sqc_dcache_req_read_4` | Req | Number of read requests of 4 DW |
| `sqc_dcache_req_read_8` | Req | Number of read requests of 8 DW |
| `sqc_dcache_req_read_16` | Req | Number of read requests of 16 DW |
| `sqc_dcache_atomic[∗]` | Req | Number of atomic requests |
| `sqc_tc_req` | Req | Number of L2 cache requests issued by the instruction and constant caches |
| `sqc_tc_inst_req` | Req | Number of instruction cache line requests to the L2 cache |
| `sqc_tc_data_read_req` | Req | Number of data read requests to the L2 cache |
| `sqc_tc_data_write_req[∗]` | Req | Number of data write requests to the L2 cache |
| `sqc_tc_data_atomic_req[∗]` | Req | Number of data atomic requests to the L2 cache |
| `sqc_tc_stall[∗]` | Cycles | Number of cycles while valid requests to the L2 cache are stalled |
### Vector L1 cache subsystem

The vector L1 cache subsystem counters are further classified into texture addressing unit, texture data unit, vector L1D cache, and texture cache arbiter.

#### Texture addressing unit

##### Texture addressing unit counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `ta_ta_busy` | Cycles | Texture addressing unit busy cycles |
| `ta_total_wavefronts` | Instr | Number of wavefront instructions |
| `ta_buffer_wavefronts` | Instr | Number of buffer wavefront instructions |
| `ta_buffer_read_wavefronts` | Instr | Number of buffer read wavefront instructions |
| `ta_buffer_write_wavefronts` | Instr | Number of buffer write wavefront instructions |
| `ta_buffer_atomic_wavefronts[∗]` | Instr | Number of buffer atomic wavefront instructions |
| `ta_buffer_total_cycles` | Cycles | Number of buffer cycles, including read and write |
| `ta_buffer_coalesced_read_cycles` | Cycles | Number of coalesced buffer read cycles |
| `ta_buffer_coalesced_write_cycles` | Cycles | Number of coalesced buffer write cycles |
| `ta_addr_stalled_by_tc` | Cycles | Number of cycles the texture addressing unit address path is stalled by TCP |
| `ta_data_stalled_by_tc` | Cycles | Number of cycles the texture addressing unit data path is stalled by TCP |
| `ta_addr_stalled_by_td_cycles[∗]` | Cycles | Number of cycles the texture addressing unit address path is stalled by TD |
| `ta_flat_wavefronts` | Instr | Number of flat wavefront instructions |
| `ta_flat_read_wavefronts` | Instr | Number of flat read wavefront instructions |
| `ta_flat_write_wavefronts` | Instr | Number of flat write wavefront instructions |
| `ta_flat_atomic_wavefronts` | Instr | Number of flat atomic wavefront instructions |

#### Texture data unit

##### Texture data unit counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `td_td_busy` | Cycles | TD busy cycles |
| `td_tc_stall` | Cycles | Number of cycles TD is stalled by TCP |
| `td_spi_stall[∗]` | Cycles | Number of cycles TD is stalled by SPI |
| `td_load_wavefront` | Instr | Number of wavefront instructions (read/write/atomic) |
| `td_store_wavefront` | Instr | Number of write wavefront instructions |
| `td_atomic_wavefront` | Instr | Number of atomic wavefront instructions |
| `td_coalescable_wavefront` | Instr | Number of coalescable instructions |

#### Vector L1D cache

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `tcp_gate_en1` | Cycles | Number of cycles vL1D interface clocks are turned on |
| `tcp_gate_en2` | Cycles | Number of cycles vL1D core clocks are turned on |
| `tcp_td_tcp_stall_cycles` | Cycles | Number of cycles TD stalls vL1D |
| `tcp_tcr_tcp_stall_cycles` | Cycles | Number of cycles TCR stalls vL1D |
| `tcp_read_tagconflict_stall_cycles` | Cycles | Number of cycles a tagram conflict stalls on a read |
| `tcp_write_tagconflict_stall_cycles` | Cycles | Number of cycles a tagram conflict stalls on a write |
| `tcp_atomic_tagconflict_stall_cycles` | Cycles | Number of cycles a tagram conflict stalls on an atomic |
| `tcp_pending_stall_cycles` | Cycles | Number of cycles the vL1D cache is stalled due to data pending from the L2 cache |
| `tcp_ta_tcp_state_read` | Req | Number of wavefront instruction requests to vL1D |
| `tcp_volatile[∗]` | Req | Number of L1 volatile pixels/buffers from the texture addressing unit |
| `tcp_total_accesses` | Req | Number of vL1D accesses |
| `tcp_total_read` | Req | Number of vL1D read accesses |
| `tcp_total_write` | Req | Number of vL1D write accesses |
| `tcp_total_atomic_with_ret` | Req | Number of vL1D atomics with return |
| `tcp_total_atomic_without_ret` | Req | Number of vL1D atomics without return |
| `tcp_total_writeback_invalidates` | Count | Number of vL1D writebacks and invalidates |
| `tcp_utcl1_request` | Req | Number of address translation requests to UTCL1 |
| `tcp_utcl1_translation_hit` | Req | Number of UTCL1 translation hits |
| `tcp_utcl1_translation_miss` | Req | Number of UTCL1 translation misses |
| `tcp_utcl1_persmission_miss` | Req | Number of UTCL1 permission misses |
| `tcp_total_cache_accesses` | Req | Number of vL1D cache accesses |
| `tcp_tcp_latency` | Cycles | Accumulated wave access latency to vL1D over all wavefronts |
| `tcp_tcc_read_req_latency` | Cycles | Accumulated vL1D-L2 request latency over all wavefronts for reads and atomics with return |
| `tcp_tcc_write_req_latency` | Cycles | Accumulated vL1D-L2 request latency over all wavefronts for writes and atomics without return |
| `tcp_tcc_read_req` | Req | Number of read requests to the L2 cache |
| `tcp_tcc_write_req` | Req | Number of write requests to the L2 cache |
| `tcp_tcc_atomic_with_ret_req` | Req | Number of atomic requests to the L2 cache with return |
| `tcp_tcc_atomic_without_ret_req` | Req | Number of atomic requests to the L2 cache without return |
| `tcp_tcc_nc_read_req` | Req | Number of NC read requests to the L2 cache |
| `tcp_tcc_uc_read_req` | Req | Number of UC read requests to the L2 cache |
| `tcp_tcc_cc_read_req` | Req | Number of CC read requests to the L2 cache |
| `tcp_tcc_rw_read_req` | Req | Number of RW read requests to the L2 cache |
| `tcp_tcc_nc_write_req` | Req | Number of NC write requests to the L2 cache |
| `tcp_tcc_uc_write_req` | Req | Number of UC write requests to the L2 cache |
| `tcp_tcc_cc_write_req` | Req | Number of CC write requests to the L2 cache |
| `tcp_tcc_rw_write_req` | Req | Number of RW write requests to the L2 cache |
| `tcp_tcc_nc_atomic_req` | Req | Number of NC atomic requests to the L2 cache |
| `tcp_tcc_uc_atomic_req` | Req | Number of UC atomic requests to the L2 cache |
| `tcp_tcc_cc_atomic_req` | Req | Number of CC atomic requests to the L2 cache |
| `tcp_tcc_rw_atomic_req` | Req | Number of RW atomic requests to the L2 cache |
#### TCA

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `tca_cycle` | Cycles | TCA cycles |
| `tca_busy` | Cycles | Number of cycles TCA has a pending request |

### L2 cache access

#### L2 cache access counters

| Hardware Counter | Unit | Definition |
| --- | --- | --- |
| `tcc_cycle` | Cycles | L2 cache free-running clocks |
| `tcc_busy` | Cycles | L2 cache busy cycles |
| `tcc_req` | Req | Number of L2 cache requests |
| `tcc_streaming_req[∗]` | Req | Number of L2 cache streaming requests |
| `tcc_NC_req` | Req | Number of NC requests |
| `tcc_UC_req` | Req | Number of UC requests |
| `tcc_CC_req` | Req | Number of CC requests |
| `tcc_RW_req` | Req | Number of RW requests |
| `tcc_probe` | Req | Number of L2 cache probe requests |
| `tcc_probe_all[∗]` | Req | Number of external probe requests with `EA_TCC_preq_all == 1` |
| `tcc_read_req` | Req | Number of L2 cache read requests |
| `tcc_write_req` | Req | Number of L2 cache write requests |
| `tcc_atomic_req` | Req | Number of L2 cache atomic requests |
| `tcc_hit` | Req | Number of L2 cache lookup-hits |
| `tcc_miss` | Req | Number of L2 cache lookup-misses |
| `tcc_writeback` | Req | Number of lines written back to main memory, including writebacks of dirty lines and uncached write/atomic requests |
| `tcc_ea_wrreq` | Req | Total number of 32-byte and 64-byte write requests to EA |
| `tcc_ea_wrreq_64B` | Req | Total number of 64-byte write requests to EA |
| `tcc_ea_wr_uncached_32B` | Req | Number of 32-byte write/atomic requests going over the TC_EA_wrreq interface due to uncached traffic. Note that CC mtypes can produce uncached requests, which are included here. A 64-byte request is counted as 2. |
| `tcc_ea_wrreq_stall` | Cycles | Number of cycles a write request was stalled |
| `tcc_ea_wrreq_io_credit_stall[∗]` | Cycles | Number of cycles an EA write request runs out of IO credits |
| `tcc_ea_wrreq_gmi_credit_stall[∗]` | Cycles | Number of cycles an EA write request runs out of GMI credits |
| `tcc_ea_wrreq_dram_credit_stall` | Cycles | Number of cycles an EA write request runs out of DRAM credits |
| `tcc_too_many_ea_wrreqs_stall[∗]` | Cycles | Number of cycles the L2 cache is at its maximum number of pending EA write requests |
| `tcc_ea_wrreq_level` | Req | Accumulated number of L2 cache-EA write requests in flight |
| `tcc_ea_atomic` | Req | Number of 32-byte and 64-byte atomic requests to EA |
| `tcc_ea_atomic_level` | Req | Accumulated number of L2 cache-EA atomic requests in flight |
| `tcc_ea_rdreq` | Req | Total number of 32-byte and 64-byte read requests to EA |
| `tcc_ea_rdreq_32B` | Req | Total number of 32-byte read requests to EA |
| `tcc_ea_rd_uncached_32B` | Req | Number of 32-byte L2 cache-EA reads due to uncached traffic. A 64-byte request is counted as 2. |
| `tcc_ea_rdreq_io_credit_stall[∗]` | Cycles | Number of cycles the read request interface runs out of IO credits |
| `tcc_ea_rdreq_gmi_credit_stall[∗]` | Cycles | Number of cycles the read request interface runs out of GMI credits |
| `tcc_ea_rdreq_dram_credit_stall` | Cycles | Number of cycles the read request interface runs out of DRAM credits |
| `tcc_ea_rdreq_level` | Req | Accumulated number of L2 cache-EA read requests in flight |
| `tcc_ea_rdreq_dram` | Req | Number of 32-byte and 64-byte read requests to HBM |
| `tcc_ea_wrreq_dram` | Req | Number of 32-byte and 64-byte write requests to HBM |
| `tcc_tag_stall` | Cycles | Number of cycles the normal request pipeline in the tag was stalled for any reason |
| `tcc_normal_writeback` | Req | Number of L2 cache normal writebacks |
| `tcc_all_tc_op_wb_writeback[∗]` | Req | Number of instruction-triggered writeback requests |
| `tcc_normal_evict` | Req | Number of L2 cache normal evictions |
| `tcc_all_tc_op_inv_evict[∗]` | Req | Number of instruction-triggered eviction requests |
## MI200 derived metrics list

### Derived metrics on MI200 GPUs

| Derived Metric | Description |
| --- | --- |
| `VFetchInsts` | The average number of vector fetch instructions from the video memory executed per work-item (affected by flow control). Excludes FLAT instructions that fetch from video memory |
| `VWriteInsts` | The average number of vector write instructions to the video memory executed per work-item (affected by flow control). Excludes FLAT instructions that write to video memory |
| `FlatVMemInsts` | The average number of FLAT instructions that read from or write to the video memory executed per work-item (affected by flow control). Includes FLAT instructions that read from or write to scratch |
| `LDSInsts` | The average number of LDS read/write instructions executed per work-item (affected by flow control). Excludes FLAT instructions that read from or write to LDS |
| `FlatLDSInsts` | The average number of FLAT instructions that read from or write to LDS executed per work-item (affected by flow control) |
| `VALUUtilization` | The percentage of active vector ALU threads in a wave. A lower number can mean either more thread divergence in a wave or that the work-group size is not a multiple of 64. Value range: 0% (bad) to 100% (ideal; no thread divergence) |
| `VALUBusy` | The percentage of GPU time vector ALU instructions are processed. Value range: 0% (bad) to 100% (optimal) |
| `SALUBusy` | The percentage of GPU time scalar ALU instructions are processed. Value range: 0% (bad) to 100% (optimal) |
| `MemWrites32B` | The total number of effective 32B write transactions to the memory |
| `L2CacheHit` | The percentage of fetch, write, atomic, and other instructions that hit the data in L2 cache. Value range: 0% (no hit) to 100% (optimal) |
| `MemUnitStalled` | The percentage of GPU time the memory unit is stalled. Try reducing the number or size of fetches and writes if possible. Value range: 0% (optimal) to 100% (bad) |
| `WriteUnitStalled` | The percentage of GPU time the write unit is stalled. Value range: 0% (optimal) to 100% (bad) |
| `LDSBankConflict` | The percentage of GPU time LDS is stalled by bank conflicts. Value range: 0% (optimal) to 100% (bad) |
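To make the relationship between raw counters and derived metrics concrete, two illustrative definitions are sketched below. These follow the general shape of profiler metric formulas and are assumptions here, not the normative definitions, which ship with the profiler's metrics configuration:

```{math}
\text{L2CacheHit} = 100 \times \frac{tcc\_hit}{tcc\_hit + tcc\_miss}
\qquad
\text{LDSBankConflict} = 100 \times \frac{sq\_lds\_bank\_conflict}{grbm\_gui\_active}
```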
## MI200 acronyms

| Abbreviation | Meaning |
| --- | --- |
| `ALU` | Arithmetic logic unit |
| `Arb` | Arbiter |
| `BF16` | Brain floating point – 16 |
| `CC` | Coherently cached |
| `CP` | Command processor |
| `CPC` | Command processor – compute |
| `CPF` | Command processor – fetcher |
| `CS` | Compute shader |
| `CSC` | Compute shader controller |
| `CSn` | Compute shader, the n-th pipe |
| `CU` | Compute unit |
| `DW` | 32-bit data word, DWORD |
| `EA` | Efficiency arbiter |
| `F16` | Half-precision floating point |
| `FLAT` | FLAT instructions allow read/write/atomic access to a generic memory address pointer, which can resolve to any of the following physical memories:<br>• Global memory<br>• Scratch (“private”)<br>• LDS (“shared”)<br>• Invalid – MEM_VIOL TrapStatus |
| `FMA` | Fused multiply-add |
| `GDS` | Global data share |
| `GRBM` | Graphics register bus manager |
| `HBM` | High bandwidth memory |
| `Instr` | Instructions |
| `IOP` | Integer operation |
| `L2` | Level-2 cache |
| `LDS` | Local data share |
| `ME1` | Micro-engine, running packet-processing firmware on CPC |
| `MFMA` | Matrix fused multiply-add |
| `NC` | Noncoherently cached |
| `RW` | Coherently cached with write |
| `SALU` | Scalar ALU |
| `SGPR` | Scalar GPR |
| `SIMD` | Single instruction multiple data |
| `sL1D` | Scalar level-1 data cache |
| `SMEM` | Scalar memory |
| `SPI` | Shader processor input |
| `SQ` | Sequencer |
| `TA` | Texture addressing unit |
| `TC` | Texture cache |
| `TCA` | Texture cache arbiter |
| `TCC` | Texture cache per channel, also known as the L2 cache |
| `TCIU` | Texture cache interface unit, the command processor's interface to the memory system |
| `TCP` | Texture cache per pipe, also known as the vector L1 cache |
| `TCR` | Texture cache router |
| `TD` | Texture data unit |
| `UC` | Uncached |
| `UTCL1` | Unified translation cache – level 1 |
| `UTCL2` | Unified translation cache – level 2 |
| `VALU` | Vector ALU |
| `VGPR` | Vector GPR |
| `vL1D` | Vector level-1 data cache |
| `VMEM` | Vector memory |
@@ -1,13 +1,6 @@

---
myst:
  html_meta:
    "description lang=en": "Learn about the AMD Instinct MI250 Series architecture."
    "keywords": "Instinct, MI250, microarchitecture, AMD, ROCm"
---

# AMD Instinct™ MI250 microarchitecture

The microarchitecture of the AMD Instinct MI250 GPU is based on the
The microarchitecture of the AMD Instinct MI250 accelerators is based on the
AMD CDNA 2 architecture that targets compute applications such as HPC,
artificial intelligence (AI), and machine learning (ML) that run on
everything from individual servers to the world's largest exascale
@@ -34,13 +27,13 @@ Units (CU). The MI250 GCD has 104 active CUs. Each compute unit is further
subdivided into four SIMD units that process SIMD instructions of 16 data
elements per instruction (for the FP64 data type). This enables the CU to
process 64 work items (a so-called “wavefront”) at a peak clock frequency of 1.7
GHz. Therefore, the theoretical maximum FP64 peak performance per GCD is 22.6
TFLOPS for vector instructions. This equates to 45.3 TFLOPS for vector instructions for both GCDs together. The MI250 compute units also provide specialized
GHz. Therefore, the theoretical maximum FP64 peak performance per GCD is 45.3
TFLOPS for vector instructions. The MI250 compute units also provide specialized
execution units (also called matrix cores), which are geared toward executing
matrix operations like matrix-matrix multiplications. For FP64, the peak
performance of these units amounts to 90.5 TFLOPS.
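The 22.6 TFLOPS per-GCD figure follows directly from the quantities above, with an FMA counting as two floating-point operations per lane per cycle:

```{math}
104\ \text{CUs} \times 4\ \tfrac{\text{SIMD}}{\text{CU}} \times 16\ \tfrac{\text{lanes}}{\text{SIMD}} \times 2\ \tfrac{\text{FLOP}}{\text{lane}\cdot\text{cycle}} \times 1.7\ \text{GHz} \approx 22.6\ \text{TFLOPS}
```

Doubling this for the two GCDs of an OAM gives the 45.3 TFLOPS aggregate vector figure.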

|
||||

|
||||
|
||||
```{list-table} Peak-performance capabilities of the MI250 OAM for different data types.
|
||||
:header-rows: 1
|
||||
@@ -84,9 +77,16 @@ performance of these units amounts to 90.5 TFLOPS.
|
||||
- 362.1
|
||||
```
|
||||
|
||||
The above table summarizes the aggregated peak performance of the AMD Instinct MI250 Open Compute Platform (OCP) Open Accelerator Modules (OAMs) and its two GCDs for different data types and execution units. The middle column lists the peak performance (number of data elements processed in a single instruction) of a single compute unit if a SIMD (or matrix) instruction is being retired in each clock cycle. The third column lists the theoretical peak performance of the OAM module. The theoretical aggregated peak memory bandwidth of the GPU is 3.2 TB/sec (1.6 TB/sec per GCD).
|
||||
The above table summarizes the aggregated peak performance of the AMD
|
||||
Instinct MI250 OCP Open Accelerator Modules (OAM, OCP is short for Open Compute
|
||||
Platform) and its two GCDs for different data types and execution units. The
|
||||
middle column lists the peak performance (number of data elements processed in a
|
||||
single instruction) of a single compute unit if a SIMD (or matrix) instruction
|
||||
is being retired in each clock cycle. The third column lists the theoretical
|
||||
peak performance of the OAM module. The theoretical aggregated peak memory
|
||||
bandwidth of the GPU is 3.2 TB/sec (1.6 TB/sec per GCD).
|
||||
|
||||

|
||||

|
||||
|
||||
The following image shows the block diagram of an OAM package that consists
|
||||
of two GCDs, each of which constitutes one GPU device in the system. The two
|
||||
@@ -98,18 +98,18 @@ between the two GCDs of an OAM, or a bidirectional peak transfer bandwidth of
|
||||
## Node-level architecture
|
||||
|
||||
The following image shows the node-level architecture of a system that is
|
||||
based on the AMD Instinct MI250 GPU. The MI250 OAMs attach to the host
|
||||
based on the AMD Instinct MI250 accelerator. The MI250 OAMs attach to the host
|
||||
system via PCIe Gen 4 x16 links (yellow lines). Each GCD maintains its own PCIe
|
||||
x16 link to the host part of the system. Depending on the server platform, the
|
||||
GCD can attach to the AMD EPYC processor directly or via an optional PCIe switch
|
||||
. Note that some platforms may offer an x8 interface to the GCDs, which reduces
|
||||
the available host-to-GPU bandwidth.
|
||||
|
||||

|
||||

|
||||
|
||||
The preceding image shows the node-level architecture of a system with AMD
|
||||
EPYC processors in a dual-socket configuration and four AMD Instinct MI250
|
||||
GPUs. The MI250 OAMs attach to the host processors system via PCIe Gen 4
|
||||
accelerators. The MI250 OAMs attach to the host processors system via PCIe Gen 4
|
||||
x16 links (yellow lines). Depending on the system design, a PCIe switch may
|
||||
exist to make more PCIe lanes available for additional components like network
|
||||
interfaces and/or storage devices. Each GCD maintains its own PCIe x16 link to
|
||||
|
||||
@@ -1,757 +0,0 @@

.. meta::
   :description: MI300 and MI200 Series performance counters and metrics
   :keywords: MI300, MI200, performance counters, command processor counters

***************************************************************************************************
MI300 and MI200 Series performance counters and metrics
***************************************************************************************************

This document lists and describes the hardware performance counters and derived metrics available
for the AMD Instinct™ MI300 and MI200 GPUs. You can also access this information using the
:doc:`ROCprofiler-SDK <rocprofiler-sdk:how-to/using-rocprofv3>`.
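A minimal sketch of reading one of these counters with ``rocprofv3``; the application name is a placeholder, and the ROCprofiler-SDK documentation linked above is the authoritative reference for the CLI:

.. code-block:: shell

   # Collect a single hardware counter for one run of the application:
   rocprofv3 --pmc SQ_WAVES -- ./my_hip_app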
MI300 and MI200 Series performance counters
|
||||
===============================================================
|
||||
|
||||
Series performance counters include the following categories:
|
||||
|
||||
* :ref:`command-processor-counters`
|
||||
* :ref:`graphics-register-bus-manager-counters`
|
||||
* :ref:`spi-counters`
|
||||
* :ref:`compute-unit-counters`
|
||||
* :ref:`l1i-and-sl1d-cache-counters`
|
||||
* :ref:`vector-l1-cache-subsystem-counters`
|
||||
* :ref:`l2-cache-access-counters`
|
||||
|
||||
The following sections provide additional details for each category.
|
||||
|
||||
.. note::
|
||||
|
||||
Preliminary validation of all MI300 and MI200 Series performance counters is in progress. Those with
|
||||
an asterisk (*) require further evaluation.
|
||||
|
||||
.. _command-processor-counters:
|
||||
|
||||
Command processor counters
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Command processor counters are further classified into command processor-fetcher and command
|
||||
processor-compute.
|
||||
|
||||
Command processor-fetcher counters
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Unit", "Definition"
|
||||
|
||||
"``CPF_CMP_UTCL1_STALL_ON_TRANSLATION``", "Cycles", "Number of cycles one of the compute unified translation caches (L1) is stalled waiting on translation"
|
||||
"``CPF_CPF_STAT_BUSY``", "Cycles", "Number of cycles command processor-fetcher is busy"
|
||||
"``CPF_CPF_STAT_IDLE``", "Cycles", "Number of cycles command processor-fetcher is idle"
|
||||
"``CPF_CPF_STAT_STALL``", "Cycles", "Number of cycles command processor-fetcher is stalled"
|
||||
"``CPF_CPF_TCIU_BUSY``", "Cycles", "Number of cycles command processor-fetcher texture cache interface unit interface is busy"
|
||||
"``CPF_CPF_TCIU_IDLE``", "Cycles", "Number of cycles command processor-fetcher texture cache interface unit interface is idle"
|
||||
"``CPF_CPF_TCIU_STALL``", "Cycles", "Number of cycles command processor-fetcher texture cache interface unit interface is stalled waiting on free tags"
|
||||
|
||||
The texture cache interface unit is the interface between the command processor and the memory
|
||||
system.
|
||||
|
||||
Command processor-compute counters
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Unit", "Definition"
|
||||
|
||||
"``CPC_ME1_BUSY_FOR_PACKET_DECODE``", "Cycles", "Number of cycles command processor-compute micro engine is busy decoding packets"
|
||||
"``CPC_UTCL1_STALL_ON_TRANSLATION``", "Cycles", "Number of cycles one of the unified translation caches (L1) is stalled waiting on translation"
|
||||
"``CPC_CPC_STAT_BUSY``", "Cycles", "Number of cycles command processor-compute is busy"
|
||||
"``CPC_CPC_STAT_IDLE``", "Cycles", "Number of cycles command processor-compute is idle"
|
||||
"``CPC_CPC_STAT_STALL``", "Cycles", "Number of cycles command processor-compute is stalled"
|
||||
"``CPC_CPC_TCIU_BUSY``", "Cycles", "Number of cycles command processor-compute texture cache interface unit interface is busy"
|
||||
"``CPC_CPC_TCIU_IDLE``", "Cycles", "Number of cycles command processor-compute texture cache interface unit interface is idle"
|
||||
"``CPC_CPC_UTCL2IU_BUSY``", "Cycles", "Number of cycles command processor-compute unified translation cache (L2) interface is busy"
|
||||
"``CPC_CPC_UTCL2IU_IDLE``", "Cycles", "Number of cycles command processor-compute unified translation cache (L2) interface is idle"
|
||||
"``CPC_CPC_UTCL2IU_STALL``", "Cycles", "Number of cycles command processor-compute unified translation cache (L2) interface is stalled"
|
||||
"``CPC_ME1_DC0_SPI_BUSY``", "Cycles", "Number of cycles command processor-compute micro engine processor is busy"
|
||||
|
||||
The micro engine runs packet-processing firmware on the command processor-compute counter.
|
||||
|
||||
.. _graphics-register-bus-manager-counters:
|
||||
|
||||
Graphics register bus manager counters
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Unit", "Definition"
|
||||
|
||||
"``GRBM_COUNT``", "Cycles","Number of free-running GPU cycles"
|
||||
"``GRBM_GUI_ACTIVE``", "Cycles", "Number of GPU active cycles"
|
||||
"``GRBM_CP_BUSY``", "Cycles", "Number of cycles any of the command processor blocks are busy"
|
||||
"``GRBM_SPI_BUSY``", "Cycles", "Number of cycles any of the shader processor input is busy in the shader engines"
|
||||
"``GRBM_TA_BUSY``", "Cycles", "Number of cycles any of the texture addressing unit is busy in the shader engines"
|
||||
"``GRBM_TC_BUSY``", "Cycles", "Number of cycles any of the texture cache blocks are busy"
|
||||
"``GRBM_CPC_BUSY``", "Cycles", "Number of cycles the command processor-compute is busy"
|
||||
"``GRBM_CPF_BUSY``", "Cycles", "Number of cycles the command processor-fetcher is busy"
|
||||
"``GRBM_UTCL2_BUSY``", "Cycles", "Number of cycles the unified translation cache (Level 2 [L2]) block is busy"
|
||||
"``GRBM_EA_BUSY``", "Cycles", "Number of cycles the efficiency arbiter block is busy"
|
||||
|
||||
Texture cache blocks include:
|
||||
|
||||
* Texture cache arbiter
|
||||
* Texture cache per pipe, also known as vector Level 1 (L1) cache
|
||||
* Texture cache per channel, also known as known as L2 cache
|
||||
* Texture cache interface
|
||||
|
||||
.. _spi-counters:
|
||||
|
||||
Shader processor input counters
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Unit", "Definition"
|
||||
|
||||
"``SPI_CSN_BUSY``", "Cycles", "Number of cycles with outstanding waves"
|
||||
"``SPI_CSN_WINDOW_VALID``", "Cycles", "Number of cycles enabled by ``perfcounter_start`` event"
|
||||
"``SPI_CSN_NUM_THREADGROUPS``", "Workgroups", "Number of dispatched workgroups"
|
||||
"``SPI_CSN_WAVE``", "Wavefronts", "Number of dispatched wavefronts"
|
||||
"``SPI_RA_REQ_NO_ALLOC``", "Cycles", "Number of arbiter cycles with requests but no allocation"
|
||||
"``SPI_RA_REQ_NO_ALLOC_CSN``", "Cycles", "Number of arbiter cycles with compute shader (n\ :sup:`th` pipe) requests but no compute shader (n\ :sup:`th` pipe) allocation"
|
||||
"``SPI_RA_RES_STALL_CSN``", "Cycles", "Number of arbiter stall cycles due to shortage of compute shader (n\ :sup:`th` pipe) pipeline slots"
|
||||
"``SPI_RA_TMP_STALL_CSN``", "Cycles", "Number of stall cycles due to shortage of temp space"
|
||||
"``SPI_RA_WAVE_SIMD_FULL_CSN``", "SIMD-cycles", "Accumulated number of single instruction, multiple data (SIMD) per cycle affected by shortage of wave slots for compute shader (n\ :sup:`th` pipe) wave dispatch"
|
||||
"``SPI_RA_VGPR_SIMD_FULL_CSN``", "SIMD-cycles", "Accumulated number of SIMDs per cycle affected by shortage of vector general-purpose register (VGPR) slots for compute shader (n\ :sup:`th` pipe) wave dispatch"
|
||||
"``SPI_RA_SGPR_SIMD_FULL_CSN``", "SIMD-cycles", "Accumulated number of SIMDs per cycle affected by shortage of scalar general-purpose register (SGPR) slots for compute shader (n\ :sup:`th` pipe) wave dispatch"
|
||||
"``SPI_RA_LDS_CU_FULL_CSN``", "CU", "Number of compute units affected by shortage of local data share (LDS) space for compute shader (n\ :sup:`th` pipe) wave dispatch"
|
||||
"``SPI_RA_BAR_CU_FULL_CSN``", "CU", "Number of compute units with compute shader (n\ :sup:`th` pipe) waves waiting at a BARRIER"
|
||||
"``SPI_RA_BULKY_CU_FULL_CSN``", "CU", "Number of compute units with compute shader (n\ :sup:`th` pipe) waves waiting for BULKY resource"
|
||||
"``SPI_RA_TGLIM_CU_FULL_CSN``", "Cycles", "Number of compute shader (n\ :sup:`th` pipe) wave stall cycles due to restriction of ``tg_limit`` for thread group size"
|
||||
"``SPI_RA_WVLIM_STALL_CSN``", "Cycles", "Number of cycles compute shader (n\ :sup:`th` pipe) is stalled due to ``WAVE_LIMIT``"
|
||||
"``SPI_VWC_CSC_WR``", "Qcycles", "Number of quad-cycles taken to initialize VGPRs when launching waves"
|
||||
"``SPI_SWC_CSC_WR``", "Qcycles", "Number of quad-cycles taken to initialize SGPRs when launching waves"
|
||||
|

.. _compute-unit-counters:

Compute unit counters
---------------------------------------------------------------------------------------------------------------

The compute unit counters are further classified into instruction mix, matrix fused multiply-add (FMA)
operation counters, level counters, wavefront counters, wavefront cycle counters, and LDS counters.

Instruction mix
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQ_INSTS``", "Instr", "Number of instructions issued"
   "``SQ_INSTS_VALU``", "Instr", "Number of vector arithmetic logic unit (VALU) instructions including matrix FMA issued"
   "``SQ_INSTS_VALU_ADD_F16``", "Instr", "Number of VALU half-precision floating-point (F16) ``ADD`` or ``SUB`` instructions issued"
   "``SQ_INSTS_VALU_MUL_F16``", "Instr", "Number of VALU F16 multiply instructions issued"
   "``SQ_INSTS_VALU_FMA_F16``", "Instr", "Number of VALU F16 FMA or multiply-add instructions issued"
   "``SQ_INSTS_VALU_TRANS_F16``", "Instr", "Number of VALU F16 transcendental instructions issued"
   "``SQ_INSTS_VALU_ADD_F32``", "Instr", "Number of VALU full-precision floating-point (F32) ``ADD`` or ``SUB`` instructions issued"
   "``SQ_INSTS_VALU_MUL_F32``", "Instr", "Number of VALU F32 multiply instructions issued"
   "``SQ_INSTS_VALU_FMA_F32``", "Instr", "Number of VALU F32 FMA or multiply-add instructions issued"
   "``SQ_INSTS_VALU_TRANS_F32``", "Instr", "Number of VALU F32 transcendental instructions issued"
   "``SQ_INSTS_VALU_ADD_F64``", "Instr", "Number of VALU double-precision floating-point (F64) ``ADD`` or ``SUB`` instructions issued"
   "``SQ_INSTS_VALU_MUL_F64``", "Instr", "Number of VALU F64 multiply instructions issued"
   "``SQ_INSTS_VALU_FMA_F64``", "Instr", "Number of VALU F64 FMA or multiply-add instructions issued"
   "``SQ_INSTS_VALU_TRANS_F64``", "Instr", "Number of VALU F64 transcendental instructions issued"
   "``SQ_INSTS_VALU_INT32``", "Instr", "Number of VALU 32-bit integer instructions (signed or unsigned) issued"
   "``SQ_INSTS_VALU_INT64``", "Instr", "Number of VALU 64-bit integer instructions (signed or unsigned) issued"
   "``SQ_INSTS_VALU_CVT``", "Instr", "Number of VALU conversion instructions issued"
   "``SQ_INSTS_VALU_MFMA_I8``", "Instr", "Number of 8-bit integer matrix FMA instructions issued"
   "``SQ_INSTS_VALU_MFMA_F16``", "Instr", "Number of F16 matrix FMA instructions issued"
   "``SQ_INSTS_VALU_MFMA_F32``", "Instr", "Number of F32 matrix FMA instructions issued"
   "``SQ_INSTS_VALU_MFMA_F64``", "Instr", "Number of F64 matrix FMA instructions issued"
   "``SQ_INSTS_MFMA``", "Instr", "Number of matrix FMA instructions issued"
   "``SQ_INSTS_VMEM_WR``", "Instr", "Number of vector memory write instructions (including flat) issued"
   "``SQ_INSTS_VMEM_RD``", "Instr", "Number of vector memory read instructions (including flat) issued"
   "``SQ_INSTS_VMEM``", "Instr", "Number of vector memory instructions issued, including both flat and buffer instructions"
   "``SQ_INSTS_SALU``", "Instr", "Number of scalar arithmetic logic unit (SALU) instructions issued"
   "``SQ_INSTS_SMEM``", "Instr", "Number of scalar memory instructions issued"
   "``SQ_INSTS_SMEM_NORM``", "Instr", "Number of scalar memory instructions issued, normalized to match ``smem_level``"
   "``SQ_INSTS_FLAT``", "Instr", "Number of flat instructions issued"
   "``SQ_INSTS_FLAT_LDS_ONLY``", "Instr", "**MI200 Series only** Number of flat instructions issued that only read from or write to LDS. Works only if ``EARLY_TA_DONE`` is enabled."
   "``SQ_INSTS_LDS``", "Instr", "Number of LDS instructions issued **(MI200: includes flat; MI300: does not include flat)**"
   "``SQ_INSTS_GDS``", "Instr", "Number of global data share instructions issued"
   "``SQ_INSTS_EXP_GDS``", "Instr", "Number of ``EXP`` and global data share instructions issued, excluding skipped export instructions"
   "``SQ_INSTS_BRANCH``", "Instr", "Number of branch instructions issued"
   "``SQ_INSTS_SENDMSG``", "Instr", "Number of ``SENDMSG`` instructions including ``s_endpgm`` issued"
   "``SQ_INSTS_VSKIPPED``", "Instr", "Number of vector instructions skipped"

Flat instructions allow read, write, and atomic access to a generic memory address pointer that can
resolve to any of the following physical memories:

* Global memory
* Scratch ("private")
* LDS ("shared")
* Invalid - ``MEM_VIOL`` TrapStatus

Matrix fused multiply-add operation counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQ_INSTS_VALU_MFMA_MOPS_I8``", "IOP", "Number of 8-bit integer matrix FMA ops, counted in units of 512"
   "``SQ_INSTS_VALU_MFMA_MOPS_F16``", "FLOP", "Number of F16 floating-point matrix FMA ops, counted in units of 512"
   "``SQ_INSTS_VALU_MFMA_MOPS_BF16``", "FLOP", "Number of BF16 floating-point matrix FMA ops, counted in units of 512"
   "``SQ_INSTS_VALU_MFMA_MOPS_F32``", "FLOP", "Number of F32 floating-point matrix FMA ops, counted in units of 512"
   "``SQ_INSTS_VALU_MFMA_MOPS_F64``", "FLOP", "Number of F64 floating-point matrix FMA ops, counted in units of 512"

Level counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. note::

   All level counters must be followed by the ``SQ_ACCUM_PREV_HIRES`` counter to measure average latency.

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQ_ACCUM_PREV``", "Count", "Accumulated counter sample value where accumulation takes place once every four cycles"
   "``SQ_ACCUM_PREV_HIRES``", "Count", "Accumulated counter sample value where accumulation takes place once every cycle"
   "``SQ_LEVEL_WAVES``", "Waves", "Number of in-flight waves"
   "``SQ_INST_LEVEL_VMEM``", "Instr", "Number of in-flight vector memory (including flat) instructions"
   "``SQ_INST_LEVEL_SMEM``", "Instr", "Number of in-flight scalar memory instructions"
   "``SQ_INST_LEVEL_LDS``", "Instr", "Number of in-flight LDS (including flat) instructions"
   "``SQ_IFETCH_LEVEL``", "Instr", "Number of in-flight instruction fetch requests from the cache"

Use the following formulae to calculate average latencies (a worked sketch in Python follows the list):

* Vector memory latency = ``SQ_ACCUM_PREV_HIRES`` divided by ``SQ_INSTS_VMEM``
* Wave latency = ``SQ_ACCUM_PREV_HIRES`` divided by ``SQ_WAVES``
* LDS latency = ``SQ_ACCUM_PREV_HIRES`` divided by ``SQ_INSTS_LDS``
* Scalar memory latency = ``SQ_ACCUM_PREV_HIRES`` divided by ``SQ_INSTS_SMEM_NORM``
* Instruction fetch latency = ``SQ_ACCUM_PREV_HIRES`` divided by ``SQ_IFETCH``
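
The following minimal sketch shows how these ratios might be computed from collected counter
values. The ``counters`` dictionary is a hypothetical stand-in for values exported by a profiling
run in which ``SQ_ACCUM_PREV_HIRES`` followed the relevant level counter; it is not a
ROCprofiler API.

.. code-block:: python

   def average_latency(counters: dict, event: str) -> float:
       """Average latency in cycles: accumulated level value over event count."""
       return counters["SQ_ACCUM_PREV_HIRES"] / counters[event]

   # Example: vector memory latency from one collection pass (sample values).
   counters = {"SQ_ACCUM_PREV_HIRES": 1_250_000, "SQ_INSTS_VMEM": 4_096}
   print(f"Vector memory latency: {average_latency(counters, 'SQ_INSTS_VMEM'):.1f} cycles")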

Wavefront counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQ_WAVES``", "Waves", "Number of wavefronts dispatched to sequencers, including both new and restored wavefronts"
   "``SQ_WAVES_SAVED``", "Waves", "Number of context-saved waves"
   "``SQ_WAVES_RESTORED``", "Waves", "Number of context-restored waves sent to sequencers"
   "``SQ_WAVES_EQ_64``", "Waves", "Number of wavefronts with exactly 64 active threads sent to sequencers"
   "``SQ_WAVES_LT_64``", "Waves", "Number of wavefronts with less than 64 active threads sent to sequencers"
   "``SQ_WAVES_LT_48``", "Waves", "Number of wavefronts with less than 48 active threads sent to sequencers"
   "``SQ_WAVES_LT_32``", "Waves", "Number of wavefronts with less than 32 active threads sent to sequencers"
   "``SQ_WAVES_LT_16``", "Waves", "Number of wavefronts with less than 16 active threads sent to sequencers"

Wavefront cycle counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"
"``SQ_CYCLES``", "Cycles", "Clock cycles"
|
||||
"``SQ_BUSY_CYCLES``", "Cycles", "Number of cycles while sequencers reports it to be busy"
|
||||
"``SQ_BUSY_CU_CYCLES``", "Qcycles", "Number of quad-cycles each compute unit is busy"
|
||||
"``SQ_VALU_MFMA_BUSY_CYCLES``", "Cycles", "Number of cycles the matrix FMA arithmetic logic unit (ALU) is busy"
|
||||
"``SQ_WAVE_CYCLES``", "Qcycles", "Number of quad-cycles spent by waves in the compute units"
|
||||
"``SQ_WAIT_ANY``", "Qcycles", "Number of quad-cycles spent waiting for anything"
|
||||
"``SQ_WAIT_INST_ANY``", "Qcycles", "Number of quad-cycles spent waiting for any instruction to be issued"
|
||||
"``SQ_ACTIVE_INST_ANY``", "Qcycles", "Number of quad-cycles spent by each wave to work on an instruction"
|
||||
"``SQ_ACTIVE_INST_VMEM``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on a vector memory instruction"
|
||||
"``SQ_ACTIVE_INST_LDS``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on an LDS instruction"
|
||||
"``SQ_ACTIVE_INST_VALU``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on a VALU instruction"
|
||||
"``SQ_ACTIVE_INST_SCA``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on a SALU or scalar memory instruction"
|
||||
"``SQ_ACTIVE_INST_EXP_GDS``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on an ``EXPORT`` or ``GDS`` instruction"
|
||||
"``SQ_ACTIVE_INST_MISC``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on a ``BRANCH`` or ``SENDMSG`` instruction"
|
||||
"``SQ_ACTIVE_INST_FLAT``", "Qcycles", "Number of quad-cycles spent by the sequencer instruction arbiter to work on a flat instruction"
|
||||
"``SQ_INST_CYCLES_VMEM_WR``", "Qcycles", "Number of quad-cycles spent to send addr and cmd data for vector memory write instructions"
|
||||
"``SQ_INST_CYCLES_VMEM_RD``", "Qcycles", "Number of quad-cycles spent to send addr and cmd data for vector memory read instructions"
|
||||
"``SQ_INST_CYCLES_SMEM``", "Qcycles", "Number of quad-cycles spent to execute scalar memory reads"
|
||||
"``SQ_INST_CYCLES_SALU``", "Qcycles", "Number of quad-cycles spent to execute non-memory read scalar operations"
|
||||
"``SQ_THREAD_CYCLES_VALU``", "Qcycles", "Number of quad-cycles spent to execute VALU operations on active threads"
|
||||
"``SQ_WAIT_INST_LDS``", "Qcycles", "Number of quad-cycles spent waiting for LDS instruction to be issued"
|
||||
|
||||
``SQ_THREAD_CYCLES_VALU`` is similar to ``INST_CYCLES_VALU``, but it's multiplied by the number of
|
||||
active threads.
|
||||
|
||||

LDS counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQ_LDS_ATOMIC_RETURN``", "Cycles", "Number of atomic return cycles in LDS"
   "``SQ_LDS_BANK_CONFLICT``", "Cycles", "Number of cycles LDS is stalled by bank conflicts"
   "``SQ_LDS_ADDR_CONFLICT``", "Cycles", "Number of cycles LDS is stalled by address conflicts"
   "``SQ_LDS_UNALIGNED_STALL``", "Cycles", "Number of cycles LDS is stalled processing flat unaligned load or store operations"
   "``SQ_LDS_MEM_VIOLATIONS``", "Count", "Number of threads that have a memory violation in the LDS"
   "``SQ_LDS_IDX_ACTIVE``", "Cycles", "Number of cycles LDS is used for indexed operations"

Miscellaneous counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQ_IFETCH``", "Count", "Number of instruction fetch requests from L1i, in 32-byte width"
   "``SQ_ITEMS``", "Threads", "Number of valid items per wave"

.. _l1i-and-sl1d-cache-counters:

L1 instruction cache (L1i) and scalar L1 data cache (L1d) counters
---------------------------------------------------------------------------------------------------------------

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition"

   "``SQC_ICACHE_REQ``", "Req", "Number of L1 instruction (L1i) cache requests"
   "``SQC_ICACHE_HITS``", "Count", "Number of L1i cache hits"
   "``SQC_ICACHE_MISSES``", "Count", "Number of non-duplicate L1i cache misses, including uncached requests"
   "``SQC_ICACHE_MISSES_DUPLICATE``", "Count", "Number of duplicate L1i cache misses whose previous lookup miss on the same cache line is not fulfilled yet"
   "``SQC_DCACHE_REQ``", "Req", "Number of scalar L1d requests"
   "``SQC_DCACHE_INPUT_VALID_READYB``", "Cycles", "Number of cycles while sequencer input is valid but scalar L1d is not ready"
   "``SQC_DCACHE_HITS``", "Count", "Number of scalar L1d hits"
   "``SQC_DCACHE_MISSES``", "Count", "Number of non-duplicate scalar L1d misses, including uncached requests"
   "``SQC_DCACHE_MISSES_DUPLICATE``", "Count", "Number of duplicate scalar L1d misses"
   "``SQC_DCACHE_REQ_READ_1``", "Req", "Number of constant cache read requests of a single 32-bit data word"
   "``SQC_DCACHE_REQ_READ_2``", "Req", "Number of constant cache read requests of two 32-bit data words"
   "``SQC_DCACHE_REQ_READ_4``", "Req", "Number of constant cache read requests of four 32-bit data words"
   "``SQC_DCACHE_REQ_READ_8``", "Req", "Number of constant cache read requests of eight 32-bit data words"
   "``SQC_DCACHE_REQ_READ_16``", "Req", "Number of constant cache read requests of 16 32-bit data words"
   "``SQC_DCACHE_ATOMIC``", "Req", "Number of atomic requests"
   "``SQC_TC_REQ``", "Req", "Number of texture cache requests issued by the instruction and constant caches"
   "``SQC_TC_INST_REQ``", "Req", "Number of instruction requests to the L2 cache"
   "``SQC_TC_DATA_READ_REQ``", "Req", "Number of data read requests to the L2 cache"
   "``SQC_TC_DATA_WRITE_REQ``", "Req", "Number of data write requests to the L2 cache"
   "``SQC_TC_DATA_ATOMIC_REQ``", "Req", "Number of data atomic requests to the L2 cache"
   "``SQC_TC_STALL``", "Cycles", "Number of cycles while valid requests to the L2 cache are stalled"

.. _vector-l1-cache-subsystem-counters:

Vector L1 cache subsystem counters
---------------------------------------------------------------------------------------------------------------

The vector L1 cache subsystem counters are further classified into texture addressing unit, texture data
unit, vector L1d or texture cache per pipe, and texture cache arbiter counters.

Texture addressing unit counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition", "Value range for ``n``"
"``TA_TA_BUSY[n]``", "Cycles", "Texture addressing unit busy cycles", "0-15"
|
||||
"``TA_TOTAL_WAVEFRONTS[n]``", "Instr", "Number of wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_BUFFER_WAVEFRONTS[n]``", "Instr", "Number of buffer wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_BUFFER_READ_WAVEFRONTS[n]``", "Instr", "Number of buffer read wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_BUFFER_WRITE_WAVEFRONTS[n]``", "Instr", "Number of buffer write wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_BUFFER_ATOMIC_WAVEFRONTS[n]``", "Instr", "Number of buffer atomic wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_BUFFER_TOTAL_CYCLES[n]``", "Cycles", "Number of buffer cycles (including read and write) issued to texture cache", "0-15"
|
||||
"``TA_BUFFER_COALESCED_READ_CYCLES[n]``", "Cycles", "Number of coalesced buffer read cycles issued to texture cache", "0-15"
|
||||
"``TA_BUFFER_COALESCED_WRITE_CYCLES[n]``", "Cycles", "Number of coalesced buffer write cycles issued to texture cache", "0-15"
|
||||
"``TA_ADDR_STALLED_BY_TC_CYCLES[n]``", "Cycles", "Number of cycles texture addressing unit address path is stalled by texture cache", "0-15"
|
||||
"``TA_DATA_STALLED_BY_TC_CYCLES[n]``", "Cycles", "Number of cycles texture addressing unit data path is stalled by texture cache", "0-15"
|
||||
"``TA_ADDR_STALLED_BY_TD_CYCLES[n]``", "Cycles", "Number of cycles texture addressing unit address path is stalled by texture data unit", "0-15"
|
||||
"``TA_FLAT_WAVEFRONTS[n]``", "Instr", "Number of flat opcode wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_FLAT_READ_WAVEFRONTS[n]``", "Instr", "Number of flat opcode read wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_FLAT_WRITE_WAVEFRONTS[n]``", "Instr", "Number of flat opcode write wavefronts processed by texture addressing unit", "0-15"
|
||||
"``TA_FLAT_ATOMIC_WAVEFRONTS[n]``", "Instr", "Number of flat opcode atomic wavefronts processed by texture addressing unit", "0-15"
|
||||
|
||||

Texture data unit counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition", "Value range for ``n``"

   "``TD_TD_BUSY[n]``", "Cycles", "Texture data unit busy cycles while it is processing or waiting for data", "0-15"
   "``TD_TC_STALL[n]``", "Cycles", "Number of cycles texture data unit is stalled waiting for texture cache data", "0-15"
   "``TD_SPI_STALL[n]``", "Cycles", "Number of cycles texture data unit is stalled by shader processor input", "0-15"
   "``TD_LOAD_WAVEFRONT[n]``", "Instr", "Number of wavefront instructions (read, write, atomic)", "0-15"
   "``TD_STORE_WAVEFRONT[n]``", "Instr", "Number of write wavefront instructions", "0-15"
   "``TD_ATOMIC_WAVEFRONT[n]``", "Instr", "Number of atomic wavefront instructions", "0-15"
   "``TD_COALESCABLE_WAVEFRONT[n]``", "Instr", "Number of coalescable wavefronts according to texture addressing unit", "0-15"

Texture cache per pipe counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition", "Value range for ``n``"
"``TCP_GATE_EN1[n]``", "Cycles", "Number of cycles vector L1d interface clocks are turned on", "0-15"
|
||||
"``TCP_GATE_EN2[n]``", "Cycles", "Number of cycles vector L1d core clocks are turned on", "0-15"
|
||||
"``TCP_TD_TCP_STALL_CYCLES[n]``", "Cycles", "Number of cycles texture data unit stalls vector L1d", "0-15"
|
||||
"``TCP_TCR_TCP_STALL_CYCLES[n]``", "Cycles", "Number of cycles texture cache router stalls vector L1d", "0-15"
|
||||
"``TCP_READ_TAGCONFLICT_STALL_CYCLES[n]``", "Cycles", "Number of cycles tag RAM conflict stalls on a read", "0-15"
|
||||
"``TCP_WRITE_TAGCONFLICT_STALL_CYCLES[n]``", "Cycles", "Number of cycles tag RAM conflict stalls on a write", "0-15"
|
||||
"``TCP_ATOMIC_TAGCONFLICT_STALL_CYCLES[n]``", "Cycles", "Number of cycles tag RAM conflict stalls on an atomic", "0-15"
|
||||
"``TCP_PENDING_STALL_CYCLES[n]``", "Cycles", "Number of cycles vector L1d is stalled due to data pending from L2 Cache", "0-15"
|
||||
"``TCP_TCP_TA_DATA_STALL_CYCLES``", "Cycles", "Number of cycles texture cache per pipe stalls texture addressing unit data interface", "NA"
|
||||
"``TCP_TA_TCP_STATE_READ[n]``", "Req", "Number of state reads", "0-15"
|
||||
"``TCP_VOLATILE[n]``", "Req", "Number of L1 volatile pixels or buffers from texture addressing unit", "0-15"
|
||||
"``TCP_TOTAL_ACCESSES[n]``", "Req", "Number of vector L1d accesses. Equals ``TCP_PERF_SEL_TOTAL_READ`+`TCP_PERF_SEL_TOTAL_NONREAD``", "0-15"
|
||||
"``TCP_TOTAL_READ[n]``", "Req", "Number of vector L1d read accesses", "0-15"
|
||||
"``TCP_TOTAL_WRITE[n]``", "Req", "Number of vector L1d write accesses", "0-15"
|
||||
"``TCP_TOTAL_ATOMIC_WITH_RET[n]``", "Req", "Number of vector L1d atomic requests with return", "0-15"
|
||||
"``TCP_TOTAL_ATOMIC_WITHOUT_RET[n]``", "Req", "Number of vector L1d atomic without return", "0-15"
|
||||
"``TCP_TOTAL_WRITEBACK_INVALIDATES[n]``", "Count", "Total number of vector L1d writebacks and invalidates", "0-15"
|
||||
"``TCP_UTCL1_REQUEST[n]``", "Req", "Number of address translation requests to unified translation cache (L1)", "0-15"
|
||||
"``TCP_UTCL1_TRANSLATION_HIT[n]``", "Req", "Number of unified translation cache (L1) translation hits", "0-15"
|
||||
"``TCP_UTCL1_TRANSLATION_MISS[n]``", "Req", "Number of unified translation cache (L1) translation misses", "0-15"
|
||||
"``TCP_UTCL1_PERMISSION_MISS[n]``", "Req", "Number of unified translation cache (L1) permission misses", "0-15"
|
||||
"``TCP_TOTAL_CACHE_ACCESSES[n]``", "Req", "Number of vector L1d cache accesses including hits and misses", "0-15"
|
||||
"``TCP_TCP_LATENCY[n]``", "Cycles", "**MI200 Series only** Accumulated wave access latency to vL1D over all wavefronts", "0-15"
|
||||
"``TCP_TCC_READ_REQ_LATENCY[n]``", "Cycles", "**MI200 Series only** Total vL1D to L2 request latency over all wavefronts for reads and atomics with return", "0-15"
|
||||
"``TCP_TCC_WRITE_REQ_LATENCY[n]``", "Cycles", "**MI200 Series only** Total vL1D to L2 request latency over all wavefronts for writes and atomics without return", "0-15"
|
||||
"``TCP_TCC_READ_REQ[n]``", "Req", "Number of read requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_WRITE_REQ[n]``", "Req", "Number of write requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_ATOMIC_WITH_RET_REQ[n]``", "Req", "Number of atomic requests to L2 cache with return", "0-15"
|
||||
"``TCP_TCC_ATOMIC_WITHOUT_RET_REQ[n]``", "Req", "Number of atomic requests to L2 cache without return", "0-15"
|
||||
"``TCP_TCC_NC_READ_REQ[n]``", "Req", "Number of non-coherently cached read requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_UC_READ_REQ[n]``", "Req", "Number of uncached read requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_CC_READ_REQ[n]``", "Req", "Number of coherently cached read requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_RW_READ_REQ[n]``", "Req", "Number of coherently cached with write read requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_NC_WRITE_REQ[n]``", "Req", "Number of non-coherently cached write requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_UC_WRITE_REQ[n]``", "Req", "Number of uncached write requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_CC_WRITE_REQ[n]``", "Req", "Number of coherently cached write requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_RW_WRITE_REQ[n]``", "Req", "Number of coherently cached with write write requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_NC_ATOMIC_REQ[n]``", "Req", "Number of non-coherently cached atomic requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_UC_ATOMIC_REQ[n]``", "Req", "Number of uncached atomic requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_CC_ATOMIC_REQ[n]``", "Req", "Number of coherently cached atomic requests to L2 cache", "0-15"
|
||||
"``TCP_TCC_RW_ATOMIC_REQ[n]``", "Req", "Number of coherently cached with write atomic requests to L2 cache", "0-15"
|
||||
|
||||

Note that:

* ``TCP_TOTAL_READ[n]`` = ``TCP_PERF_SEL_TOTAL_HIT_LRU_READ`` + ``TCP_PERF_SEL_TOTAL_MISS_LRU_READ`` + ``TCP_PERF_SEL_TOTAL_MISS_EVICT_READ``
* ``TCP_TOTAL_WRITE[n]`` = ``TCP_PERF_SEL_TOTAL_MISS_LRU_WRITE`` + ``TCP_PERF_SEL_TOTAL_MISS_EVICT_WRITE``
* ``TCP_TOTAL_WRITEBACK_INVALIDATES[n]`` = ``TCP_PERF_SEL_TOTAL_WBINVL1`` + ``TCP_PERF_SEL_TOTAL_WBINVL1_VOL`` + ``TCP_PERF_SEL_CP_TCP_INVALIDATE`` + ``TCP_PERF_SEL_SQ_TCP_INVALIDATE_VOL``
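
The identities above can be checked mechanically. The following sketch reconstructs the totals
from hypothetical ``TCP_PERF_SEL_*`` component values (placeholders, not real profiler output):

.. code-block:: python

   sel = {
       "TCP_PERF_SEL_TOTAL_HIT_LRU_READ": 70_000,
       "TCP_PERF_SEL_TOTAL_MISS_LRU_READ": 20_000,
       "TCP_PERF_SEL_TOTAL_MISS_EVICT_READ": 10_000,
       "TCP_PERF_SEL_TOTAL_MISS_LRU_WRITE": 15_000,
       "TCP_PERF_SEL_TOTAL_MISS_EVICT_WRITE": 5_000,
   }

   # TCP_TOTAL_READ[n]: sum of the read hit and miss components.
   tcp_total_read = (sel["TCP_PERF_SEL_TOTAL_HIT_LRU_READ"]
                     + sel["TCP_PERF_SEL_TOTAL_MISS_LRU_READ"]
                     + sel["TCP_PERF_SEL_TOTAL_MISS_EVICT_READ"])

   # TCP_TOTAL_WRITE[n]: sum of the write miss components.
   tcp_total_write = (sel["TCP_PERF_SEL_TOTAL_MISS_LRU_WRITE"]
                      + sel["TCP_PERF_SEL_TOTAL_MISS_EVICT_WRITE"])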

Texture cache arbiter counters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. csv-table::
   :header: "Hardware counter", "Unit", "Definition", "Value range for ``n``"

   "``TCA_CYCLE[n]``", "Cycles", "Number of texture cache arbiter cycles", "0-31"
   "``TCA_BUSY[n]``", "Cycles", "Number of cycles texture cache arbiter has a pending request", "0-31"

.. _l2-cache-access-counters:

L2 cache access counters
---------------------------------------------------------------------------------------------------------------

L2 cache is also known as texture cache per channel.

.. tab-set::

   .. tab-item:: MI300 hardware counter

      .. csv-table::
         :header: "Hardware counter", "Unit", "Definition", "Value range for ``n``"
"``TCC_CYCLE[n]``", "Cycles", "Number of L2 cache free-running clocks", "0-31"
|
||||
"``TCC_BUSY[n]``", "Cycles", "Number of L2 cache busy cycles", "0-31"
|
||||
"``TCC_REQ[n]``", "Req", "Number of L2 cache requests of all types (measured at the tag block)", "0-31"
|
||||
"``TCC_STREAMING_REQ[n]``", "Req", "Number of L2 cache streaming requests (measured at the tag block)", "0-31"
|
||||
"``TCC_NC_REQ[n]``", "Req", "Number of non-coherently cached requests (measured at the tag block)", "0-31"
|
||||
"``TCC_UC_REQ[n]``", "Req", "Number of uncached requests. This is measured at the tag block", "0-31"
|
||||
"``TCC_CC_REQ[n]``", "Req", "Number of coherently cached requests. This is measured at the tag block", "0-31"
|
||||
"``TCC_RW_REQ[n]``", "Req", "Number of coherently cached with write requests. This is measured at the tag block", "0-31"
|
||||
"``TCC_PROBE[n]``", "Req", "Number of probe requests", "0-31"
|
||||
"``TCC_PROBE_ALL[n]``", "Req", "Number of external probe requests with ``EA_TCC_preq_all == 1``", "0-31"
|
||||
"``TCC_READ[n]``", "Req", "Number of L2 cache read requests (includes compressed reads but not metadata reads)", "0-31"
|
||||
"``TCC_WRITE[n]``", "Req", "Number of L2 cache write requests", "0-31"
|
||||
"``TCC_ATOMIC[n]``", "Req", "Number of L2 cache atomic requests of all types", "0-31"
|
||||
"``TCC_HIT[n]``", "Req", "Number of L2 cache hits", "0-31"
|
||||
"``TCC_MISS[n]``", "Req", "Number of L2 cache misses", "0-31"
|
||||
"``TCC_WRITEBACK[n]``", "Req", "Number of lines written back to the main memory, including writebacks of dirty lines and uncached write or atomic requests", "0-31"
|
||||
"``TCC_EA0_WRREQ[n]``", "Req", "Number of 32-byte and 64-byte transactions going over the ``TC_EA_wrreq`` interface (doesn't include probe commands)", "0-31"
|
||||
"``TCC_EA0_WRREQ_64B[n]``", "Req", "Total number of 64-byte transactions (write or ``CMPSWAP``) going over the ``TC_EA_wrreq`` interface", "0-31"
|
||||
"``TCC_EA0_WR_UNCACHED_32B[n]``", "Req", "Number of 32 or 64-byte write or atomic going over the ``TC_EA_wrreq`` interface due to uncached traffic", "0-31"
|
||||
"``TCC_EA0_WRREQ_STALL[n]``", "Cycles", "Number of cycles a write request is stalled", "0-31"
|
||||
"``TCC_EA0_WRREQ_IO_CREDIT_STALL[n]``", "Cycles", "Number of cycles an efficiency arbiter write request is stalled due to the interface running out of input-output (IO) credits", "0-31"
|
||||
"``TCC_EA0_WRREQ_GMI_CREDIT_STALL[n]``", "Cycles", "Number of cycles an efficiency arbiter write request is stalled due to the interface running out of GMI credits", "0-31"
|
||||
"``TCC_EA0_WRREQ_DRAM_CREDIT_STALL[n]``", "Cycles", "Number of cycles an efficiency arbiter write request is stalled due to the interface running out of DRAM credits", "0-31"
|
||||
"``TCC_TOO_MANY_EA_WRREQS_STALL[n]``", "Cycles", "Number of cycles the L2 cache is unable to send an efficiency arbiter write request due to it reaching its maximum capacity of pending efficiency arbiter write requests", "0-31"
|
||||
"``TCC_EA0_WRREQ_LEVEL[n]``", "Req", "The accumulated number of efficiency arbiter write requests in flight", "0-31"
|
||||
"``TCC_EA0_ATOMIC[n]``", "Req", "Number of 32-byte or 64-byte atomic requests going over the ``TC_EA_wrreq`` interface", "0-31"
|
||||
"``TCC_EA0_ATOMIC_LEVEL[n]``", "Req", "The accumulated number of efficiency arbiter atomic requests in flight", "0-31"
|
||||
"``TCC_EA0_RDREQ[n]``", "Req", "Number of 32-byte or 64-byte read requests to efficiency arbiter", "0-31"
|
||||
"``TCC_EA0_RDREQ_32B[n]``", "Req", "Number of 32-byte read requests to efficiency arbiter", "0-31"
|
||||
"``TCC_EA0_RD_UNCACHED_32B[n]``", "Req", "Number of 32-byte efficiency arbiter reads due to uncached traffic. A 64-byte request is counted as 2", "0-31"
|
||||
"``TCC_EA0_RDREQ_IO_CREDIT_STALL[n]``", "Cycles", "Number of cycles there is a stall due to the read request interface running out of IO credits", "0-31"
|
||||
"``TCC_EA0_RDREQ_GMI_CREDIT_STALL[n]``", "Cycles", "Number of cycles there is a stall due to the read request interface running out of GMI credits", "0-31"
|
||||
"``TCC_EA0_RDREQ_DRAM_CREDIT_STALL[n]``", "Cycles", "Number of cycles there is a stall due to the read request interface running out of DRAM credits", "0-31"
|
||||
"``TCC_EA0_RDREQ_LEVEL[n]``", "Req", "The accumulated number of efficiency arbiter read requests in flight", "0-31"
|
||||
"``TCC_EA0_RDREQ_DRAM[n]``", "Req", "Number of 32-byte or 64-byte efficiency arbiter read requests to High Bandwidth Memory (HBM)", "0-31"
|
||||
"``TCC_EA0_WRREQ_DRAM[n]``", "Req", "Number of 32-byte or 64-byte efficiency arbiter write requests to HBM", "0-31"
|
||||
"``TCC_TAG_STALL[n]``", "Cycles", "Number of cycles the normal request pipeline in the tag is stalled for any reason", "0-31"
|
||||
"``TCC_NORMAL_WRITEBACK[n]``", "Req", "Number of writebacks due to requests that are not writeback requests", "0-31"
|
||||
"``TCC_ALL_TC_OP_WB_WRITEBACK[n]``", "Req", "Number of writebacks due to all ``TC_OP`` writeback requests", "0-31"
|
||||
"``TCC_NORMAL_EVICT[n]``", "Req", "Number of evictions due to requests that are not invalidate or probe requests", "0-31"
|
||||
"``TCC_ALL_TC_OP_INV_EVICT[n]``", "Req", "Number of evictions due to all ``TC_OP`` invalidate requests", "0-31"
|
||||
|
||||

   .. tab-item:: MI200 hardware counter

      .. csv-table::
         :header: "Hardware counter", "Unit", "Definition", "Value range for ``n``"
"``TCC_CYCLE[n]``", "Cycles", "Number of L2 cache free-running clocks", "0-31"
|
||||
"``TCC_BUSY[n]``", "Cycles", "Number of L2 cache busy cycles", "0-31"
|
||||
"``TCC_REQ[n]``", "Req", "Number of L2 cache requests of all types (measured at the tag block)", "0-31"
|
||||
"``TCC_STREAMING_REQ[n]``", "Req", "Number of L2 cache streaming requests (measured at the tag block)", "0-31"
|
||||
"``TCC_NC_REQ[n]``", "Req", "Number of non-coherently cached requests (measured at the tag block)", "0-31"
|
||||
"``TCC_UC_REQ[n]``", "Req", "Number of uncached requests. This is measured at the tag block", "0-31"
|
||||
"``TCC_CC_REQ[n]``", "Req", "Number of coherently cached requests. This is measured at the tag block", "0-31"
|
||||
"``TCC_RW_REQ[n]``", "Req", "Number of coherently cached with write requests. This is measured at the tag block", "0-31"
|
||||
"``TCC_PROBE[n]``", "Req", "Number of probe requests", "0-31"
|
||||
"``TCC_PROBE_ALL[n]``", "Req", "Number of external probe requests with ``EA_TCC_preq_all == 1``", "0-31"
|
||||
"``TCC_READ[n]``", "Req", "Number of L2 cache read requests (includes compressed reads but not metadata reads)", "0-31"
|
||||
"``TCC_WRITE[n]``", "Req", "Number of L2 cache write requests", "0-31"
|
||||
"``TCC_ATOMIC[n]``", "Req", "Number of L2 cache atomic requests of all types", "0-31"
|
||||
"``TCC_HIT[n]``", "Req", "Number of L2 cache hits", "0-31"
|
||||
"``TCC_MISS[n]``", "Req", "Number of L2 cache misses", "0-31"
|
||||
"``TCC_WRITEBACK[n]``", "Req", "Number of lines written back to the main memory, including writebacks of dirty lines and uncached write or atomic requests", "0-31"
|
||||
"``TCC_EA_WRREQ[n]``", "Req", "Number of 32-byte and 64-byte transactions going over the ``TC_EA_wrreq`` interface (doesn't include probe commands)", "0-31"
|
||||
"``TCC_EA_WRREQ_64B[n]``", "Req", "Total number of 64-byte transactions (write or ``CMPSWAP``) going over the ``TC_EA_wrreq`` interface", "0-31"
|
||||
"``TCC_EA_WR_UNCACHED_32B[n]``", "Req", "Number of 32 write or atomic going over the ``TC_EA_wrreq`` interface due to uncached traffic. A 64-byte request will be counted as 2", "0-31"
|
||||
"``TCC_EA_WRREQ_STALL[n]``", "Cycles", "Number of cycles a write request is stalled", "0-31"
|
||||
"``TCC_EA_WRREQ_IO_CREDIT_STALL[n]``", "Cycles", "Number of cycles an efficiency arbiter write request is stalled due to the interface running out of input-output (IO) credits", "0-31"
|
||||
"``TCC_EA_WRREQ_GMI_CREDIT_STALL[n]``", "Cycles", "Number of cycles an efficiency arbiter write request is stalled due to the interface running out of GMI credits", "0-31"
|
||||
"``TCC_EA_WRREQ_DRAM_CREDIT_STALL[n]``", "Cycles", "Number of cycles an efficiency arbiter write request is stalled due to the interface running out of DRAM credits", "0-31"
|
||||
"``TCC_TOO_MANY_EA_WRREQS_STALL[n]``", "Cycles", "Number of cycles the L2 cache is unable to send an efficiency arbiter write request due to it reaching its maximum capacity of pending efficiency arbiter write requests", "0-31"
|
||||
"``TCC_EA_WRREQ_LEVEL[n]``", "Req", "The accumulated number of efficiency arbiter write requests in flight", "0-31"
|
||||
"``TCC_EA_ATOMIC[n]``", "Req", "Number of 32-byte or 64-byte atomic requests going over the ``TC_EA_wrreq`` interface", "0-31"
|
||||
"``TCC_EA_ATOMIC_LEVEL[n]``", "Req", "The accumulated number of efficiency arbiter atomic requests in flight", "0-31"
|
||||
"``TCC_EA_RDREQ[n]``", "Req", "Number of 32-byte or 64-byte read requests to efficiency arbiter", "0-31"
|
||||
"``TCC_EA_RDREQ_32B[n]``", "Req", "Number of 32-byte read requests to efficiency arbiter", "0-31"
|
||||
"``TCC_EA_RD_UNCACHED_32B[n]``", "Req", "Number of 32-byte efficiency arbiter reads due to uncached traffic. A 64-byte request is counted as 2", "0-31"
|
||||
"``TCC_EA_RDREQ_IO_CREDIT_STALL[n]``", "Cycles", "Number of cycles there is a stall due to the read request interface running out of IO credits", "0-31"
|
||||
"``TCC_EA_RDREQ_GMI_CREDIT_STALL[n]``", "Cycles", "Number of cycles there is a stall due to the read request interface running out of GMI credits", "0-31"
|
||||
"``TCC_EA_RDREQ_DRAM_CREDIT_STALL[n]``", "Cycles", "Number of cycles there is a stall due to the read request interface running out of DRAM credits", "0-31"
|
||||
"``TCC_EA_RDREQ_LEVEL[n]``", "Req", "The accumulated number of efficiency arbiter read requests in flight", "0-31"
|
||||
"``TCC_EA_RDREQ_DRAM[n]``", "Req", "Number of 32-byte or 64-byte efficiency arbiter read requests to High Bandwidth Memory (HBM)", "0-31"
|
||||
"``TCC_EA_WRREQ_DRAM[n]``", "Req", "Number of 32-byte or 64-byte efficiency arbiter write requests to HBM", "0-31"
|
||||
"``TCC_TAG_STALL[n]``", "Cycles", "Number of cycles the normal request pipeline in the tag is stalled for any reason", "0-31"
|
||||
"``TCC_NORMAL_WRITEBACK[n]``", "Req", "Number of writebacks due to requests that are not writeback requests", "0-31"
|
||||
"``TCC_ALL_TC_OP_WB_WRITEBACK[n]``", "Req", "Number of writebacks due to all ``TC_OP`` writeback requests", "0-31"
|
||||
"``TCC_NORMAL_EVICT[n]``", "Req", "Number of evictions due to requests that are not invalidate or probe requests", "0-31"
|
||||
"``TCC_ALL_TC_OP_INV_EVICT[n]``", "Req", "Number of evictions due to all ``TC_OP`` invalidate requests", "0-31"
|
||||
|
||||

Note the following:

* ``TCC_REQ[n]`` may be more than the number of requests arriving at the texture cache per channel,
  but it is a good indication of the total amount of work that needs to be performed.
* For ``TCC_EA0_WRREQ[n]``, atomics may travel over the same interface and are generally classified as
  write requests.
* Coherently cached (CC) mtypes can produce uncached requests, and those are included in
  ``TCC_EA0_WR_UNCACHED_32B[n]``.
* ``TCC_EA0_WRREQ_LEVEL[n]`` is primarily intended to measure average efficiency arbiter write latency,
  as shown in the sketch after this list.

  * Average write latency = ``TCC_PERF_SEL_EA0_WRREQ_LEVEL`` divided by ``TCC_PERF_SEL_EA0_WRREQ``

* ``TCC_EA0_ATOMIC_LEVEL[n]`` is primarily intended to measure average efficiency arbiter atomic
  latency.

  * Average atomic latency = ``TCC_PERF_SEL_EA0_WRREQ_ATOMIC_LEVEL`` divided by ``TCC_PERF_SEL_EA0_WRREQ_ATOMIC``

* ``TCC_EA0_RDREQ_LEVEL[n]`` is primarily intended to measure average efficiency arbiter read latency.

  * Average read latency = ``TCC_PERF_SEL_EA0_RDREQ_LEVEL`` divided by ``TCC_PERF_SEL_EA0_RDREQ``

* Stalls can occur regardless of the need for a read to be performed.
* Normally, stalls are measured at exactly one point in the pipeline. However, in the case of
  ``TCC_TAG_STALL[n]``, probes can stall the pipeline at a variety of places, so there is no single
  point that can accurately measure the total stalls.
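
The following sketch illustrates these level-counter ratios with placeholder values standing in for
one profiling pass per metric; it is not a profiler API.

.. code-block:: python

   def average_latency(level: float, count: float) -> float:
       """Average in-flight latency: accumulated level value over request count."""
       return level / count if count else 0.0

   # Placeholder values for a single collection pass.
   c = {"TCC_PERF_SEL_EA0_WRREQ_LEVEL": 900_000, "TCC_PERF_SEL_EA0_WRREQ": 3_000,
        "TCC_PERF_SEL_EA0_RDREQ_LEVEL": 2_400_000, "TCC_PERF_SEL_EA0_RDREQ": 6_000}

   write_latency = average_latency(c["TCC_PERF_SEL_EA0_WRREQ_LEVEL"], c["TCC_PERF_SEL_EA0_WRREQ"])
   read_latency = average_latency(c["TCC_PERF_SEL_EA0_RDREQ_LEVEL"], c["TCC_PERF_SEL_EA0_RDREQ"])
   print(f"Average EA write latency: {write_latency:.0f} cycles")
   print(f"Average EA read latency:  {read_latency:.0f} cycles")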

MI300 and MI200 Series derived metrics list
==============================================================

.. csv-table::
   :header: "Derived metric", "Definition"
"``ALUStalledByLDS``", "Percentage of GPU time ALU units are stalled due to the LDS input queue being full or the output queue not being ready (value range: 0% (optimal) to 100%)"
|
||||
"``FetchSize``", "Total kilobytes fetched from the video memory; measured with all extra fetches and any cache or memory effects taken into account"
|
||||
"``FlatLDSInsts``", "Average number of flat instructions that read from or write to LDS, run per work item (affected by flow control)"
|
||||
"``FlatVMemInsts``", "Average number of flat instructions that read from or write to the video memory, run per work item (affected by flow control). Includes flat instructions that read from or write to scratch"
|
||||
"``GDSInsts``", "Average number of global data share read or write instructions run per work item (affected by flow control)"
|
||||
"``GPUBusy``", "Percentage of time GPU is busy"
|
||||
"``L2CacheHit``", "Percentage of fetch, write, atomic, and other instructions that hit the data in L2 cache (value range: 0% (no hit) to 100% (optimal))"
|
||||
"``LDSBankConflict``", "Percentage of GPU time LDS is stalled by bank conflicts (value range: 0% (optimal) to 100%)"
|
||||
"``LDSInsts``", "Average number of LDS read or write instructions run per work item (affected by flow control). Excludes flat instructions that read from or write to LDS."
|
||||
"``MemUnitBusy``", "Percentage of GPU time the memory unit is active, which is measured with all extra fetches and writes and any cache or memory effects taken into account (value range: 0% to 100% (fetch-bound))"
|
||||
"``MemUnitStalled``", "Percentage of GPU time the memory unit is stalled (value range: 0% (optimal) to 100%)"
|
||||
"``MemWrites32B``", "Total number of effective 32B write transactions to the memory"
|
||||
"``TCA_BUSY_sum``", "Total number of cycles texture cache arbiter has a pending request, over all texture cache arbiter instances"
|
||||
"``TCA_CYCLE_sum``", "Total number of cycles over all texture cache arbiter instances"
|
||||
"``SALUBusy``", "Percentage of GPU time scalar ALU instructions are processed (value range: 0% to 100% (optimal))"
|
||||
"``SALUInsts``", "Average number of scalar ALU instructions run per work item (affected by flow control)"
|
||||
"``SFetchInsts``", "Average number of scalar fetch instructions from the video memory run per work item (affected by flow control)"
|
||||
"``VALUBusy``", "Percentage of GPU time vector ALU instructions are processed (value range: 0% to 100% (optimal))"
|
||||
"``VALUInsts``", "Average number of vector ALU instructions run per work item (affected by flow control)"
|
||||
"``VALUUtilization``", "Percentage of active vector ALU threads in a wave, where a lower number can mean either more thread divergence in a wave or that the work-group size is not a multiple of 64 (value range: 0%, 100% (optimal - no thread divergence))"
|
||||
"``VFetchInsts``", "Average number of vector fetch instructions from the video memory run per work-item (affected by flow control); excludes flat instructions that fetch from video memory"
|
||||
"``VWriteInsts``", "Average number of vector write instructions to the video memory run per work-item (affected by flow control); excludes flat instructions that write to video memory"
|
||||
"``Wavefronts``", "Total wavefronts"
|
||||
"``WRITE_REQ_32B``", "Total number of 32-byte effective memory writes"
|
||||
"``WriteSize``", "Total kilobytes written to the video memory; measured with all extra fetches and any cache or memory effects taken into account"
|
||||
"``WriteUnitStalled``", "Percentage of GPU time the write unit is stalled (value range: 0% (optimal) to 100%)"
|
||||
|
||||
You can lower ``ALUStalledByLDS`` by reducing LDS bank conflicts or number of LDS accesses.
|
||||
You can lower ``MemUnitStalled`` by reducing the number or size of fetches and writes.
|
||||
``MemUnitBusy`` includes the stall time (``MemUnitStalled``).
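
For example, an ``L2CacheHit``-style percentage can be derived from the per-channel sums listed
later in this topic. This is a hedged sketch mirroring the metric's definition, not necessarily the
exact formula the profiler uses.

.. code-block:: python

   # Sample values standing in for profiler output.
   tcc = {"TCC_HIT_sum": 850_000, "TCC_MISS_sum": 150_000}

   l2_cache_hit = 100.0 * tcc["TCC_HIT_sum"] / (tcc["TCC_HIT_sum"] + tcc["TCC_MISS_sum"])
   print(f"L2CacheHit ~= {l2_cache_hit:.1f}%")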

Hardware counters by and over all texture addressing unit instances
---------------------------------------------------------------------------------------------------------------

The following table shows the hardware counters *by* all texture addressing unit instances.

.. csv-table::
   :header: "Hardware counter", "Definition"
"``TA_BUFFER_WAVEFRONTS_sum``", "Total number of buffer wavefronts processed"
|
||||
"``TA_BUFFER_READ_WAVEFRONTS_sum``", "Total number of buffer read wavefronts processed"
|
||||
"``TA_BUFFER_WRITE_WAVEFRONTS_sum``", "Total number of buffer write wavefronts processed"
|
||||
"``TA_BUFFER_ATOMIC_WAVEFRONTS_sum``", "Total number of buffer atomic wavefronts processed"
|
||||
"``TA_BUFFER_TOTAL_CYCLES_sum``", "Total number of buffer cycles (including read and write) issued to texture cache"
|
||||
"``TA_BUFFER_COALESCED_READ_CYCLES_sum``", "Total number of coalesced buffer read cycles issued to texture cache"
|
||||
"``TA_BUFFER_COALESCED_WRITE_CYCLES_sum``", "Total number of coalesced buffer write cycles issued to texture cache"
|
||||
"``TA_FLAT_READ_WAVEFRONTS_sum``", "Sum of flat opcode reads processed"
|
||||
"``TA_FLAT_WRITE_WAVEFRONTS_sum``", "Sum of flat opcode writes processed"
|
||||
"``TA_FLAT_WAVEFRONTS_sum``", "Total number of flat opcode wavefronts processed"
|
||||
"``TA_FLAT_ATOMIC_WAVEFRONTS_sum``", "Total number of flat opcode atomic wavefronts processed"
|
||||
"``TA_TOTAL_WAVEFRONTS_sum``", "Total number of wavefronts processed"
|
||||
|
||||
The following table shows the hardware counters *over* all texture addressing unit instances.
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Definition"
|
||||
|
||||
"``TA_ADDR_STALLED_BY_TC_CYCLES_sum``", "Total number of cycles texture addressing unit address path is stalled by texture cache"
|
||||
"``TA_ADDR_STALLED_BY_TD_CYCLES_sum``", "Total number of cycles texture addressing unit address path is stalled by texture data unit"
|
||||
"``TA_BUSY_avr``", "Average number of busy cycles"
|
||||
"``TA_BUSY_max``", "Maximum number of texture addressing unit busy cycles"
|
||||
"``TA_BUSY_min``", "Minimum number of texture addressing unit busy cycles"
|
||||
"``TA_DATA_STALLED_BY_TC_CYCLES_sum``", "Total number of cycles texture addressing unit data path is stalled by texture cache"
|
||||
"``TA_TA_BUSY_sum``", "Total number of texture addressing unit busy cycles"
|
||||
|
||||
Hardware counters over all texture cache per channel instances
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Definition"
|
||||
|
||||
"``TCC_ALL_TC_OP_WB_WRITEBACK_sum``", "Total number of writebacks due to all ``TC_OP`` writeback requests."
|
||||
"``TCC_ALL_TC_OP_INV_EVICT_sum``", "Total number of evictions due to all ``TC_OP`` invalidate requests."
|
||||
"``TCC_ATOMIC_sum``", "Total number of L2 cache atomic requests of all types."
|
||||
"``TCC_BUSY_avr``", "Average number of L2 cache busy cycles."
|
||||
"``TCC_BUSY_sum``", "Total number of L2 cache busy cycles."
|
||||
"``TCC_CC_REQ_sum``", "Total number of coherently cached requests."
|
||||
"``TCC_CYCLE_sum``", "Total number of L2 cache free running clocks."
|
||||
"``TCC_EA0_WRREQ_sum``", "Total number of 32-byte and 64-byte transactions going over the ``TC_EA0_wrreq`` interface. Atomics may travel over the same interface and are generally classified as write requests. This does not include probe commands."
|
||||
"``TCC_EA0_WRREQ_64B_sum``", "Total number of 64-byte transactions (write or `CMPSWAP`) going over the ``TC_EA0_wrreq`` interface."
|
||||
"``TCC_EA0_WR_UNCACHED_32B_sum``", "Total Number of 32-byte write or atomic going over the ``TC_EA0_wrreq`` interface due to uncached traffic. Note that coherently cached mtypes can produce uncached requests, and those are included in this. A 64-byte request is counted as 2."
|
||||
"``TCC_EA0_WRREQ_STALL_sum``", "Total Number of cycles a write request is stalled, over all instances."
|
||||
"``TCC_EA0_WRREQ_IO_CREDIT_STALL_sum``", "Total number of cycles an efficiency arbiter write request is stalled due to the interface running out of IO credits, over all instances."
|
||||
"``TCC_EA0_WRREQ_GMI_CREDIT_STALL_sum``", "Total number of cycles an efficiency arbiter write request is stalled due to the interface running out of GMI credits, over all instances."
|
||||
"``TCC_EA0_WRREQ_DRAM_CREDIT_STALL_sum``", "Total number of cycles an efficiency arbiter write request is stalled due to the interface running out of DRAM credits, over all instances."
|
||||
"``TCC_EA0_WRREQ_LEVEL_sum``", "Total number of efficiency arbiter write requests in flight."
|
||||
"``TCC_EA0_RDREQ_LEVEL_sum``", "Total number of efficiency arbiter read requests in flight."
|
||||
"``TCC_EA0_ATOMIC_sum``", "Total Number of 32-byte or 64-byte atomic requests going over the ``TC_EA0_wrreq`` interface."
|
||||
"``TCC_EA0_ATOMIC_LEVEL_sum``", "Total number of efficiency arbiter atomic requests in flight."
|
||||
"``TCC_EA0_RDREQ_sum``", "Total number of 32-byte or 64-byte read requests to efficiency arbiter."
|
||||
"``TCC_EA0_RDREQ_32B_sum``", "Total number of 32-byte read requests to efficiency arbiter."
|
||||
"``TCC_EA0_RD_UNCACHED_32B_sum``", "Total number of 32-byte efficiency arbiter reads due to uncached traffic."
|
||||
"``TCC_EA0_RDREQ_IO_CREDIT_STALL_sum``", "Total number of cycles there is a stall due to the read request interface running out of IO credits."
|
||||
"``TCC_EA0_RDREQ_GMI_CREDIT_STALL_sum``", "Total number of cycles there is a stall due to the read request interface running out of GMI credits."
|
||||
"``TCC_EA0_RDREQ_DRAM_CREDIT_STALL_sum``", "Total number of cycles there is a stall due to the read request interface running out of DRAM credits."
|
||||
"``TCC_EA0_RDREQ_DRAM_sum``", "Total number of 32-byte or 64-byte efficiency arbiter read requests to HBM."
|
||||
"``TCC_EA0_WRREQ_DRAM_sum``", "Total number of 32-byte or 64-byte efficiency arbiter write requests to HBM."
|
||||
"``TCC_HIT_sum``", "Total number of L2 cache hits."
|
||||
"``TCC_MISS_sum``", "Total number of L2 cache misses."
|
||||
"``TCC_NC_REQ_sum``", "Total number of non-coherently cached requests."
|
||||
"``TCC_NORMAL_WRITEBACK_sum``", "Total number of writebacks due to requests that are not writeback requests."
|
||||
"``TCC_NORMAL_EVICT_sum``", "Total number of evictions due to requests that are not invalidate or probe requests."
|
||||
"``TCC_PROBE_sum``", "Total number of probe requests."
|
||||
"``TCC_PROBE_ALL_sum``", "Total number of external probe requests with ``EA0_TCC_preq_all == 1``."
|
||||
"``TCC_READ_sum``", "Total number of L2 cache read requests (including compressed reads but not metadata reads)."
|
||||
"``TCC_REQ_sum``", "Total number of all types of L2 cache requests."
|
||||
"``TCC_RW_REQ_sum``", "Total number of coherently cached with write requests."
|
||||
"``TCC_STREAMING_REQ_sum``", "Total number of L2 cache streaming requests."
|
||||
"``TCC_TAG_STALL_sum``", "Total number of cycles the normal request pipeline in the tag is stalled for any reason."
|
||||
"``TCC_TOO_MANY_EA0_WRREQS_STALL_sum``", "Total number of cycles L2 cache is unable to send an efficiency arbiter write request due to it reaching its maximum capacity of pending efficiency arbiter write requests."
|
||||
"``TCC_UC_REQ_sum``", "Total number of uncached requests."
|
||||
"``TCC_WRITE_sum``", "Total number of L2 cache write requests."
|
||||
"``TCC_WRITEBACK_sum``", "Total number of lines written back to the main memory including writebacks of dirty lines and uncached write or atomic requests."
|
||||
"``TCC_WRREQ_STALL_max``", "Maximum number of cycles a write request is stalled."
|
||||
|
||||

Hardware counters by, for, or over all texture cache per pipe instances
----------------------------------------------------------------------------------------------------------------

The following table shows the hardware counters *by* all texture cache per pipe instances.

.. csv-table::
   :header: "Hardware counter", "Definition"
"``TCP_TA_TCP_STATE_READ_sum``", "Total number of state reads by ATCPPI"
|
||||
"``TCP_TOTAL_CACHE_ACCESSES_sum``", "Total number of vector L1d accesses (including hits and misses)"
|
||||
"``TCP_UTCL1_PERMISSION_MISS_sum``", "Total number of unified translation cache (L1) permission misses"
|
||||
"``TCP_UTCL1_REQUEST_sum``", "Total number of address translation requests to unified translation cache (L1)"
|
||||
"``TCP_UTCL1_TRANSLATION_MISS_sum``", "Total number of unified translation cache (L1) translation misses"
|
||||
"``TCP_UTCL1_TRANSLATION_HIT_sum``", "Total number of unified translation cache (L1) translation hits"
|
||||
|
||||
The following table shows the hardware counters *for* all texture cache per pipe instances.
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Definition"
|
||||
|
||||
"``TCP_TCC_READ_REQ_LATENCY_sum``", "Total vector L1d to L2 request latency over all wavefronts for reads and atomics with return"
|
||||
"``TCP_TCC_WRITE_REQ_LATENCY_sum``", "Total vector L1d to L2 request latency over all wavefronts for writes and atomics without return"
|
||||
"``TCP_TCP_LATENCY_sum``", "Total wave access latency to vector L1d over all wavefronts"
|
||||
|
||||
The following table shows the hardware counters *over* all texture cache per pipe instances.
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Definition"
|
||||
|
||||
"``TCP_ATOMIC_TAGCONFLICT_STALL_CYCLES_sum``", "Total number of cycles tag RAM conflict stalls on an atomic"
|
||||
"``TCP_GATE_EN1_sum``", "Total number of cycles vector L1d interface clocks are turned on"
|
||||
"``TCP_GATE_EN2_sum``", "Total number of cycles vector L1d core clocks are turned on"
|
||||
"``TCP_PENDING_STALL_CYCLES_sum``", "Total number of cycles vector L1d cache is stalled due to data pending from L2 Cache"
|
||||
"``TCP_READ_TAGCONFLICT_STALL_CYCLES_sum``", "Total number of cycles tag RAM conflict stalls on a read"
|
||||
"``TCP_TCC_ATOMIC_WITH_RET_REQ_sum``", "Total number of atomic requests to L2 cache with return"
|
||||
"``TCP_TCC_ATOMIC_WITHOUT_RET_REQ_sum``", "Total number of atomic requests to L2 cache without return"
|
||||
"``TCP_TCC_CC_READ_REQ_sum``", "Total number of coherently cached read requests to L2 cache"
|
||||
"``TCP_TCC_CC_WRITE_REQ_sum``", "Total number of coherently cached write requests to L2 cache"
|
||||
"``TCP_TCC_CC_ATOMIC_REQ_sum``", "Total number of coherently cached atomic requests to L2 cache"
|
||||
"``TCP_TCC_NC_READ_REQ_sum``", "Total number of non-coherently cached read requests to L2 cache"
|
||||
"``TCP_TCC_NC_WRITE_REQ_sum``", "Total number of non-coherently cached write requests to L2 cache"
|
||||
"``TCP_TCC_NC_ATOMIC_REQ_sum``", "Total number of non-coherently cached atomic requests to L2 cache"
|
||||
"``TCP_TCC_READ_REQ_sum``", "Total number of read requests to L2 cache"
|
||||
"``TCP_TCC_RW_READ_REQ_sum``", "Total number of coherently cached with write read requests to L2 cache"
|
||||
"``TCP_TCC_RW_WRITE_REQ_sum``", "Total number of coherently cached with write write requests to L2 cache"
|
||||
"``TCP_TCC_RW_ATOMIC_REQ_sum``", "Total number of coherently cached with write atomic requests to L2 cache"
|
||||
"``TCP_TCC_UC_READ_REQ_sum``", "Total number of uncached read requests to L2 cache"
|
||||
"``TCP_TCC_UC_WRITE_REQ_sum``", "Total number of uncached write requests to L2 cache"
|
||||
"``TCP_TCC_UC_ATOMIC_REQ_sum``", "Total number of uncached atomic requests to L2 cache"
|
||||
"``TCP_TCC_WRITE_REQ_sum``", "Total number of write requests to L2 cache"
|
||||
"``TCP_TCR_TCP_STALL_CYCLES_sum``", "Total number of cycles texture cache router stalls vector L1d"
|
||||
"``TCP_TD_TCP_STALL_CYCLES_sum``", "Total number of cycles texture data unit stalls vector L1d"
|
||||
"``TCP_TOTAL_ACCESSES_sum``", "Total number of vector L1d accesses"
|
||||
"``TCP_TOTAL_READ_sum``", "Total number of vector L1d read accesses"
|
||||
"``TCP_TOTAL_WRITE_sum``", "Total number of vector L1d write accesses"
|
||||
"``TCP_TOTAL_ATOMIC_WITH_RET_sum``", "Total number of vector L1d atomic requests with return"
|
||||
"``TCP_TOTAL_ATOMIC_WITHOUT_RET_sum``", "Total number of vector L1d atomic requests without return"
|
||||
"``TCP_TOTAL_WRITEBACK_INVALIDATES_sum``", "Total number of vector L1d writebacks and invalidates"
|
||||
"``TCP_VOLATILE_sum``", "Total number of L1 volatile pixels or buffers from texture addressing unit"
|
||||
"``TCP_WRITE_TAGCONFLICT_STALL_CYCLES_sum``", "Total number of cycles tag RAM conflict stalls on a write"
|
||||
|
||||
Hardware counter over all texture data unit instances
|
||||
--------------------------------------------------------
|
||||
|
||||
.. csv-table::
|
||||
:header: "Hardware counter", "Definition"
|
||||
|
||||
"``TD_ATOMIC_WAVEFRONT_sum``", "Total number of atomic wavefront instructions"
|
||||
"``TD_COALESCABLE_WAVEFRONT_sum``", "Total number of coalescable wavefronts according to texture addressing unit"
|
||||
"``TD_LOAD_WAVEFRONT_sum``", "Total number of wavefront instructions (read, write, atomic)"
|
||||
"``TD_SPI_STALL_sum``", "Total number of cycles texture data unit is stalled by shader processor input"
|
||||
"``TD_STORE_WAVEFRONT_sum``", "Total number of write wavefront instructions"
|
||||
"``TD_TC_STALL_sum``", "Total number of cycles texture data unit is stalled waiting for texture cache data"
|
||||
"``TD_TD_BUSY_sum``", "Total number of texture data unit busy cycles while it is processing or waiting for data"
|
||||
@@ -1,129 +0,0 @@
|
||||

---
myst:
  html_meta:
    "description lang=en": "Learn about the AMD Instinct MI300 Series architecture."
    "keywords": "Instinct, MI300X, MI300A, microarchitecture, AMD, ROCm"
---

# AMD Instinct™ MI300 Series microarchitecture

The AMD Instinct MI300 Series GPUs are based on the AMD CDNA 3
architecture, which was designed to deliver leadership performance for HPC, artificial intelligence (AI), and machine
learning (ML) workloads. The AMD Instinct MI300 Series GPUs are well-suited for extreme scalability and compute performance, running
on everything from individual servers to the world's largest exascale supercomputers.

With the MI300 Series, AMD is introducing the Accelerator Complex Die (XCD), which contains the
GPU computational elements of the processor along with the lower levels of the cache hierarchy.

The following image depicts the structure of a single XCD in the AMD Instinct MI300 GPU Series.

```{figure} ../../data/shared/xcd-sys-arch.png
---
name: mi300-xcd
align: center
---
XCD-level system architecture showing 40 Compute Units, each with 32 KB of L1 cache, a Unified Compute System with 4 ACE Compute Accelerators, a shared 4 MB L2 cache, and an HWS hardware scheduler.
```

On the XCD, four Asynchronous Compute Engines (ACEs) send compute shader workgroups to the
Compute Units (CUs). The XCD has 40 CUs: 38 active CUs at the aggregate level and 2 disabled CUs for
yield management. The CUs all share a 4 MB L2 cache that serves to coalesce all memory traffic for the
die. With less than half of the CUs of the AMD Instinct MI200 Series compute die, the AMD CDNA™ 3
XCD die is a smaller building block. However, it uses more advanced packaging, and the processor
can include 6 or 8 XCDs for up to 304 CUs, roughly 40% more than the MI250X.

The MI300 Series integrates up to 8 vertically stacked XCDs, 8 stacks of
High Bandwidth Memory 3 (HBM3), and 4 I/O dies (containing system
infrastructure) using AMD Infinity Fabric™ technology as the interconnect.

The Matrix Cores inside the CDNA 3 CUs have significant improvements, emphasizing AI and machine
learning, enhancing the throughput of existing data types while adding support for new data types.
CDNA 2 Matrix Cores support FP16 and BF16, while offering INT8 for inference. Compared to MI250X
GPUs, CDNA 3 Matrix Cores triple the performance for FP16 and BF16, while providing a
performance gain of 6.8 times for INT8. FP8 has a performance gain of 16 times compared to FP32,
while TF32 has a gain of 4 times compared to FP32.

```{list-table} Peak-performance capabilities of the MI300X for different data types.
:header-rows: 1
:name: mi300x-perf-table

* - Computation and Data Type
  - FLOPS/CLOCK/CU
  - Peak TFLOPS
* - Matrix FP64
  - 256
  - 163.4
* - Vector FP64
  - 128
  - 81.7
* - Matrix FP32
  - 256
  - 163.4
* - Vector FP32
  - 256
  - 163.4
* - Vector TF32
  - 1024
  - 653.7
* - Matrix FP16
  - 2048
  - 1307.4
* - Matrix BF16
  - 2048
  - 1307.4
* - Matrix FP8
  - 4096
  - 2614.9
* - Matrix INT8
  - 4096
  - 2614.9
```
The above table summarizes the aggregated peak performance of the AMD Instinct MI300X Open
|
||||
Compute Platform (OCP) Open Accelerator Modules (OAMs) for different data types and command
|
||||
processors. The middle column lists the peak performance (number of data elements processed in a
|
||||
single instruction) of a single compute unit if a SIMD (or matrix) instruction is submitted in each clock
|
||||
cycle. The third column lists the theoretical peak performance of the OAM. The theoretical aggregated
|
||||
peak memory bandwidth of the GPU is 5.3 TB per second.
|
||||
|
||||
The following image shows the block diagram of the APU (left) and the OAM package (right) both
|
||||
connected via AMD Infinity Fabric™ network on-chip.
|
||||
|
||||
```{figure} ../../data/conceptual/gpu-arch/image008.png
|
||||
---
|
||||
name: mi300-arch
|
||||
alt:
|
||||
align: center
|
||||
---
|
||||
MI300 Series system architecture showing MI300A (left) with 6 XCDs and 3 CCDs, while the MI300X (right) has 8 XCDs.
|
||||
```
|
||||
|
||||
## Node-level architecture
|
||||
|
||||
```{figure} ../../data/shared/mi300-node-level-arch.png
|
||||
---
|
||||
name: mi300-node
|
||||
|
||||
align: center
|
||||
---
|
||||
MI300 Series node-level architecture showing 8 fully interconnected MI300X OAM modules connected to (optional) PCIEe switches via retimers and HGX connectors.
|
||||
```
|
||||
|
||||
The image above shows the node-level architecture of a system with AMD EPYC processors in a
|
||||
dual-socket configuration and eight AMD Instinct MI300X GPUs. The MI300X OAMs attach to the
|
||||
host system via PCIe Gen 5 x16 links (yellow lines). The GPUs are using seven high-bandwidth,
|
||||
low-latency AMD Infinity Fabric™ links (red lines) to form a fully connected 8-GPU system.
|
||||
|
||||
<!---
|
||||
We need performance data about the P2P communication here.
|
||||
-->
|
||||
@@ -1,530 +0,0 @@
.. meta::
   :description: MI355 Series performance counters and metrics
   :keywords: MI355, MI355X, MI3XX

***********************************
MI350 Series performance counters
***********************************

This topic lists and describes the hardware performance counters and derived metrics available on the AMD Instinct MI350 and MI355 GPUs. These counters are available for profiling using `ROCprofiler-SDK <https://rocm.docs.amd.com/projects/rocprofiler-sdk/en/latest/index.html>`_ and `ROCm Compute Profiler <https://rocm.docs.amd.com/projects/rocprofiler-compute/en/latest/>`_.

The following sections list the performance counters based on the IP blocks.

Command processor packet processor (CPC) counters
==================================================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - CPC_ALWAYS_COUNT
     - Always count.
   * - CPC_ADC_VALID_CHUNK_NOT_AVAIL
     - ADC valid chunk is not available when dispatch walking is in progress in the multi-xcc mode.
   * - CPC_ADC_DISPATCH_ALLOC_DONE
     - ADC dispatch allocation is done.
   * - CPC_ADC_VALID_CHUNK_END
     - ADC crawler's valid chunk end in the multi-xcc mode.
   * - CPC_SYNC_FIFO_FULL_LEVEL
     - SYNC FIFO full last cycles.
   * - CPC_SYNC_FIFO_FULL
     - SYNC FIFO full times.
   * - CPC_GD_BUSY
     - ADC busy.
   * - CPC_TG_SEND
     - ADC thread group send.
   * - CPC_WALK_NEXT_CHUNK
     - ADC walking next valid chunk in the multi-xcc mode.
   * - CPC_STALLED_BY_SE0_SPI
     - ADC CSDATA stalled by SE0SPI.
   * - CPC_STALLED_BY_SE1_SPI
     - ADC CSDATA stalled by SE1SPI.
   * - CPC_STALLED_BY_SE2_SPI
     - ADC CSDATA stalled by SE2SPI.
   * - CPC_STALLED_BY_SE3_SPI
     - ADC CSDATA stalled by SE3SPI.
   * - CPC_LTE_ALL
     - CPC sync counter LteAll. Only the master XCD manages LteAll.
   * - CPC_SYNC_WRREQ_FIFO_BUSY
     - CPC sync counter request FIFO is not empty.
   * - CPC_CANE_BUSY
     - CPC CANE bus is busy, which indicates the presence of in-flight sync counter requests.
   * - CPC_CANE_STALL
     - CPC sync counter sending is stalled by CANE.

Shader pipe interpolators (SPI) counters
=========================================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - SPI_CS0_WINDOW_VALID
     - Clock count enabled by the PIPE0 perfcounter_start event.
   * - SPI_CS0_BUSY
     - Number of clocks with outstanding waves for PIPE0 (SPI or SH).
   * - SPI_CS0_NUM_THREADGROUPS
     - Number of thread groups launched for PIPE0.
   * - SPI_CS0_CRAWLER_STALL
     - Number of clocks when the PIPE0 event or wave order FIFO is full.
   * - SPI_CS0_EVENT_WAVE
     - Number of PIPE0 events and waves.
   * - SPI_CS0_WAVE
     - Number of PIPE0 waves.
   * - SPI_CS1_WINDOW_VALID
     - Clock count enabled by the PIPE1 perfcounter_start event.
   * - SPI_CS1_BUSY
     - Number of clocks with outstanding waves for PIPE1 (SPI or SH).
   * - SPI_CS1_NUM_THREADGROUPS
     - Number of thread groups launched for PIPE1.
   * - SPI_CS1_CRAWLER_STALL
     - Number of clocks when the PIPE1 event or wave order FIFO is full.
   * - SPI_CS1_EVENT_WAVE
     - Number of PIPE1 events and waves.
   * - SPI_CS1_WAVE
     - Number of PIPE1 waves.
   * - SPI_CS2_WINDOW_VALID
     - Clock count enabled by the PIPE2 perfcounter_start event.
   * - SPI_CS2_BUSY
     - Number of clocks with outstanding waves for PIPE2 (SPI or SH).
   * - SPI_CS2_NUM_THREADGROUPS
     - Number of thread groups launched for PIPE2.
   * - SPI_CS2_CRAWLER_STALL
     - Number of clocks when the PIPE2 event or wave order FIFO is full.
   * - SPI_CS2_EVENT_WAVE
     - Number of PIPE2 events and waves.
   * - SPI_CS2_WAVE
     - Number of PIPE2 waves.
   * - SPI_CS3_WINDOW_VALID
     - Clock count enabled by the PIPE3 perfcounter_start event.
   * - SPI_CS3_BUSY
     - Number of clocks with outstanding waves for PIPE3 (SPI or SH).
   * - SPI_CS3_NUM_THREADGROUPS
     - Number of thread groups launched for PIPE3.
   * - SPI_CS3_CRAWLER_STALL
     - Number of clocks when the PIPE3 event or wave order FIFO is full.
   * - SPI_CS3_EVENT_WAVE
     - Number of PIPE3 events and waves.
   * - SPI_CS3_WAVE
     - Number of PIPE3 waves.
   * - SPI_CSQ_P0_Q0_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue0.
   * - SPI_CSQ_P0_Q1_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue1.
   * - SPI_CSQ_P0_Q2_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue2.
   * - SPI_CSQ_P0_Q3_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue3.
   * - SPI_CSQ_P0_Q4_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue4.
   * - SPI_CSQ_P0_Q5_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue5.
   * - SPI_CSQ_P0_Q6_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue6.
   * - SPI_CSQ_P0_Q7_OCCUPANCY
     - Sum of occupancy info for PIPE0 Queue7.
   * - SPI_CSQ_P1_Q0_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue0.
   * - SPI_CSQ_P1_Q1_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue1.
   * - SPI_CSQ_P1_Q2_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue2.
   * - SPI_CSQ_P1_Q3_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue3.
   * - SPI_CSQ_P1_Q4_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue4.
   * - SPI_CSQ_P1_Q5_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue5.
   * - SPI_CSQ_P1_Q6_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue6.
   * - SPI_CSQ_P1_Q7_OCCUPANCY
     - Sum of occupancy info for PIPE1 Queue7.
   * - SPI_CSQ_P2_Q0_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue0.
   * - SPI_CSQ_P2_Q1_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue1.
   * - SPI_CSQ_P2_Q2_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue2.
   * - SPI_CSQ_P2_Q3_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue3.
   * - SPI_CSQ_P2_Q4_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue4.
   * - SPI_CSQ_P2_Q5_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue5.
   * - SPI_CSQ_P2_Q6_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue6.
   * - SPI_CSQ_P2_Q7_OCCUPANCY
     - Sum of occupancy info for PIPE2 Queue7.
   * - SPI_CSQ_P3_Q0_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue0.
   * - SPI_CSQ_P3_Q1_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue1.
   * - SPI_CSQ_P3_Q2_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue2.
   * - SPI_CSQ_P3_Q3_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue3.
   * - SPI_CSQ_P3_Q4_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue4.
   * - SPI_CSQ_P3_Q5_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue5.
   * - SPI_CSQ_P3_Q6_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue6.
   * - SPI_CSQ_P3_Q7_OCCUPANCY
     - Sum of occupancy info for PIPE3 Queue7.
   * - SPI_CSQ_P0_OCCUPANCY
     - Sum of occupancy info for all PIPE0 queues.
   * - SPI_CSQ_P1_OCCUPANCY
     - Sum of occupancy info for all PIPE1 queues.
   * - SPI_CSQ_P2_OCCUPANCY
     - Sum of occupancy info for all PIPE2 queues.
   * - SPI_CSQ_P3_OCCUPANCY
     - Sum of occupancy info for all PIPE3 queues.
   * - SPI_VWC0_VDATA_VALID_WR
     - Number of clocks VGPR bus_0 writes VGPRs.
   * - SPI_VWC1_VDATA_VALID_WR
     - Number of clocks VGPR bus_1 writes VGPRs.
   * - SPI_CSC_WAVE_CNT_BUSY
     - Number of cycles when there is any wave in the pipe.

Compute unit (SQ) counters
===========================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - SQ_INSTS_VALU_MFMA_F6F4
     - Number of VALU V_MFMA_*_F6F4 instructions.
   * - SQ_INSTS_VALU_MFMA_MOPS_F6F4
     - Number of VALU matrix math operations (add or mul) performed, divided by 512, assuming a full EXEC mask, for the F6 or F4 data types.
   * - SQ_ACTIVE_INST_VALU2
     - Number of quad-cycles when two VALU instructions are issued (per-simd, nondeterministic).
   * - SQ_INSTS_LDS_LOAD
     - Number of LDS load instructions issued (per-simd, emulated).
   * - SQ_INSTS_LDS_STORE
     - Number of LDS store instructions issued (per-simd, emulated).
   * - SQ_INSTS_LDS_ATOMIC
     - Number of LDS atomic instructions issued (per-simd, emulated).
   * - SQ_INSTS_LDS_LOAD_BANDWIDTH
     - Total number of 64-byte units loaded, (instrSize * CountOnes(EXEC))/64 (per-simd, emulated).
   * - SQ_INSTS_LDS_STORE_BANDWIDTH
     - Total number of 64-byte units written, (instrSize * CountOnes(EXEC))/64 (per-simd, emulated).
   * - SQ_INSTS_LDS_ATOMIC_BANDWIDTH
     - Total number of 64-byte atomic units, (instrSize * CountOnes(EXEC))/64 (per-simd, emulated).
   * - SQ_INSTS_VALU_FLOPS_FP16
     - Counts FLOPS per instruction on float 16, excluding MFMA/SMFMA.
   * - SQ_INSTS_VALU_FLOPS_FP32
     - Counts FLOPS per instruction on float 32, excluding MFMA/SMFMA.
   * - SQ_INSTS_VALU_FLOPS_FP64
     - Counts FLOPS per instruction on float 64, excluding MFMA/SMFMA.
   * - SQ_INSTS_VALU_FLOPS_FP16_TRANS
     - Counts FLOPS per instruction on float 16 transcendentals, excluding MFMA/SMFMA.
   * - SQ_INSTS_VALU_FLOPS_FP32_TRANS
     - Counts FLOPS per instruction on float 32 transcendentals, excluding MFMA/SMFMA.
   * - SQ_INSTS_VALU_FLOPS_FP64_TRANS
     - Counts FLOPS per instruction on float 64 transcendentals, excluding MFMA/SMFMA.
   * - SQ_INSTS_VALU_IOPS
     - Counts OPS per instruction on integer, unsigned, or bit data (per-simd, emulated).
   * - SQ_LDS_DATA_FIFO_FULL
     - Number of cycles the LDS data FIFO is full (nondeterministic, unwindowed).
   * - SQ_LDS_CMD_FIFO_FULL
     - Number of cycles the LDS command FIFO is full (nondeterministic, unwindowed).
   * - SQ_VMEM_TA_ADDR_FIFO_FULL
     - Number of cycles texture requests are stalled due to a full address FIFO in the TA (nondeterministic, unwindowed).
   * - SQ_VMEM_TA_CMD_FIFO_FULL
     - Number of cycles texture requests are stalled due to a full command FIFO in the TA (nondeterministic, unwindowed).
   * - SQ_VMEM_WR_TA_DATA_FIFO_FULL
     - Number of cycles texture writes are stalled due to a full data FIFO in the TA (nondeterministic, unwindowed).
   * - SQC_ICACHE_MISSES_DUPLICATE
     - Number of duplicate misses (access to a non-resident, miss-pending cache line) (per-SQ, per-bank, nondeterministic).
   * - SQC_DCACHE_MISSES_DUPLICATE
     - Number of duplicate misses (access to a non-resident, miss-pending cache line) (per-SQ, per-bank, nondeterministic).

Texture addressing (TA) unit counters
======================================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - TA_BUFFER_READ_LDS_WAVEFRONTS
     - Number of buffer read wavefronts for LDS return processed by the TA.
   * - TA_FLAT_READ_LDS_WAVEFRONTS
     - Number of flat opcode reads for LDS return processed by the TA.

Texture data (TD) unit counters
================================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - TD_WRITE_ACKT_WAVEFRONT
     - Number of write acknowledgments sent to the SQ (not to the SP).
   * - TD_TD_SP_TRAFFIC
     - Number of times this TD sends data to the SP.

Texture cache per pipe (TCP) counters
======================================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - TCP_TCP_TA_ADDR_STALL_CYCLES
     - Number of cycles TCP stalls the TA address interface.
   * - TCP_TCP_TA_DATA_STALL_CYCLES
     - Number of cycles TCP stalls the TA data interface. Now windowed.
   * - TCP_LFIFO_STALL_CYCLES
     - Memory latency FIFOs full stall cycles.
   * - TCP_RFIFO_STALL_CYCLES
     - Memory request FIFOs full stall cycles.
   * - TCP_TCR_RDRET_STALL
     - Write into cache stalled by read return from TCR.
   * - TCP_PENDING_STALL_CYCLES
     - Stall due to data pending from L2.
   * - TCP_UTCL1_SERIALIZATION_STALL
     - Total number of stalls caused by serializing translation requests through the UTCL1.
   * - TCP_UTCL1_THRASHING_STALL
     - Stall caused by the thrashing feature in any probe. Lacks accuracy when the stall signal overlaps between probe0 and probe1, which is worse with MECO of thrashing deadlock. Some probe0 events could miss being counted with MECO on. This performance counter provides a rough thrashing estimate.
   * - TCP_UTCL1_TRANSLATION_MISS_UNDER_MISS
     - Translation miss_under_miss.
   * - TCP_UTCL1_STALL_INFLIGHT_MAX
     - Total UTCL1 stalls due to inflight counter saturation.
   * - TCP_UTCL1_STALL_LRU_INFLIGHT
     - Total UTCL1 stalls due to an LRU cache line with inflight traffic.
   * - TCP_UTCL1_STALL_MULTI_MISS
     - Total UTCL1 stalls due to arbitrated multiple misses.
   * - TCP_UTCL1_LFIFO_FULL
     - Total UTCL1 and UTCL2 latency, which hides FIFO full cycles.
   * - TCP_UTCL1_STALL_LFIFO_NOT_RES
     - Total UTCL1 stalls due to UTCL2 latency, which hides FIFO output (not resident).
   * - TCP_UTCL1_STALL_UTCL2_REQ_OUT_OF_CREDITS
     - Total UTCL1 stalls due to UTCL2_req being out of credits.
   * - TCP_CLIENT_UTCL1_INFLIGHT
     - The sum of inflight client-to-UTCL1 requests per cycle.
   * - TCP_TAGRAM0_REQ
     - Total L2 requests mapping to TagRAM 0 from this TCP to all TCCs.
   * - TCP_TAGRAM1_REQ
     - Total L2 requests mapping to TagRAM 1 from this TCP to all TCCs.
   * - TCP_TAGRAM2_REQ
     - Total L2 requests mapping to TagRAM 2 from this TCP to all TCCs.
   * - TCP_TAGRAM3_REQ
     - Total L2 requests mapping to TagRAM 3 from this TCP to all TCCs.
   * - TCP_TCP_LATENCY
     - Total TCP wave latency (from the first clock of the wave entering to the first clock of the wave leaving). Divide by TA_TCP_STATE_READ to find the average wave latency.
   * - TCP_TCC_READ_REQ_LATENCY
     - Total TCP-to-TCC request latency for reads and atomics with return. Not windowed.
   * - TCP_TCC_WRITE_REQ_LATENCY
     - Total TCP-to-TCC request latency for writes and atomics without return. Not windowed.
   * - TCP_TCC_WRITE_REQ_HOLE_LATENCY
     - Total TCP request-to-TCC hole latency for writes and atomics. Not windowed.

Texture cache per channel (TCC) counters
=========================================

.. list-table::
   :header-rows: 1

   * - Hardware counter
     - Definition
   * - TCC_READ_SECTORS
     - Total number of 32B data sectors in read requests.
   * - TCC_WRITE_SECTORS
     - Total number of 32B data sectors in write requests.
   * - TCC_ATOMIC_SECTORS
     - Total number of 32B data sectors in atomic requests.
   * - TCC_BYPASS_REQ
     - Number of bypass requests. This is measured at the tag block.
   * - TCC_LATENCY_FIFO_FULL
     - Number of cycles when the latency FIFO is full.
   * - TCC_SRC_FIFO_FULL
     - Number of cycles when the SRC FIFO is assumed to be full, as measured at the IB block.
   * - TCC_EA0_RDREQ_64B
     - Number of 64-byte TCC/EA read requests.
   * - TCC_EA0_RDREQ_128B
     - Number of 128-byte TCC/EA read requests.
   * - TCC_IB_REQ
     - Number of requests through the IB. This measures the number of raw requests from graphics clients to this TCC.
   * - TCC_IB_STALL
     - Number of cycles when the IB output is stalled.
   * - TCC_EA0_WRREQ_WRITE_DRAM
     - Number of TCC/EA write requests (32-byte or 64-byte) destined for DRAM (MC).
   * - TCC_EA0_WRREQ_ATOMIC_DRAM
     - Number of TCC/EA atomic requests (32-byte or 64-byte) destined for DRAM (MC).
   * - TCC_EA0_RDREQ_DRAM_32B
     - Number of 32-byte TCC/EA read requests due to DRAM traffic. One 64-byte request is counted as two and one 128-byte as four.
   * - TCC_EA0_RDREQ_GMI_32B
     - Number of 32-byte TCC/EA read requests due to GMI traffic. One 64-byte request is counted as two and one 128-byte as four.
   * - TCC_EA0_RDREQ_IO_32B
     - Number of 32-byte TCC/EA read requests due to IO traffic. One 64-byte request is counted as two and one 128-byte as four.
   * - TCC_EA0_WRREQ_WRITE_DRAM_32B
     - Number of 32-byte TCC/EA write requests due to DRAM traffic. One 64-byte request is counted as two.
   * - TCC_EA0_WRREQ_ATOMIC_DRAM_32B
     - Number of 32-byte TCC/EA atomic requests due to DRAM traffic. One 64-byte request is counted as two.
   * - TCC_EA0_WRREQ_WRITE_GMI_32B
     - Number of 32-byte TCC/EA write requests due to GMI traffic. One 64-byte request is counted as two.
   * - TCC_EA0_WRREQ_ATOMIC_GMI_32B
     - Number of 32-byte TCC/EA atomic requests due to GMI traffic. One 64-byte request is counted as two.
   * - TCC_EA0_WRREQ_WRITE_IO_32B
     - Number of 32-byte TCC/EA write requests due to IO traffic. One 64-byte request is counted as two.
   * - TCC_EA0_WRREQ_ATOMIC_IO_32B
     - Number of 32-byte TCC/EA atomic requests due to IO traffic. One 64-byte request is counted as two.
@@ -1,10 +1,3 @@
<head>
  <meta charset="UTF-8">
  <meta name="description" content="GPU isolation techniques">
  <meta name="keywords" content="GPU isolation techniques, UUID, universally unique identifier,
  environment variables, virtual machines, AMD, ROCm">
</head>

# GPU isolation techniques

Restricting the access of applications to a subset of GPUs, aka isolating
@@ -29,12 +22,12 @@ A list of device indices or {abbr}`UUID (universally unique identifier)`s
that will be exposed to applications.

Runtime
: ROCm Software Runtime. Applies to all applications using the user mode ROCm
: ROCm Platform Runtime. Applies to all applications using the user mode ROCm
software stack.

```{code-block} shell
:caption: Example exposing the first device and a device based on UUID.
export ROCR_VISIBLE_DEVICES="0,GPU-4b2c1a9f-8d3e-6f7a-b5c9-2e4d8a1f6c3b"
export ROCR_VISIBLE_DEVICES="0,GPU-DEADBEEFDEADBEEF"
```

### `GPU_DEVICE_ORDINAL`
@@ -42,7 +35,7 @@ export ROCR_VISIBLE_DEVICES="0,GPU-4b2c1a9f-8d3e-6f7a-b5c9-2e4d8a1f6c3b"

Device indices exposed to OpenCL and HIP applications.

Runtime
: ROCm Compute Language Runtime (`ROCclr`). Applies to applications and runtimes
: ROCm Common Language Runtime (`ROCclr`). Applies to applications and runtimes
using the `ROCclr` abstraction layer, including HIP and OpenCL applications.

```{code-block} shell
234
docs/conceptual/gpu-memory.md
Normal file
@@ -0,0 +1,234 @@
# GPU memory

For the HIP reference documentation, see:

* {doc}`hip:.doxygen/docBin/html/group___memory`
* {doc}`hip:.doxygen/docBin/html/group___memory_m`

Host memory exists on the host (e.g. CPU) of the machine in random access memory (RAM).

Device memory exists on the device (e.g. GPU) of the machine in video random access memory (VRAM).
Recent architectures use graphics double data rate (GDDR) synchronous dynamic random-access memory (SDRAM), such as GDDR6, or high-bandwidth memory (HBM), such as HBM2e.

## Memory allocation

Memory can be allocated as pageable, pinned, or managed memory.
The following API calls result in these allocations:

| API                | Data location | Allocation |
|--------------------|---------------|------------|
| System allocated   | Host          | Pageable   |
| `hipMallocManaged` | Host          | Managed    |
| `hipHostMalloc`    | Host          | Pinned     |
| `hipMalloc`        | Device        | Pinned     |

:::{tip}
`hipMalloc` and `hipFree` are blocking calls. However, HIP recently added the non-blocking versions `hipMallocAsync` and `hipFreeAsync`, which take a stream as an additional argument.
:::
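
As a minimal sketch of the non-blocking variants mentioned in the tip above (error checking omitted for brevity; the buffer size is arbitrary):

```cpp
#include <hip/hip_runtime.h>

int main() {
    hipStream_t stream;
    hipStreamCreate(&stream);

    // Non-blocking allocation: the request is ordered on the stream
    // instead of synchronizing the device like hipMalloc does.
    float* d_buf = nullptr;
    hipMallocAsync(reinterpret_cast<void**>(&d_buf), 1024 * sizeof(float), stream);

    // ... enqueue kernels or copies that use d_buf on the same stream ...

    hipFreeAsync(d_buf, stream);   // freed in stream order
    hipStreamSynchronize(stream);  // wait for all of the above to complete
    hipStreamDestroy(stream);
    return 0;
}
```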

### Pageable memory

Pageable memory is what you usually get when calling `malloc` or `new` in a C++ application.
It is unique in that it exists on "pages" (blocks of memory), which can be migrated to other memory storage.
For example, memory can migrate between CPU sockets on a motherboard, or a system that runs out of space in RAM can start dumping pages of RAM into the swap partition of the hard drive.

### Pinned memory

Pinned memory (or page-locked memory, or non-pageable memory) is host memory that is mapped into the address space of all GPUs, meaning that the pointer can be used on both host and device.
Accessing host-resident pinned memory in device kernels is generally not recommended for performance, as it can force the data to traverse the host-device interconnect (e.g. PCIe), which is much slower than the on-device bandwidth (>40x on MI200).

Pinned host memory can be allocated with one of two types of coherence support, as described in [](#coherence) below.

:::{note}
In HIP, pinned memory allocations are coherent by default (`hipHostMallocDefault`).
There are additional pinned memory flags (e.g. `hipHostMallocMapped` and `hipHostMallocPortable`).
On MI200 these options do not impact performance.
<!-- TODO: link to programming_manual#memory-allocation-flags -->
For more information, see the section *memory allocation flags* in the HIP Programming Guide: {doc}`hip:user_guide/programming_manual`.
:::

Much like how a process can be locked to a CPU core by setting affinity, a pinned memory allocator can lock the allocation to a specific location in the memory storage system.
On multi-socket systems it is important to ensure that pinned memory is located on the same socket as the owning process, or else each cache line will be moved through the CPU-CPU interconnect, thereby increasing latency and potentially decreasing bandwidth.

In practice, pinned memory is used to improve transfer times between host and device.
For transfer operations, such as `hipMemcpy` or `hipMemcpyAsync`, using pinned memory instead of pageable memory on the host can lead to a ~3x improvement in bandwidth.

:::{tip}
If the application needs to move data back and forth between device and host (separate allocations), use pinned memory on the host side.
:::
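
A minimal sketch of the pinned-transfer pattern described above (error checking omitted; the sizes are illustrative):

```cpp
#include <hip/hip_runtime.h>

int main() {
    constexpr size_t N = 1 << 24;  // ~16M floats, an arbitrary example size
    float* h_pinned = nullptr;
    float* d_buf = nullptr;

    // Pinned host allocation: page-locked, so the DMA engine can transfer
    // it directly instead of staging through an internal bounce buffer.
    hipHostMalloc(reinterpret_cast<void**>(&h_pinned), N * sizeof(float),
                  hipHostMallocDefault);
    hipMalloc(reinterpret_cast<void**>(&d_buf), N * sizeof(float));

    // Copies from pinned memory can run asynchronously and typically reach
    // ~3x the bandwidth of copies from pageable memory.
    hipMemcpyAsync(d_buf, h_pinned, N * sizeof(float),
                   hipMemcpyHostToDevice, /*stream=*/0);
    hipDeviceSynchronize();

    hipFree(d_buf);
    hipHostFree(h_pinned);
    return 0;
}
```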

### Managed memory

Managed memory refers to universally addressable, or unified, memory available on the MI200 series of GPUs.
Much like pinned memory, managed memory shares a pointer between host and device and (by default) supports fine-grained coherence; however, managed memory can also automatically migrate pages between host and device.
The allocation is managed by the AMD GPU driver using the Linux HMM (Heterogeneous Memory Management) mechanism.

If heterogeneous memory management (HMM) is not available, `hipMallocManaged` will fall back to using system memory and will act like pinned host memory.
Other managed memory API calls will have undefined behavior.
It is therefore recommended to check for managed memory capability with `hipDeviceGetAttribute` and `hipDeviceAttributeManagedMemory`.

HIP supports additional calls that work with page migration:

* `hipMemAdvise`
* `hipMemPrefetchAsync`

:::{tip}
If the application needs to use data on both host and device regularly, does not want to deal with separate allocations, and is not worried about maxing out the VRAM on MI200 GPUs (64 GB per GCD), use managed memory.
:::

:::{tip}
If managed memory performance is poor, check whether managed memory is supported on your system and whether page migration (XNACK) is enabled.
:::
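
Putting the capability check and the page-migration hints above together, a small sketch (error handling omitted; the prefetch is an optional hint):

```cpp
#include <hip/hip_runtime.h>

int main() {
    // Verify managed memory support before relying on it, as recommended above.
    int managed = 0;
    hipDeviceGetAttribute(&managed, hipDeviceAttributeManagedMemory, /*device=*/0);
    if (!managed) return 1;

    float* data = nullptr;
    hipMallocManaged(reinterpret_cast<void**>(&data), 1024 * sizeof(float));

    for (int i = 0; i < 1024; ++i) data[i] = 1.0f;  // pages touched on host first

    // Optional hint: prefetch the pages to device 0 before a kernel uses them,
    // avoiding demand-paging faults on the first device access.
    hipMemPrefetchAsync(data, 1024 * sizeof(float), /*device=*/0, /*stream=*/0);
    hipDeviceSynchronize();

    hipFree(data);
    return 0;
}
```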

## Access behavior

Memory allocations for GPUs behave as follows:

| API                | Data location | Host access  | Device access        |
|--------------------|---------------|--------------|----------------------|
| System allocated   | Host          | Local access | Unhandled page fault |
| `hipMallocManaged` | Host          | Local access | Zero-copy            |
| `hipHostMalloc`    | Host          | Local access | Zero-copy*           |
| `hipMalloc`        | Device        | Zero-copy    | Local access         |

Zero-copy accesses happen over the Infinity Fabric interconnect or PCIe lanes on discrete GPUs.

:::{note}
While `hipHostMalloc` allocated memory is accessible by a device, the host pointer must be converted to a device pointer with `hipHostGetDevicePointer`.

Memory allocated through standard system allocators, such as `malloc`, can be accessed by a device by registering the memory via `hipHostRegister`.
The device pointer to be used in kernels can then be retrieved with `hipHostGetDevicePointer`.
Registered memory is treated like `hipHostMalloc` and will have similar performance.

On devices that support and have [](#xnack) enabled, such as the MI250X, `hipHostRegister` is not required, as memory accesses are handled via automatic page migration.
:::
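
A minimal sketch of the register-then-translate pattern from the note above (error checking omitted):

```cpp
#include <hip/hip_runtime.h>
#include <cstdlib>

int main() {
    constexpr size_t bytes = 4096;
    float* h_data = static_cast<float*>(std::malloc(bytes));  // plain pageable memory

    // Pin the existing allocation so the GPU can access it...
    hipHostRegister(h_data, bytes, hipHostRegisterDefault);

    // ...then fetch the device-side alias of the same memory.
    float* d_alias = nullptr;
    hipHostGetDevicePointer(reinterpret_cast<void**>(&d_alias), h_data, 0);

    // d_alias can now be passed to kernels; accesses are zero-copy.

    hipHostUnregister(h_data);
    std::free(h_data);
    return 0;
}
```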

### XNACK

Normally, host and device memory are separate, and data has to be transferred manually via `hipMemcpy`.

On a subset of GPUs, such as the MI200, there is an option to automatically migrate pages of memory between host and device.
This is important for managed memory, where the locality of the data is important for performance.
Depending on the system, page migration may be disabled by default, in which case managed memory will act like pinned host memory and suffer degraded performance.

*XNACK* describes the GPU's ability to retry memory accesses that failed due to a page fault (which normally would lead to a memory access error), and instead retrieve the missing page.

This also affects memory allocated by the system, as indicated by the following table:

| API                | Data location | Host after device access | Device after host access |
|--------------------|---------------|--------------------------|--------------------------|
| System allocated   | Host          | Migrate page to host     | Migrate page to device   |
| `hipMallocManaged` | Host          | Migrate page to host     | Migrate page to device   |
| `hipHostMalloc`    | Host          | Local access             | Zero-copy                |
| `hipMalloc`        | Device        | Zero-copy                | Local access             |

To check if page migration is available on a platform, use `rocminfo`:

```sh
$ rocminfo | grep xnack
  Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-
```

Here, `xnack-` means that XNACK is available but is disabled by default.
Turning on XNACK by setting the environment variable `HSA_XNACK=1` gives the expected result, `xnack+`:

```sh
$ HSA_XNACK=1 rocminfo | grep xnack
  Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack+
```

`hipcc` by default will generate code that runs correctly with XNACK either enabled or disabled.
Appending `xnack+` or `xnack-` to the `--offload-arch=` option forces the code to run only with XNACK enabled or disabled, respectively.

```sh
# Compiled kernels will run regardless of whether XNACK is enabled or disabled.
hipcc --offload-arch=gfx90a

# Compiled kernels will only run if XNACK is enabled with HSA_XNACK=1.
hipcc --offload-arch=gfx90a:xnack+

# Compiled kernels will only run if XNACK is disabled with HSA_XNACK=0.
hipcc --offload-arch=gfx90a:xnack-
```

:::{tip}
If you want to make use of page migration, use managed memory. While pageable memory will migrate correctly, it is not a portable solution and can have performance issues if the accessed data isn't page aligned.
:::
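
One way to query from application code whether the device can handle pageable (system-allocated) memory, which on gfx90a is the case when XNACK is active, is the `hipDeviceAttributePageableMemoryAccess` attribute; a small sketch (the interpretation printed in the comment is an assumption based on the behavior described above):

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    int pageable = 0;
    // Nonzero when the GPU can coherently access pageable host memory;
    // on gfx90a this requires running with HSA_XNACK=1.
    hipDeviceGetAttribute(&pageable, hipDeviceAttributePageableMemoryAccess, 0);
    std::printf("Pageable memory access: %s\n", pageable ? "yes" : "no");
    return 0;
}
```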

### Coherence

* *Coarse-grained coherence* means that memory is only considered up to date at kernel boundaries, which can be enforced through `hipDeviceSynchronize`, `hipStreamSynchronize`, or any blocking operation that acts on the null stream (e.g. `hipMemcpy`).
  For example, cacheable memory is a type of coarse-grained memory where an up-to-date copy of the data can be stored elsewhere (e.g. in an L2 cache).
* *Fine-grained coherence* means the coherence is supported while a CPU/GPU kernel is running.
  This can be useful if both host and device are operating on the same dataspace using system-scope atomic operations (e.g. updating an error code or flag in a buffer); see the sketch after the tables below.
  Fine-grained memory implies that up-to-date data may be made visible to others regardless of kernel boundaries, as discussed above.

| API             | Flag                       | Coherence      |
|-----------------|----------------------------|----------------|
| `hipHostMalloc` | `hipHostMallocDefault`     | Fine-grained   |
| `hipHostMalloc` | `hipHostMallocNonCoherent` | Coarse-grained |

| API                     | Flag                         | Coherence      |
|-------------------------|------------------------------|----------------|
| `hipExtMallocWithFlags` | `hipDeviceMallocDefault`     | Coarse-grained |
| `hipExtMallocWithFlags` | `hipDeviceMallocFinegrained` | Fine-grained   |

| API                | `hipMemAdvise` argument      | Coherence      |
|--------------------|------------------------------|----------------|
| `hipMallocManaged` |                              | Fine-grained   |
| `hipMallocManaged` | `hipMemAdviseSetCoarseGrain` | Coarse-grained |
| `malloc`           |                              | Fine-grained   |
| `malloc`           | `hipMemAdviseSetCoarseGrain` | Coarse-grained |

:::{tip}
Try to design your algorithms to avoid host-device memory coherence (e.g. system-scope atomics). While it can be a useful feature in very specific cases, it is not supported on all systems, and can negatively impact performance by introducing the host-device interconnect bottleneck.
:::
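
As an illustration of the flag-based pattern above, here is a minimal sketch of a device kernel signaling the host through a flag in fine-grained pinned memory. It assumes a platform where host-device fine-grained coherence is actually supported (for example, MI200 with XNACK enabled); the kernel name and polling loop are illustrative only, and this is exactly the pattern the tip above recommends avoiding unless genuinely needed:

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

// The device signals progress through a flag in fine-grained host memory
// using a system-scope atomic, visible to the host while the kernel runs.
__global__ void signal(int* flag) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        __hip_atomic_store(flag, 1, __ATOMIC_RELEASE, __HIP_MEMORY_SCOPE_SYSTEM);
    }
}

int main() {
    int* flag = nullptr;
    // hipHostMallocDefault gives fine-grained (coherent) pinned memory.
    hipHostMalloc(reinterpret_cast<void**>(&flag), sizeof(int), hipHostMallocDefault);
    *flag = 0;

    hipLaunchKernelGGL(signal, dim3(1), dim3(1), 0, 0, flag);

    // The host can observe the update before the kernel synchronizes.
    while (__atomic_load_n(flag, __ATOMIC_ACQUIRE) == 0) { /* spin */ }
    std::printf("flag observed\n");

    hipDeviceSynchronize();
    hipHostFree(flag);
    return 0;
}
```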

The availability of fine- and coarse-grained memory pools can be checked with `rocminfo`:

```sh
$ rocminfo
...
*******
Agent 1
*******
  Name:                    AMD EPYC 7742 64-Core Processor
  ...
  Pool Info:
    Pool 1
      Segment:             GLOBAL; FLAGS: FINE GRAINED
      ...
    Pool 3
      Segment:             GLOBAL; FLAGS: COARSE GRAINED
      ...
*******
Agent 9
*******
  Name:                    gfx90a
  ...
  Pool Info:
    Pool 1
      Segment:             GLOBAL; FLAGS: COARSE GRAINED
      ...
```

## System direct memory access

In most cases, the default behavior for HIP in transferring data from a pinned host allocation to the device will run at the limit of the interconnect.
However, there are certain cases where the interconnect is not the bottleneck.

The primary way to transfer data onto and off of a GPU, such as the MI200, is to use the onboard System Direct Memory Access (SDMA) engine, which is used to feed blocks of memory to the off-device interconnect (either GPU-CPU or GPU-GPU).
Each GCD has a separate SDMA engine for host-to-device and device-to-host memory transfers.
Importantly, SDMA engines are separate from the computing infrastructure, meaning that memory transfers to and from a device will not impact kernel compute performance, though they do impact memory bandwidth to a limited extent.
The SDMA engines are mainly tuned for PCIe 4.0 x16, which means they are designed to operate at bandwidths up to 32 GB/s.

:::{note}
An important feature of the MI250X platform is the Infinity Fabric™ interconnect between host and device.
The Infinity Fabric interconnect supports improved performance over standard PCIe 4.0 (usually ~50% more bandwidth); however, since the SDMA engine does not run at this speed, it will not max out the bandwidth of the faster interconnect.
:::

The bandwidth limitation can be countered by bypassing the SDMA engine and replacing it with a type of copy kernel known as a "blit" kernel.
Blit kernels use the compute units on the GPU, thereby consuming compute resources, which may not always be beneficial.
The easiest way to enable blit kernels is to set the environment variable `HSA_ENABLE_SDMA=0`, which disables the SDMA engine.
On systems where the GPU uses a PCIe interconnect instead of an Infinity Fabric interconnect, blit kernels will not impact bandwidth, but will still consume compute resources.
The use of SDMA versus blit kernels also applies to MPI data transfers and GPU-GPU transfers.
241
docs/conceptual/using-gpu-sanitizer.md
Normal file
@@ -0,0 +1,241 @@
# Using the LLVM ASan on a GPU (beta release)

The LLVM AddressSanitizer (ASan) provides a process that allows developers to detect runtime addressing errors in applications and libraries. The detection is achieved using a combination of compiler-added instrumentation and runtime techniques, including function interception and replacement.

Until now, the LLVM ASan process was only available for traditional, purely CPU applications. However, ROCm has extended this mechanism to additionally allow the detection of some addressing errors on the GPU in heterogeneous applications. Ideally, developers should be able to treat heterogeneous HIP and OpenMP applications exactly like pure CPU applications. However, this simplicity has not been achieved yet.

This document describes how to use ROCm ASan.
For information about LLVM ASan, see the [LLVM documentation](https://clang.llvm.org/docs/AddressSanitizer.html).

**Note**: The beta release of LLVM ASan for ROCm is currently tested and validated on Ubuntu 20.04.

## Compiling for ASan

The ASan process begins by compiling the application of interest with the ASan instrumentation.

Recommendations for doing this are:

* Compile as many application and dependent library sources as possible using an AMD-built clang-based compiler such as `amdclang++`.
* Add the following options to the existing compiler and linker options:
  * `-fsanitize=address` - enables instrumentation
  * `-shared-libsan` - uses the shared version of the runtime
  * `-g` - adds debug info for improved reporting
* Explicitly use `xnack+` in the offload architecture option. For example, `--offload-arch=gfx90a:xnack+`.
  Other architectures are allowed, but their device code will not be instrumented and a warning will be emitted.

It is not an error to compile some files without ASan instrumentation, but doing so reduces the ability of the process to detect addressing errors. However, if the main program `a.out` does not directly depend on the ASan runtime (`libclang_rt.asan-x86_64.so`) after the build completes (check by running `ldd` (List Dynamic Dependencies) or `readelf`), the application will immediately report an error at runtime, as described in the next section.
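
To make the recommendations concrete, here is a deliberately buggy HIP program that GPU ASan should flag; the file name, kernel name, and compile line are hypothetical examples following the options listed above, assuming a gfx90a target:

```cpp
// A hypothetical compile line following the recommendations above:
//   amdclang++ -x hip --offload-arch=gfx90a:xnack+ -fsanitize=address \
//              -shared-libsan -g asan_demo.hip -o asan_demo
#include <hip/hip_runtime.h>

__global__ void out_of_bounds(int* data) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    data[i] = i;  // bug: no bounds check, the last block writes past the buffer
}

int main() {
    int* d_data = nullptr;
    hipMalloc(reinterpret_cast<void**>(&d_data), 100 * sizeof(int));
    // 2 blocks x 64 threads = 128 writes into a 100-element buffer:
    // ASan should report a heap-buffer-overflow on the amdgpu device.
    hipLaunchKernelGGL(out_of_bounds, dim3(2), dim3(64), 0, 0, d_data);
    hipDeviceSynchronize();
    hipFree(d_data);
    return 0;
}
```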

### About compilation time

When `-fsanitize=address` is used, the LLVM compiler adds instrumentation code around every memory operation. This added code must be handled by all of the downstream components of the compiler toolchain and results in increased overall compilation time. This increase is especially evident in the AMDGPU device compiler and has in a few instances raised the compile time to an unacceptable level.

There are a few options if the compile time becomes unacceptable:

* Avoid instrumentation of the files which have the worst compile times. This will reduce the effectiveness of the ASan process.
* Add the option `-fsanitize-recover=address` to the compiles with the worst compile times. This option simplifies the added instrumentation, resulting in faster compilation. See below for more information.
* Disable instrumentation on a per-function basis by adding `__attribute__((no_sanitize("address")))` to functions found to be responsible for the large compile time. Again, this will reduce the effectiveness of the process.

## Installing ROCm GPU ASan packages

For a complete ROCm GPU Sanitizer installation, including packages, instrumented HSA and HIP runtimes, tools, and math libraries, use the following command:

```bash
sudo apt-get install rocm-ml-sdk-asan
```

## Using AMD-supplied ASan instrumented libraries

ROCm releases have optional packages that contain additional ASan instrumented builds of the ROCm libraries (usually found in `/opt/rocm-<version>/lib`). The instrumented libraries have identical names to the regular uninstrumented libraries, and are located in `/opt/rocm-<version>/lib/asan`.
These additional libraries are built using the `amdclang++` and `hipcc` compilers, while some uninstrumented libraries are built with g++. The preexisting build options are used but, as described above, additional options are used: `-fsanitize=address`, `-shared-libsan`, and `-g`.

These additional libraries avoid additional developer effort to locate repositories, identify the correct branch, check out the correct tags, and other efforts needed to build the libraries from source. They also extend the ability of the process to detect addressing errors into the ROCm libraries themselves.

When adjusting an application build to add instrumentation, linking against these instrumented libraries is unnecessary. For example, any `-L /opt/rocm-<version>/lib` compiler options need not be changed. However, the instrumented libraries should be used when the application is run. It is particularly important that the instrumented language runtimes, like `libamdhip64.so` and `librocm-core.so`, are used; otherwise, device invalid access detections may not be reported.

## Running ASan instrumented applications

### Preparing to run an instrumented application

Here are a few recommendations to consider before running an ASan instrumented heterogeneous application.

* Ensure the Linux kernel running on the system has Heterogeneous Memory Management (HMM) support. A kernel version of 5.6 or higher should be sufficient.
* Ensure XNACK is enabled:
  * For `gfx90a` (MI-2X0) or `gfx940` (MI-3X0), set the environment variable `HSA_XNACK=1`.
  * For `gfx906` (MI-50) or `gfx908` (MI-100), set the environment variable `HSA_XNACK=1`, but also ensure the amdgpu kernel module is loaded with the module argument `noretry=0`.
    This requirement is due to the fact that the XNACK setting for these GPUs is system-wide.

* Ensure that the application will use the instrumented libraries when it runs. The output from the shell command `ldd <application name>` can be used to see which libraries will be used.
  If the instrumented libraries are not listed by `ldd`, the environment variable `LD_LIBRARY_PATH` may need to be adjusted, or in some cases an `RPATH` compiled into the application may need to be changed and the application recompiled.

* Ensure that the application depends on the ASan runtime. This can be checked by running the command `readelf -d <application name> | grep NEEDED` and verifying that the shared library `libclang_rt.asan-x86_64.so` appears in the output.
  If it does not appear, when executed the application will quickly output an ASan error that looks like:

  ```bash
  ==3210==ASan runtime does not come first in initial library list; you should either link runtime to your application or manually preload it with LD_PRELOAD.
  ```

* Ensure that `llvm-symbolizer` can be executed, and that it is located in `/opt/rocm-<version>/llvm/bin`. This executable is not strictly required, but if found it is used to translate ("symbolize") a host-side instruction address into a more useful function name, file name, and line number (assuming the application has been built to include debug information).

There is an environment variable, `ASAN_OPTIONS`, that can be used to adjust the runtime behavior of the ASan runtime itself. There are more than a hundred "flags" that can be adjusted (see an old list at [flags](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)), but the default settings are correct and should be used in most cases. Note that these options only affect the host ASan runtime. The device runtime currently only supports the default settings for the few relevant options.

There are two `ASAN_OPTIONS` flags of particular note.

* `halt_on_error=0/1` (default `1`).

  This tells the ASan runtime to halt the application immediately after detecting and reporting an addressing error. The default makes sense because the application has entered the realm of undefined behavior. If the developer wishes to have the application continue anyway, this option can be set to zero. However, the application and libraries should then be compiled with the additional option `-fsanitize-recover=address`. Note that the ROCm optional ASan instrumented libraries are not compiled with this option, and if an error is detected within one of them while `halt_on_error` is set to `0`, more undefined behavior will occur.

* `detect_leaks=0/1` (default `1`).

  This option directs the ASan runtime to enable the [Leak Sanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) (LSan). Unfortunately, for heterogeneous applications, this default will result in significant output from the leak sanitizer when the application exits, due to allocations made by the language runtime which are not considered to be leaks. This output can be avoided by adding `detect_leaks=0` to the `ASAN_OPTIONS`, or alternatively by producing an LSan suppression file (syntax described [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer)) and activating it with the environment variable `LSAN_OPTIONS=suppressions=/path/to/suppression/file`. When using a suppression file, a suppression report is printed by default. The suppression report can be disabled by using the `LSAN_OPTIONS` flag `print_suppressions=0`.

## Runtime overhead

Running an ASan instrumented application incurs overheads which may result in unacceptably long runtimes or failure to run at all.

### Higher execution time

ASan detection works by checking each address at runtime before the address is actually accessed by a load, store, or atomic instruction.
This checking involves an additional load to "shadow" memory, which records whether the address is "poisoned" or not, and additional logic that decides whether to produce a detection report or not.

This extra runtime work can cause the application to slow down by a factor of three or more, depending on how many memory accesses are executed.
For heterogeneous applications, the shadow memory must be accessible by all devices, and this can mean that shadow accesses from some devices may be more costly than non-shadow accesses.

### Higher memory use

The address checking described above relies on the compiler to surround each program variable with a red zone, and on the ASan runtime to surround each runtime memory allocation with a red zone and fill the shadow corresponding to each red zone with poison.
The added memory for the red zones is additional overhead on top of the 13% overhead for the shadow memory itself.

Applications which consume most of one or more available memory pools when run normally are likely to encounter allocation failures when run with instrumentation.

## Runtime reporting

It is not the intention of this document to provide a detailed explanation of all of the types of reports that can be output by the ASan runtime. Instead, the focus is on the differences between the standard reports for CPU issues and the reports for GPU issues.

An invalid address detection report for the CPU always starts with

```bash
==<PID>==ERROR: AddressSanitizer: <problem type> on address <memory address> at pc <pc> bp <bp> sp <sp> <access> of size <N> at <memory address> thread T0
```

and continues with a stack trace for the access, a stack trace for the allocation and deallocation, if relevant, and a dump of the shadow near the `<memory address>`.

In contrast, an invalid address detection report for the GPU always starts with

```bash
==<PID>==ERROR: AddressSanitizer: <problem type> on amdgpu device <device> at pc <pc> <access> of size <n> in workgroup id (<X>,<Y>,<Z>)
```

Above, `<device>` is the integer device ID, and `(<X>, <Y>, <Z>)` is the ID of the workgroup or block where the invalid address was detected.

While the CPU report includes a call stack for the thread attempting the invalid access, the GPU report is currently limited to a call stack of size one, i.e. the (symbolized) location of the invalid access, e.g.

```bash
    #0 <pc> in <function signature> at /path/to/file.hip:<line>:<column>
```

This short call stack is followed by a GPU-unique section that looks like

```bash
Thread ids and accessed addresses:
<lid0> <maddr 0> : <lid1> <maddr1> : ...
```

where each `<lid j> <maddr j>` indicates the lane ID and the invalid memory address held by lane `j` of the wavefront attempting the invalid access.

Additionally, reports for invalid GPU accesses to memory allocated by GPU code via `malloc` or `new`, starting with, for example,

```bash
==1234==ERROR: AddressSanitizer: heap-buffer-overflow on amdgpu device 0 at pc 0x7fa9f5c92dcc
```

or

```bash
==5678==ERROR: AddressSanitizer: heap-use-after-free on amdgpu device 3 at pc 0x7f4c10062d74
```

currently may include one or two surprising CPU-side tracebacks mentioning `hostcall`. This is due to how `malloc` and `free` are implemented for GPU code, and these call stacks can be ignored.

### Running with `rocgdb`

`rocgdb` can be used to further investigate ASan detected errors, with some preparation.

Currently, the ASan runtime complains when starting `rocgdb` without preparation:

```bash
$ rocgdb my_app
==1122==ASan runtime does not come first in initial library list; you should either link runtime to your application or manually preload it with LD_PRELOAD.
```

This is solved by setting the environment variable `LD_PRELOAD` to the path of the ASan runtime, which can be obtained using the command

```bash
amdclang++ -print-file-name=libclang_rt.asan-x86_64.so
```

It is also recommended to set the environment variable `HIP_ENABLE_DEFERRED_LOADING=0` before debugging HIP applications.

After starting `rocgdb`, breakpoints can be set on the ASan runtime error reporting entry points of interest. For example, if an ASan error report includes

```bash
WRITE of size 4 in workgroup id (10,0,0)
```

the `rocgdb` command needed to stop the program before the report is printed is

```bash
(gdb) break __asan_report_store4
```

Similarly, the appropriate command for a report including

```bash
READ of size <N> in workgroup id (1,2,3)
```

is

```bash
(gdb) break __asan_report_load<N>
```

It is possible to set breakpoints on all ASan report functions using these commands:

```bash
$ rocgdb <path to application>
(gdb) start <command line arguments>
(gdb) rbreak ^__asan_report
(gdb) c
```

### Using ASan with a short HIP application

Refer to the following example to use ASan with a short HIP application:

https://github.com/Rmalavally/rocm-examples/blob/Rmalavally-patch-1/LLVM_ASAN/Using-Address-Sanitizer-with-a-Short-HIP-Application.md

### Known issues with using the GPU sanitizer

* Red zones must have a limited size, and it is possible for an invalid access to completely miss a red zone and not be detected.

* Lack of detection or false reports can be caused by the runtime not properly maintaining red zone shadows.

* Lack of detection on the GPU might also be due to the implementation not instrumenting accesses to all GPU-specific address spaces. For example, in the current implementation, accesses to "private" or "stack" variables on the GPU are not instrumented, and accesses to HIP shared variables (also known as "local data store" or "LDS") are also not instrumented.

* It can also be the case that a memory fault is hit for an invalid address even with the instrumentation. This is usually caused by the invalid address being so wild that its shadow address is outside of any memory region, and the fault actually occurs on the access to the shadow address. It is also possible to hit a memory fault for the `NULL` pointer. While address 0 does have a shadow location, it is not poisoned by the runtime.
296
docs/conf.py
@@ -4,72 +4,31 @@
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import os
import shutil
import sys
from pathlib import Path
from subprocess import run
import jinja2
import os

gh_release_path = os.path.join("..", "RELEASE.md")
gh_changelog_path = os.path.join("..", "CHANGELOG.md")
sphinx_release_path = os.path.join("about", "release-notes.md")
sphinx_changelog_path = os.path.join("release", "changelog.md")
shutil.copy2(gh_release_path, sphinx_release_path)
shutil.copy2(gh_changelog_path, sphinx_changelog_path)
from rocm_docs import ROCmDocs

# Mark the consolidated changelog as orphan to prevent Sphinx from warning about missing toctree entries
with open(sphinx_changelog_path, "r+", encoding="utf-8") as file:
    content = file.read()
    file.seek(0)
    file.write(":orphan:\n" + content)
# Environment to process Jinja templates.
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader("."))

# Replace GitHub-style [!ADMONITION]s with Sphinx-compatible ```{admonition} blocks
with open(sphinx_changelog_path, "r", encoding="utf-8") as file:
    lines = file.readlines()
# Jinja templates to render out.
templates = [

modified_lines = []
in_admonition_section = False
]

# Map for matching the specific admonition type to its corresponding Sphinx markdown syntax
admonition_types = {
    '> [!NOTE]': '```{note}',
    '> [!TIP]': '```{tip}',
    '> [!IMPORTANT]': '```{important}',
    '> [!WARNING]': '```{warning}',
    '> [!CAUTION]': '```{caution}'
}
# Render templates and output files without the last extension.
# For example: 'install.md.jinja' becomes 'install.md'.
for template in templates:
    rendered = jinja_env.get_template(template).render()
    with open(os.path.splitext(template)[0], 'w') as file:
        file.write(rendered)

for line in lines:
    if any(line.startswith(k) for k in admonition_types):
        for key in admonition_types:
            if line.startswith(key):
                modified_lines.append(admonition_types[key] + '\n')
                break
        in_admonition_section = True
    elif in_admonition_section:
        if line.strip() == '':
            # If we encounter an empty line, close the admonition section
            modified_lines.append('```\n\n')  # Close the admonition block
            in_admonition_section = False
        else:
            modified_lines.append(line.lstrip('> '))
    else:
        modified_lines.append(line)

# In case the file ended while still in an admonition section, close it
if in_admonition_section:
    modified_lines.append('```')

file.close()

with open(sphinx_changelog_path, "w", encoding="utf-8") as file:
    file.writelines(modified_lines)

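As an aside, the conversion loop above is easy to exercise in isolation. A self-contained sketch (the sample input is invented, not taken from the actual changelog; `FENCE` stands in for the literal triple backtick so this example stays well-formed):

```python
# Standalone version of the admonition conversion above.
FENCE = '`' * 3  # the literal ``` used by MyST directives

ADMONITION_TYPES = {
    '> [!NOTE]': FENCE + '{note}',
    '> [!TIP]': FENCE + '{tip}',
    '> [!IMPORTANT]': FENCE + '{important}',
    '> [!WARNING]': FENCE + '{warning}',
    '> [!CAUTION]': FENCE + '{caution}',
}

def convert_admonitions(lines):
    out, in_admonition = [], False
    for line in lines:
        marker = next((k for k in ADMONITION_TYPES if line.startswith(k)), None)
        if marker:
            out.append(ADMONITION_TYPES[marker] + '\n')
            in_admonition = True
        elif in_admonition:
            if line.strip() == '':
                out.append(FENCE + '\n\n')  # a blank line closes the admonition
                in_admonition = False
            else:
                out.append(line.lstrip('> '))
        else:
            out.append(line)
    if in_admonition:  # input ended while still inside an admonition
        out.append(FENCE)
    return out

sample = ['> [!NOTE]\n', '> Hypothetical note text.\n', '\n', 'Plain paragraph.\n']
print(''.join(convert_admonitions(sample)), end='')
```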
matrix_path = os.path.join("compatibility", "compatibility-matrix-historical-6.0.csv")
rtd_path = os.path.join("..", "_readthedocs", "html", "downloads")
if not os.path.exists(rtd_path):
    os.makedirs(rtd_path)
shutil.copy2(matrix_path, rtd_path)
shutil.copy2('../CONTRIBUTING.md','./contribute/index.md')
shutil.copy2('../RELEASE.md','./about/release-notes.md')
# Keep capitalization due to similar linking on GitHub's markdown preview.
shutil.copy2('../CHANGELOG.md','./about/CHANGELOG.md')

latex_engine = "xelatex"
latex_elements = {
@@ -80,204 +39,69 @@ latex_elements = {
"""
}

html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "rocm.docs.amd.com")
html_context = {"docs_header_version": "7.1.1"}
if os.environ.get("READTHEDOCS", "") == "True":
    html_context["READTHEDOCS"] = True

# Check if the branch is a docs/ branch
official_branch = run(["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True, text=True).stdout.find("docs/")

# configurations for PDF output by Read the Docs
project = "ROCm Documentation"
project_path = os.path.abspath(".").replace("\\", "/")
author = "Advanced Micro Devices, Inc."
copyright = "Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved."
version = "7.2.0"
release = "7.2.0"
copyright = "Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved."
version = "5.7.1"
release = "5.7.1"
setting_all_article_info = True
all_article_info_os = ["linux", "windows"]
all_article_info_author = ""

# pages with specific settings
article_pages = [
    {"file": "about/release-notes", "os": ["linux"], "date": "2026-01-21"},
    {"file": "release/changelog", "os": ["linux"]},
    {"file": "compatibility/compatibility-matrix", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/pytorch-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/tensorflow-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/jax-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/verl-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/stanford-megatron-lm-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/dgl-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/megablocks-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/ray-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/llama-cpp-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/flashinfer-compatibility", "os": ["linux"]},
    {"file": "how-to/deep-learning-rocm", "os": ["linux"]},
    {
        "file":"release",
        "os":["linux", "windows"],
        "date":"2023-07-27"
    },

    {"file": "how-to/rocm-for-ai/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/install", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/system-setup/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/system-setup/multi-node-setup", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/system-setup/prerequisite-system-validation", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/system-setup/system-health-check", "os": ["linux"]},
    {"file":"install/windows/install-quick", "os":["windows"]},
    {"file":"install/linux/install-quick", "os":["linux"]},

    {"file": "how-to/rocm-for-ai/training/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/train-a-model", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/prerequisite-system-validation", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/scale-model-training", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/megatron-lm", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-history", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v24.12-dev", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.3", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.4", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.5", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.6", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.7", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.8", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.9", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.10", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-v25.11", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/megatron-lm-primus-migration-guide", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/primus-megatron", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-megatron-v25.7", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-megatron-v25.8", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-megatron-v25.9", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-megatron-v25.10", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-megatron-v25.11", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/pytorch-training", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-history", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.3", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.4", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.5", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.6", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.7", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.8", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.9", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.10", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/pytorch-training-v25.11", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/primus-pytorch", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-pytorch-v25.8", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-pytorch-v25.9", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-pytorch-v25.10", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/primus-pytorch-v25.11", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/jax-maxtext", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-history", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.4", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.5", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.9", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/previous-versions/jax-maxtext-v25.11", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/benchmark-docker/mpt-llm-foundry", "os": ["linux"]},
    {"file":"install/linux/install", "os":["linux"]},
    {"file":"install/linux/install-options", "os":["linux"]},
    {"file":"install/linux/prerequisites", "os":["linux"]},

    {"file": "how-to/rocm-for-ai/fine-tuning/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/overview", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/fine-tuning-and-inference", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference", "os": ["linux"]},
    {"file":"install/docker", "os":["linux"]},
    {"file":"install/magma-install", "os":["linux"]},
    {"file":"install/pytorch-install", "os":["linux"]},
    {"file":"install/tensorflow-install", "os":["linux"]},

    {"file": "how-to/rocm-for-ai/inference/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/hugging-face-models", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/llm-inference-frameworks", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/vllm", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-history", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.4.3", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.6.4", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.6.6", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.7.3-20250325", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.8.3-20250415", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.8.5-20250513", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.8.5-20250521", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.0.1-20250605", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.0.1-20250702", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250702", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.9.1-20250715", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.0-20250812", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.1-20250909", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.10.2-20251006", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/vllm-0.11.1-20251103", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/sglang-history", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/pytorch-inference", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/sglang", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/vllm-mori-distributed", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/sglang-mori-distributed", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/sglang-distributed", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/xdit-diffusion-inference", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/xdit-25.10", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/xdit-25.11", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/xdit-25.12", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/benchmark-docker/previous-versions/xdit-25.13", "os": ["linux"]},
    {"file":"install/windows/install", "os":["windows"]},
    {"file":"install/windows/prerequisites", "os":["windows"]},
    {"file":"install/windows/cli/index", "os":["windows"]},
    {"file":"install/windows/gui/index", "os":["windows"]},

    {"file": "how-to/rocm-for-ai/inference/deploy-your-model", "os": ["linux"]},
    {"file":"about/compatibility/linux-support", "os":["linux"]},
    {"file":"about/compatibility/windows-support", "os":["windows"]},

    {"file": "how-to/rocm-for-ai/inference-optimization/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/model-quantization", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/model-acceleration-libraries", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/optimizing-with-composable-kernel", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/optimizing-triton-kernel", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/profiling-and-debugging", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/workload", "os": ["linux"]},
    {"file":"about/compatibility/docker-image-support-matrix", "os":["linux"]},
    {"file":"about/compatibility/user-kernel-space-compat-matrix", "os":["linux"]},

    {"file":"reference/library-index", "os":["linux"]},

    {"file":"how-to/deep-learning-rocm", "os":["linux"]},
    {"file":"how-to/gpu-enabled-mpi", "os":["linux"]},
    {"file":"how-to/system-debugging", "os":["linux"]},
    {"file":"how-to/tuning-guides", "os":["linux", "windows"]},

    {"file":"rocm-a-z", "os":["linux", "windows"]},

    {"file": "how-to/system-optimization/index", "os": ["linux"]},
    {"file": "how-to/system-optimization/mi300x", "os": ["linux"]},
    {"file": "how-to/system-optimization/mi200", "os": ["linux"]},
    {"file": "how-to/system-optimization/mi100", "os": ["linux"]},
    {"file": "how-to/system-optimization/w6000-v620", "os": ["linux"]},
    {"file": "how-to/tuning-guides/mi300x/index", "os": ["linux"]},
    {"file": "how-to/tuning-guides/mi300x/system", "os": ["linux"]},
    {"file": "how-to/tuning-guides/mi300x/workload", "os": ["linux"]},
    {"file": "how-to/system-debugging", "os": ["linux"]},
    {"file": "how-to/gpu-enabled-mpi", "os": ["linux"]},
]

exclude_patterns = ['temp']

external_toc_path = "./sphinx/_toc.yml"

# Add the _extensions directory to Python's search path
sys.path.append(str(Path(__file__).parent / 'extension'))

extensions = ["rocm_docs", "sphinx_reredirects", "sphinx_sitemap", "sphinxcontrib.datatemplates", "remote-content", "version-ref", "csv-to-list-table"]

compatibility_matrix_file = str(Path(__file__).parent / 'compatibility/compatibility-matrix-historical-6.0.csv')
docs_core = ROCmDocs("ROCm Documentation")
docs_core.setup()

external_projects_current_project = "rocm"

# Uncomment if you run into rate-limit issues during a local build
# external_projects_remote_repository = ""

html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "https://rocm-stg.amd.com/")
html_context = {"docs_header_version": "7.1.0"}
if os.environ.get("READTHEDOCS", "") == "True":
    html_context["READTHEDOCS"] = True

html_context["official_branch"] = official_branch
html_context["version"] = version
html_context["release"] = release

html_theme = "rocm_docs_theme"
html_theme_options = {"flavor": "rocm-docs-home"}

html_static_path = ["sphinx/static/css", "extension/how-to/rocm-for-ai/inference"]
html_css_files = ["rocm_custom.css", "rocm_rn.css", "vllm-benchmark.css"]
html_js_files = ["vllm-benchmark.js"]

html_title = "ROCm Documentation"

html_theme_options = {"link_main_doc": False}

redirects = {"reference/openmp/openmp": "../../about/compatibility/openmp.html"}

numfig = False
suppress_warnings = ["autosectionlabel.*"]

html_context = {
    "project_path" : {project_path},
    "gpu_type" : [('AMD Instinct GPUs', 'intrinsic'), ('AMD gfx families', 'gfx'), ('NVIDIA families', 'nvidia')],
    "atomics_type" : [('HW atomics', 'hw-atomics'), ('CAS emulation', 'cas-atomics')],
    "pcie_type" : [('No PCIe atomics', 'nopcie'), ('PCIe atomics', 'pcie')],
    "memory_type" : [('Device DRAM', 'device-dram'), ('Migratable Host DRAM', 'migratable-host-dram'), ('Pinned Host DRAM', 'pinned-host-dram')],
    "granularity_type" : [('Coarse-grained', 'coarse-grained'), ('Fine-grained', 'fine-grained')],
    "scope_type" : [('Device', 'device'), ('System', 'system')]
for sphinx_var in ROCmDocs.SPHINX_VARS:
    globals()[sphinx_var] = getattr(docs_core, sphinx_var)
html_theme_options = {
    "link_main_doc": False
}

# Disable figure and table numbering
numfig = False

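Lists like `article_pages` above tend to drift out of sync with the source tree. A hypothetical helper, not part of `conf.py`, that flags entries whose `.md`/`.rst` source is missing:

```python
# Hypothetical sanity check for article_pages entries (run from docs/).
from pathlib import Path

def missing_article_pages(article_pages, docs_root="."):
    root = Path(docs_root)
    missing = []
    for entry in article_pages:
        stem = root / entry["file"]
        if not any(stem.with_suffix(ext).exists() for ext in (".md", ".rst")):
            missing.append(entry["file"])
    return missing

# Example (the entry shape matches the list above):
pages = [{"file": "about/release-notes", "os": ["linux"]}]
print(missing_article_pages(pages))
```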
@@ -1,58 +1,38 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="Building ROCm documentation">
<meta name="keywords" content="documentation, Visual Studio Code, GitHub, command line,
AMD, ROCm">
</head>

# Building documentation

You can build our documentation via GitHub (in a pull request) or locally (using the command line or
Visual Studio (VS) Code).

## GitHub

If you open a pull request and scroll down to the summary panel,
there is a commit status section. Next to the line
`docs/readthedocs.com:advanced-micro-devices-demo`, there is a `Details` link.
If you click this, it takes you to the Read the Docs build for your pull request.
If you open a pull request on the `develop` branch of a ROCm repository and scroll to the bottom of
the page, there is a summary panel. Next to the line
`docs/readthedocs.com:advanced-micro-devices-demo`, there is a `Details` link. If you click this, it takes
you to the Read the Docs build for your pull request.




If you don't see this line, click `Show all checks` to get an itemized view.

## Command line

You can build our documentation via the command line using Python.

See the `build.tools.python` setting in the [Read the Docs configuration file](https://github.com/ROCm/ROCm/blob/develop/.readthedocs.yaml) for the Python version used by Read the Docs to build documentation.

See the [Python requirements file](https://github.com/ROCm/ROCm/blob/develop/docs/sphinx/requirements.txt) for Python packages needed to build the documentation.
You can build our documentation via the command line using Python. We use Python 3.8; other
versions may not support the build.

Use the Python Virtual Environment (`venv`) and run the following commands from the project root:

::::{tab-set}
:::{tab-item} Linux and WSL
:sync: linux

```sh
python3 -mvenv .venv

.venv/bin/python -m pip install -r docs/sphinx/requirements.txt
.venv/bin/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html
# Windows
.venv/Scripts/python -m pip install -r docs/sphinx/requirements.txt
.venv/Scripts/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html

# Linux
.venv/bin/python -m pip install -r docs/sphinx/requirements.txt
.venv/bin/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html
```

:::
:::{tab-item} Windows
:sync: windows

```powershell
python -mvenv .venv

.venv\Scripts\python.exe -m pip install -r docs/sphinx/requirements.txt
.venv\Scripts\python.exe -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html
```

:::
::::

Navigate to `_build/html/index.html` and open this file in a web browser.

## Visual Studio Code
@@ -142,12 +122,12 @@ documentation locally using Visual Studio (VS) Code. Follow these steps to confi
}
```

> Implementation detail: two problem matchers needed to be defined,
> (Implementation detail: two problem matchers needed to be defined,
> because VS Code doesn't tolerate some problem information being potentially
> absent. While a single regex could match all types of errors, if a capture
> group remains empty (the line number doesn't show up in all warning/error
> messages) but the `pattern` references said empty capture group, VS Code
> discards the message completely.
> discards the message completely.)
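   The capture-group behavior described in the note can be reproduced outside VS Code. A small sketch using Python's `re` as a stand-in for the matcher's JavaScript regex (the sample warning strings are invented):

   ```python
   import re

   # An optional line-number group: present in some Sphinx messages, absent in others.
   pattern = re.compile(r"^(?P<file>[^:]+):(?:(?P<line>\d+):)? WARNING: (?P<msg>.*)$")

   for text in ("docs/index.md:12: WARNING: undefined label",
                "docs/index.md: WARNING: duplicate label"):
       m = pattern.match(text)
       # When the group does not participate, it comes back as None, which is
       # the kind of "missing problem information" VS Code refuses to tolerate.
       print(m.group("file"), m.group("line"), m.group("msg"))
   ```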
4. Configure the Python virtual environment (`venv`).
@@ -1,77 +0,0 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="Contributing to ROCm">
<meta name="keywords" content="ROCm, contributing, contribute, maintainer, contributor">
</head>

# Contributing to the ROCm documentation

The ROCm documentation, like all of ROCm, is open source and available on GitHub. You can contribute to the ROCm documentation by forking the appropriate repository, making your changes, and opening a pull request.

To provide feedback on the ROCm documentation, including submitting an issue or suggesting a feature, see [Providing feedback about the ROCm documentation](./feedback.md).

## The ROCm repositories

The repositories for ROCm and all ROCm components are available on GitHub.

| Module | Documentation location |
| --- | --- |
| ROCm framework | [https://github.com/ROCm/ROCm/tree/develop/docs](https://github.com/ROCm/ROCm/tree/develop/docs) |
| ROCm installation for Linux | [https://github.com/ROCm/rocm-install-on-linux/tree/develop/docs](https://github.com/ROCm/rocm-install-on-linux/tree/develop/docs) |
| ROCm HIP SDK installation for Windows | [https://github.com/ROCm/rocm-install-on-windows/tree/develop/docs](https://github.com/ROCm/rocm-install-on-windows/tree/develop/docs) |

Individual components have their own repositories, each with its documentation in a `docs` folder.

The sub-folders within the `docs` folders across ROCm are typically structured as follows:

| Sub-folder name | Documentation type |
|-------|----------|
| `install` | Installation instructions, build instructions, and prerequisites |
| `conceptual` | Important concepts |
| `how-to` | How to implement specific use cases |
| `tutorials` | Tutorials |
| `reference` | API references and other reference resources |

## Editing and adding to the documentation

ROCm documentation follows the [Google developer documentation style guide](https://developers.google.com/style/highlights).

Most topics in the ROCm documentation are written in [reStructuredText (rst)](https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html), with some topics written in Markdown. Use reStructuredText when adding new topics; use Markdown only if the topic you are editing is already in Markdown.

To edit or add to the documentation:

1. Fork the repository you want to add to or edit.
2. Clone your fork locally.
3. Create a new local branch cut from the `develop` branch of the repository.
4. Make your changes to the documentation.

5. Optionally, build the documentation locally before creating a pull request by running the following commands from within the `docs` folder:

   ```bash
   pip3 install -r sphinx/requirements.txt # You only need to run this command once
   python3 -m sphinx -T -E -b html -d _build/doctrees -D language=en . _build/html
   ```

   The output files will be located in the `docs/_build` folder. Open `docs/_build/html/index.html` to view the documentation.

   For more information on ROCm build tools, see [Documentation toolchain](toolchain.md).
6. Push your changes. A GitHub link will be returned in the output of the `git push` command. Open this link in a browser to create the pull request.

   The documentation is built as part of the checks on the pull request, along with spell checking and linting. Scroll to the bottom of your pull request to view all the checks.

   Verify that the linting and spell checking have passed, and that the documentation was built successfully. New words or acronyms can be added to the [wordlist file](https://github.com/ROCm/rocm-docs-core/blob/develop/.wordlist.txt). The wordlist is subject to approval by the ROCm documentation team.

   The Read The Docs build of your pull request can be accessed by clicking on the Details link next to the Read The Docs build check. Verify that your changes are in the build and look as expected.

   

   

   Your pull request will be reviewed by a member of the ROCm documentation team.

See the [GitHub documentation](https://docs.github.com/en) for information on how to fork and clone a repository, and how to create and push a local branch.

```{important}
By creating a pull request (PR), you agree to allow your contribution to be licensed under the terms of the
LICENSE.txt file in the corresponding repository. Different repositories can use different licenses.
```
@@ -1,27 +1,27 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="Providing feedback for ROCm documentation">
<meta name="keywords" content="documentation, pull request, GitHub, AMD, ROCm">
</head>
# How to provide feedback for ROCm documentation

# Providing feedback about the ROCm documentation
There are four standard ways to provide feedback for this repository.

Feedback about the ROCm documentation is welcome. You can provide it either through GitHub Discussions or GitHub Issues.
## Pull request

## Participating in discussions through GitHub Discussions
All contributions to ROCm documentation should arrive via the
[GitHub Flow](https://docs.github.com/en/get-started/quickstart/github-flow)
targeting the develop branch of the repository. If you are unable to contribute
via the GitHub Flow, feel free to email us at [rocm-feedback@amd.com](mailto:rocm-feedback@amd.com?subject=Documentation%20Feedback).

You can ask questions, view announcements, suggest new features, and communicate with other members of the community through [GitHub Discussions](https://github.com/ROCm/ROCm/discussions).
## GitHub discussions

## Submitting issues through GitHub Issues
To ask questions or view answers to frequently asked questions, refer to
[GitHub Discussions](https://github.com/RadeonOpenCompute/ROCm/discussions).
On GitHub Discussions, in addition to asking and answering questions,
members can share updates, have open-ended conversations,
and follow along via public announcements.

You can submit issues through [GitHub Issues](https://github.com/ROCm/ROCm/issues).
## GitHub issue

When creating a new issue, follow these guidelines:
Issues on existing or absent docs can be filed as
[GitHub Issues](https://github.com/RadeonOpenCompute/ROCm/issues).

1. Always search first to see whether the same issue already exists. If it does, upvote it, and comment or post to provide any additional details you might have.
2. If you find an issue that is similar to yours, log your issue, then add a comment that includes a link to the similar issue, as well as its issue number.
3. Always provide as much information as possible. This helps reduce the time required to reproduce the issue.
## Email

After creating your issue, make sure to check it regularly for any requests for additional information.

For information about contributing content to the ROCm documentation, see [Contributing to the ROCm documentation](./contributing.md).
Send other feedback or questions to [rocm-feedback@amd.com](mailto:rocm-feedback@amd.com?subject=Documentation%20Feedback).

@@ -1,46 +1,71 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="ROCm documentation toolchain">
<meta name="keywords" content="documentation, toolchain, Sphinx, Doxygen, MyST, AMD, ROCm">
</head>

# ROCm documentation toolchain

The ROCm documentation relies on several open source toolchains and sites.
Our documentation relies on several open source toolchains and sites.

## rocm-docs-core
## `rocm-docs-core`

[rocm-docs-core](https://github.com/ROCm/rocm-docs-core) is an AMD-maintained
project that applies customizations for the ROCm documentation. This project is the tool most ROCm repositories use as part of their documentation build pipeline. It is available as a [pip package on PyPI](https://pypi.org/project/rocm-docs-core/).
[rocm-docs-core](https://github.com/RadeonOpenCompute/rocm-docs-core) is an AMD-maintained
project that applies customization for our documentation. This
project is the tool most ROCm repositories use as part of the documentation
build. It is also available as a [pip package on PyPI](https://pypi.org/project/rocm-docs-core/).

See the user and developer guides for rocm-docs-core at
{doc}`rocm-docs-core documentation<rocm-docs-core:index>`.
See the user and developer guides for rocm-docs-core at {doc}`rocm-docs-core documentation<rocm-docs-core:index>`.

## Sphinx

[Sphinx](https://www.sphinx-doc.org/en/master/) is a documentation generator originally used for Python. It is now widely used in the open source community.

### Sphinx External ToC

[Sphinx External ToC](https://sphinx-external-toc.readthedocs.io/en/latest/intro.html) is a Sphinx extension used for ROCm documentation navigation. This tool generates a navigation menu on the left
based on a YAML file (`_toc.yml.in`) that contains the table of contents.

### Sphinx-book-theme

[Sphinx-book-theme](https://sphinx-book-theme.readthedocs.io/en/latest/) is a Sphinx theme that defines the base appearance for ROCm documentation. ROCm documentation applies some customization, such as a custom header and footer, on top of the Sphinx Book Theme.

### Sphinx Design

[Sphinx design](https://sphinx-design.readthedocs.io/en/latest/index.html) is a Sphinx extension that adds design functionality. ROCm documentation uses Sphinx Design for grids, cards, and synchronized tabs.

## Doxygen

[Doxygen](https://www.doxygen.nl/) is a documentation generator that extracts information from in-code comments. It is used for API documentation.

## Breathe

[Breathe](https://www.breathe-doc.org/) is a Sphinx plugin for integrating Doxygen content.
[Sphinx](https://www.sphinx-doc.org/en/master/) is a documentation generator
originally used for Python. It is now widely used in the open source community.
Originally, Sphinx supported reStructuredText (RST) based documentation, but
Markdown support is now available.
ROCm documentation plans to default to Markdown for new projects.
Existing projects using RST are under no obligation to convert to Markdown. New
projects that believe Markdown is not suitable should contact the documentation
team prior to selecting RST.

## Read the Docs

[Read the Docs](https://docs.readthedocs.io/en/stable/) is the service that builds and hosts the HTML version of the ROCm documentation.
[Read the Docs](https://docs.readthedocs.io/en/stable/) is the service that builds
and hosts the HTML documentation generated with Sphinx for our end users.

## Doxygen

[Doxygen](https://www.doxygen.nl/) is a documentation generator that extracts
information from in-code comments.
ROCm projects typically use Doxygen for public API documentation unless the
upstream project uses a different tool.

### Breathe

[Breathe](https://www.breathe-doc.org/) is a Sphinx plugin to integrate Doxygen
content.

### MyST

[Markedly Structured Text (MyST)](https://myst-tools.org/docs/spec) is an extended
flavor of Markdown ([CommonMark](https://commonmark.org/)) influenced by reStructuredText (RST) and Sphinx.
It is integrated into ROCm documentation by the Sphinx extension [`myst-parser`](https://myst-parser.readthedocs.io/en/latest/).
A cheat sheet that showcases how to use the MyST syntax is available at
the [Jupyter reference](https://jupyterbook.org/en/stable/reference/cheatsheet.html).

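For reference, wiring MyST into a plain Sphinx project takes only a few `conf.py` lines. A generic sketch, independent of rocm-docs-core (which bundles this setup for ROCm projects):

```python
# Minimal conf.py fragment enabling MyST Markdown alongside RST.
extensions = ["myst_parser"]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
myst_enable_extensions = ["colon_fence"]  # enables ::: fenced directives
```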
### Sphinx External ToC

[Sphinx External ToC](https://sphinx-external-toc.readthedocs.io/en/latest/intro.html)
is a Sphinx extension used for ROCm documentation navigation. This tool generates a navigation menu on the left
based on a YAML file that specifies the table of contents.
It was selected for its flexibility: scripts can operate directly on the
YAML file. Please transition to this file for the project's navigation. You can
see the `_toc.yml.in` file in this repository in the `docs/sphinx` folder for an
example; a short sketch of walking that file follows below.
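Because the ToC is plain YAML, scripts can inspect it directly. A minimal sketch, assuming PyYAML and the usual sphinx-external-toc layout of nested `file` entries:

```python
# List every document referenced by the external ToC.
import yaml

def toc_files(node):
    if isinstance(node, dict):
        if "file" in node:
            yield node["file"]
        for value in node.values():
            yield from toc_files(value)
    elif isinstance(node, list):
        for item in node:
            yield from toc_files(item)

with open("docs/sphinx/_toc.yml.in", encoding="utf-8") as f:
    toc = yaml.safe_load(f)

for path in toc_files(toc):
    print(path)
```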
### Sphinx-book-theme

[Sphinx-book-theme](https://sphinx-book-theme.readthedocs.io/en/latest/) is a Sphinx theme
that defines the base appearance for ROCm documentation.
ROCm documentation applies some customization,
such as a custom header and footer, on top of the Sphinx Book Theme.

### Sphinx design

[Sphinx design](https://sphinx-design.readthedocs.io/en/latest/index.html) is a Sphinx extension that adds design
functionality.
ROCm documentation uses Sphinx Design for grids, cards, and synchronized tabs.

(Binary image diffs: 28 images removed from the documentation data folders, with sizes ranging from 14 KiB to 2.1 MiB; the filenames were not preserved in this view. One image added:)
BIN
docs/data/how-to/gpu-enabled-mpi-1.png
Normal file
After Width: | Height: | Size: 13 KiB |
@@ -1,91 +0,0 @@
vllm_benchmark:
  unified_docker:
    latest:
      pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.0_20250812
      docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.0_20250812/images/sha256-4c277ad39af3a8c9feac9b30bf78d439c74d9b4728e788a419d3f1d0c30cacaa
      rocm_version: 6.4.1
      vllm_version: 0.10.0 (0.10.1.dev395+g340ea86df.rocm641)
      pytorch_version: 2.7.0+gitf717b2a
      hipblaslt_version: 0.15
  model_groups:
  - group: Meta Llama
    tag: llama
    models:
    - model: Llama 3.1 8B
      mad_tag: pyt_vllm_llama-3.1-8b
      model_repo: meta-llama/Llama-3.1-8B-Instruct
      url: https://huggingface.co/meta-llama/Llama-3.1-8B
      precision: float16
    - model: Llama 3.1 70B
      mad_tag: pyt_vllm_llama-3.1-70b
      model_repo: meta-llama/Llama-3.1-70B-Instruct
      url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct
      precision: float16
    - model: Llama 3.1 405B
      mad_tag: pyt_vllm_llama-3.1-405b
      model_repo: meta-llama/Llama-3.1-405B-Instruct
      url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
      precision: float16
    - model: Llama 2 70B
      mad_tag: pyt_vllm_llama-2-70b
      model_repo: meta-llama/Llama-2-70b-chat-hf
      url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
      precision: float16
    - model: Llama 3.1 8B FP8
      mad_tag: pyt_vllm_llama-3.1-8b_fp8
      model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV
      url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV
      precision: float8
    - model: Llama 3.1 70B FP8
      mad_tag: pyt_vllm_llama-3.1-70b_fp8
      model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV
      url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV
      precision: float8
    - model: Llama 3.1 405B FP8
      mad_tag: pyt_vllm_llama-3.1-405b_fp8
      model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV
      url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV
      precision: float8
  - group: Mistral AI
    tag: mistral
    models:
    - model: Mixtral MoE 8x7B
      mad_tag: pyt_vllm_mixtral-8x7b
      model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1
      url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
      precision: float16
    - model: Mixtral MoE 8x22B
      mad_tag: pyt_vllm_mixtral-8x22b
      model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1
      url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
      precision: float16
    - model: Mixtral MoE 8x7B FP8
      mad_tag: pyt_vllm_mixtral-8x7b_fp8
      model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
      url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
      precision: float8
    - model: Mixtral MoE 8x22B FP8
      mad_tag: pyt_vllm_mixtral-8x22b_fp8
      model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
      url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
      precision: float8
  - group: Qwen
    tag: qwen
    models:
    - model: QwQ-32B
      mad_tag: pyt_vllm_qwq-32b
      model_repo: Qwen/QwQ-32B
      url: https://huggingface.co/Qwen/QwQ-32B
      precision: float16
    - model: Qwen3 30B A3B
      mad_tag: pyt_vllm_qwen3-30b-a3b
      model_repo: Qwen/Qwen3-30B-A3B
      url: https://huggingface.co/Qwen/Qwen3-30B-A3B
      precision: float16
  - group: Microsoft Phi
    tag: phi
    models:
    - model: Phi-4
      mad_tag: pyt_vllm_phi-4
      model_repo: microsoft/phi-4
      url: https://huggingface.co/microsoft/phi-4
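Data files like the one above are easiest to audit programmatically. A sketch that flattens every model entry into one row per model (the filename is hypothetical, and the YAML nesting was reconstructed above, so the recursive walk deliberately avoids assuming where `model_groups` sits):

```python
import yaml

def find_model_groups(node):
    """Yield every model_groups list, wherever it nests."""
    if isinstance(node, dict):
        for key, value in node.items():
            if key == "model_groups":
                yield value
            else:
                yield from find_model_groups(value)
    elif isinstance(node, list):
        for item in node:
            yield from find_model_groups(item)

with open("vllm-benchmark-models.yaml", encoding="utf-8") as f:  # hypothetical name
    data = yaml.safe_load(f)

for groups in find_model_groups(data):
    for group in groups:
        for model in group["models"]:
            print(f'{group["group"]:15} {model.get("precision", "?"):9} '
                  f'{model["mad_tag"]}')
```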
@@ -1,188 +0,0 @@
dockers:
- pull_tag: rocm/vllm:rocm6.4.1_vllm_0.10.1_20250909
  docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.4.1_vllm_0.10.1_20250909/images/sha256-1113268572e26d59b205792047bea0e61e018e79aeadceba118b7bf23cb3715c
  components:
    ROCm: 6.4.1
    vLLM: 0.10.1 (0.10.1rc2.dev409+g0b6bf6691.rocm641)
    PyTorch: 2.7.0+gitf717b2a
    hipBLASLt: 0.15
  model_groups:
  - group: Meta Llama
    tag: llama
    models:
    - model: Llama 3.1 8B
      mad_tag: pyt_vllm_llama-3.1-8b
      model_repo: meta-llama/Llama-3.1-8B-Instruct
      url: https://huggingface.co/meta-llama/Llama-3.1-8B
      precision: float16
      config:
        tp: 1
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
    - model: Llama 3.1 70B
      mad_tag: pyt_vllm_llama-3.1-70b
      model_repo: meta-llama/Llama-3.1-70B-Instruct
      url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct
      precision: float16
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
    - model: Llama 3.1 405B
      mad_tag: pyt_vllm_llama-3.1-405b
      model_repo: meta-llama/Llama-3.1-405B-Instruct
      url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
      precision: float16
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
    - model: Llama 2 70B
      mad_tag: pyt_vllm_llama-2-70b
      model_repo: meta-llama/Llama-2-70b-chat-hf
      url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
      precision: float16
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 4096
        max_num_batched_tokens: 4096
        max_model_len: 4096
    - model: Llama 3.1 8B FP8
      mad_tag: pyt_vllm_llama-3.1-8b_fp8
      model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV
      url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV
      precision: float8
      config:
        tp: 1
        dtype: auto
        kv_cache_dtype: fp8
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
    - model: Llama 3.1 70B FP8
      mad_tag: pyt_vllm_llama-3.1-70b_fp8
      model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV
      url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV
      precision: float8
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: fp8
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
    - model: Llama 3.1 405B FP8
      mad_tag: pyt_vllm_llama-3.1-405b_fp8
      model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV
      url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV
      precision: float8
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: fp8
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
  - group: Mistral AI
    tag: mistral
    models:
    - model: Mixtral MoE 8x7B
      mad_tag: pyt_vllm_mixtral-8x7b
      model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1
      url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
      precision: float16
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 32768
        max_num_batched_tokens: 32768
        max_model_len: 8192
    - model: Mixtral MoE 8x22B
      mad_tag: pyt_vllm_mixtral-8x22b
      model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1
      url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
      precision: float16
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 65536
        max_num_batched_tokens: 65536
        max_model_len: 8192
    - model: Mixtral MoE 8x7B FP8
      mad_tag: pyt_vllm_mixtral-8x7b_fp8
      model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
      url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
      precision: float8
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: fp8
        max_seq_len_to_capture: 32768
        max_num_batched_tokens: 32768
        max_model_len: 8192
    - model: Mixtral MoE 8x22B FP8
      mad_tag: pyt_vllm_mixtral-8x22b_fp8
      model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
      url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
      precision: float8
      config:
        tp: 8
        dtype: auto
        kv_cache_dtype: fp8
        max_seq_len_to_capture: 65536
        max_num_batched_tokens: 65536
        max_model_len: 8192
  - group: Qwen
    tag: qwen
    models:
    - model: QwQ-32B
      mad_tag: pyt_vllm_qwq-32b
      model_repo: Qwen/QwQ-32B
      url: https://huggingface.co/Qwen/QwQ-32B
      precision: float16
      config:
        tp: 1
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 131072
        max_num_batched_tokens: 131072
        max_model_len: 8192
    - model: Qwen3 30B A3B
      mad_tag: pyt_vllm_qwen3-30b-a3b
      model_repo: Qwen/Qwen3-30B-A3B
      url: https://huggingface.co/Qwen/Qwen3-30B-A3B
      precision: float16
      config:
        tp: 1
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 32768
        max_num_batched_tokens: 32768
        max_model_len: 8192
  - group: Microsoft Phi
    tag: phi
    models:
    - model: Phi-4
      mad_tag: pyt_vllm_phi-4
      model_repo: microsoft/phi-4
      url: https://huggingface.co/microsoft/phi-4
      config:
        tp: 1
        dtype: auto
        kv_cache_dtype: auto
        max_seq_len_to_capture: 16384
        max_num_batched_tokens: 16384
        max_model_len: 8192
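Each `config` block above maps naturally onto a vLLM launch. A sketch that turns one model entry into a serve invocation; the flag names mirror vLLM's CLI (`--tensor-parallel-size` and friends), but the mapping is an assumption rather than something these data files define, and the filename is hypothetical:

```python
import yaml

with open("vllm-0.10.1-benchmark.yaml", encoding="utf-8") as f:  # hypothetical
    data = yaml.safe_load(f)

docker = data["dockers"][0]
model = docker["model_groups"][0]["models"][0]   # Llama 3.1 8B
cfg = model["config"]

cmd = ["vllm", "serve", model["model_repo"],
       "--tensor-parallel-size", str(cfg["tp"]),
       "--dtype", cfg["dtype"],
       "--kv-cache-dtype", cfg["kv_cache_dtype"],
       "--max-num-batched-tokens", str(cfg["max_num_batched_tokens"]),
       "--max-model-len", str(cfg["max_model_len"])]
if "max_seq_len_to_capture" in cfg:  # present in this file, dropped later
    cmd += ["--max-seq-len-to-capture", str(cfg["max_seq_len_to_capture"])]

print("docker pull", docker["pull_tag"])
print(" ".join(cmd))
```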
@@ -1,316 +0,0 @@
|
||||
dockers:
|
||||
- pull_tag: rocm/vllm:rocm7.0.0_vllm_0.10.2_20251006
|
||||
docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm7.0.0_vllm_0.10.2_20251006/images/sha256-94fd001964e1cf55c3224a445b1fb5be31a7dac302315255db8422d813edd7f5
|
||||
components:
|
||||
ROCm: 7.0.0
|
||||
vLLM: 0.10.2 (0.11.0rc2.dev160+g790d22168.rocm700)
|
||||
PyTorch: 2.9.0a0+git1c57644
|
||||
hipBLASLt: 1.0.0
|
||||
dockerfile:
|
||||
commit: 790d22168820507f3105fef29596549378cfe399
|
||||
model_groups:
|
||||
- group: Meta Llama
|
||||
tag: llama
|
||||
models:
|
||||
- model: Llama 2 70B
|
||||
mad_tag: pyt_vllm_llama-2-70b
|
||||
model_repo: meta-llama/Llama-2-70b-chat-hf
|
||||
url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
|
||||
precision: float16
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: auto
|
||||
max_num_batched_tokens: 4096
|
||||
max_model_len: 4096
|
||||
- model: Llama 3.1 8B
|
||||
mad_tag: pyt_vllm_llama-3.1-8b
|
||||
model_repo: meta-llama/Llama-3.1-8B-Instruct
|
||||
url: https://huggingface.co/meta-llama/Llama-3.1-8B
|
||||
precision: float16
|
||||
config:
|
||||
tp: 1
|
||||
dtype: auto
|
||||
kv_cache_dtype: auto
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.1 8B FP8
|
||||
mad_tag: pyt_vllm_llama-3.1-8b_fp8
|
||||
model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV
|
||||
url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV
|
||||
precision: float8
|
||||
config:
|
||||
tp: 1
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.1 405B
|
||||
mad_tag: pyt_vllm_llama-3.1-405b
|
||||
model_repo: meta-llama/Llama-3.1-405B-Instruct
|
||||
url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
|
||||
precision: float16
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: auto
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.1 405B FP8
|
||||
mad_tag: pyt_vllm_llama-3.1-405b_fp8
|
||||
model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV
|
||||
url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV
|
||||
precision: float8
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.1 405B MXFP4
|
||||
mad_tag: pyt_vllm_llama-3.1-405b_fp4
|
||||
model_repo: amd/Llama-3.1-405B-Instruct-MXFP4-Preview
|
||||
url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-MXFP4-Preview
|
||||
precision: float4
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.3 70B
|
||||
mad_tag: pyt_vllm_llama-3.3-70b
|
||||
model_repo: meta-llama/Llama-3.3-70B-Instruct
|
||||
url: https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
|
||||
precision: float16
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: auto
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.3 70B FP8
|
||||
mad_tag: pyt_vllm_llama-3.3-70b_fp8
|
||||
model_repo: amd/Llama-3.3-70B-Instruct-FP8-KV
|
||||
url: https://huggingface.co/amd/Llama-3.3-70B-Instruct-FP8-KV
|
||||
precision: float8
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 3.3 70B MXFP4
|
||||
mad_tag: pyt_vllm_llama-3.3-70b_fp4
|
||||
model_repo: amd/Llama-3.3-70B-Instruct-MXFP4-Preview
|
||||
url: https://huggingface.co/amd/Llama-3.3-70B-Instruct-MXFP4-Preview
|
||||
precision: float4
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- model: Llama 4 Scout 17Bx16E
|
||||
mad_tag: pyt_vllm_llama-4-scout-17b-16e
|
||||
model_repo: meta-llama/Llama-4-Scout-17B-16E-Instruct
|
||||
url: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
|
||||
precision: float16
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: auto
|
||||
max_num_batched_tokens: 32768
|
||||
max_model_len: 8192
|
||||
- model: Llama 4 Maverick 17Bx128E
|
||||
mad_tag: pyt_vllm_llama-4-maverick-17b-128e
|
||||
model_repo: meta-llama/Llama-4-Maverick-17B-128E-Instruct
|
||||
url: https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct
|
||||
precision: float16
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: auto
|
||||
max_num_batched_tokens: 32768
|
||||
max_model_len: 8192
|
||||
- model: Llama 4 Maverick 17Bx128E FP8
|
||||
mad_tag: pyt_vllm_llama-4-maverick-17b-128e_fp8
|
||||
model_repo: meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
|
||||
url: https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
|
||||
precision: float8
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- group: DeepSeek
|
||||
tag: deepseek
|
||||
models:
|
||||
- model: DeepSeek R1 0528 FP8
|
||||
mad_tag: pyt_vllm_deepseek-r1
|
||||
model_repo: deepseek-ai/DeepSeek-R1-0528
|
||||
url: https://huggingface.co/deepseek-ai/DeepSeek-R1-0528
|
||||
precision: float8
|
||||
config:
|
||||
tp: 8
|
||||
dtype: auto
|
||||
kv_cache_dtype: fp8
|
||||
max_num_seqs: 1024
|
||||
max_num_batched_tokens: 131072
|
||||
max_model_len: 8192
|
||||
- group: OpenAI GPT OSS
|
||||
tag: gpt-oss
|
||||
models:
|
||||
- model: GPT OSS 20B
|
||||
mad_tag: pyt_vllm_gpt-oss-20b
|
||||
model_repo: openai/gpt-oss-20b
|
||||
url: https://huggingface.co/openai/gpt-oss-20b
|
||||
precision: bfloat16
|
||||
config:
|
||||
tp: 1
|
||||
dtype: auto
|
||||
          kv_cache_dtype: auto
          max_num_batched_tokens: 8192
          max_model_len: 8192
      - model: GPT OSS 120B
        mad_tag: pyt_vllm_gpt-oss-120b
        model_repo: openai/gpt-oss-120b
        url: https://huggingface.co/openai/gpt-oss-120b
        precision: bfloat16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 8192
          max_model_len: 8192
  - group: Mistral AI
    tag: mistral
    models:
      - model: Mixtral MoE 8x7B
        mad_tag: pyt_vllm_mixtral-8x7b
        model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1
        url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 32768
          max_model_len: 8192
      - model: Mixtral MoE 8x7B FP8
        mad_tag: pyt_vllm_mixtral-8x7b_fp8
        model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
        url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 32768
          max_model_len: 8192
      - model: Mixtral MoE 8x22B
        mad_tag: pyt_vllm_mixtral-8x22b
        model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1
        url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 65536
          max_model_len: 8192
      - model: Mixtral MoE 8x22B FP8
        mad_tag: pyt_vllm_mixtral-8x22b_fp8
        model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
        url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 65536
          max_model_len: 8192
  - group: Qwen
    tag: qwen
    models:
      - model: Qwen3 8B
        mad_tag: pyt_vllm_qwen3-8b
        model_repo: Qwen/Qwen3-8B
        url: https://huggingface.co/Qwen/Qwen3-8B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 32B
        mad_tag: pyt_vllm_qwen3-32b
        model_repo: Qwen/Qwen3-32B
        url: https://huggingface.co/Qwen/Qwen3-32B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 30B A3B
        mad_tag: pyt_vllm_qwen3-30b-a3b
        model_repo: Qwen/Qwen3-30B-A3B
        url: https://huggingface.co/Qwen/Qwen3-30B-A3B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 30B A3B FP8
        mad_tag: pyt_vllm_qwen3-30b-a3b_fp8
        model_repo: Qwen/Qwen3-30B-A3B-FP8
        url: https://huggingface.co/Qwen/Qwen3-30B-A3B-FP8
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 235B A22B
        mad_tag: pyt_vllm_qwen3-235b-a22b
        model_repo: Qwen/Qwen3-235B-A22B
        url: https://huggingface.co/Qwen/Qwen3-235B-A22B
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 235B A22B FP8
        mad_tag: pyt_vllm_qwen3-235b-a22b_fp8
        model_repo: Qwen/Qwen3-235B-A22B-FP8
        url: https://huggingface.co/Qwen/Qwen3-235B-A22B-FP8
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 40960
          max_model_len: 8192
  - group: Microsoft Phi
    tag: phi
    models:
      - model: Phi-4
        mad_tag: pyt_vllm_phi-4
        model_repo: microsoft/phi-4
        url: https://huggingface.co/microsoft/phi-4
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 16384
          max_model_len: 8192
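Each config block in this file maps one-to-one onto vLLM engine arguments. As a minimal sketch (not part of these files), the GPT OSS 120B entry above could be exercised through vLLM's offline Python API roughly as follows; the script and prompt are illustrative assumptions, while the model id and parameter values come from the entry itself:

# Sketch: mapping one catalog entry onto vLLM's offline API.
# Assumes an 8-GPU ROCm host with the weights available; the
# keyword names follow vLLM's EngineArgs.
from vllm import LLM, SamplingParams

llm = LLM(
    model="openai/gpt-oss-120b",
    tensor_parallel_size=8,        # tp: 8
    dtype="auto",                  # dtype: auto
    kv_cache_dtype="auto",         # kv_cache_dtype: auto
    max_num_batched_tokens=8192,   # max_num_batched_tokens: 8192
    max_model_len=8192,            # max_model_len: 8192
)
out = llm.generate(["Hello"], SamplingParams(max_tokens=32))
print(out[0].outputs[0].text)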
@@ -1,316 +0,0 @@
dockers:
  - pull_tag: rocm/vllm:rocm7.0.0_vllm_0.11.1_20251103
    docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm7.0.0_vllm_0.11.1_20251103/images/sha256-8d60429043d4d00958da46039a1de0d9b82df814d45da482497eef26a6076506
    components:
      ROCm: 7.0.0
      vLLM: 0.11.1 (0.11.1rc2.dev141+g38f225c2a.rocm700)
      PyTorch: 2.9.0a0+git1c57644
      hipBLASLt: 1.0.0
    dockerfile:
      commit: 38f225c2abeadc04c2cc398814c2f53ea02c3c72
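The pull_tag above is an ordinary Docker image reference, so fetching it follows the usual ROCm container workflow. A minimal sketch, assuming Docker and a ROCm-capable host; the device flags are the ones AMD's container documentation uses, and the script itself is illustrative:

# Sketch: pull the listed vLLM container and print a launch command.
# /dev/kfd and /dev/dri are the standard device nodes for exposing
# AMD GPUs to containers; adjust shared memory and mounts as needed.
import subprocess

TAG = "rocm/vllm:rocm7.0.0_vllm_0.11.1_20251103"

subprocess.run(["docker", "pull", TAG], check=True)

launch = [
    "docker", "run", "-it", "--rm",
    "--device=/dev/kfd", "--device=/dev/dri",
    "--group-add", "video", "--ipc=host", "--shm-size", "16G",
    TAG,
]
print("launch with:", " ".join(launch))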
model_groups:
  - group: Meta Llama
    tag: llama
    models:
      - model: Llama 2 70B
        mad_tag: pyt_vllm_llama-2-70b
        model_repo: meta-llama/Llama-2-70b-chat-hf
        url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 4096
          max_model_len: 4096
      - model: Llama 3.1 8B
        mad_tag: pyt_vllm_llama-3.1-8b
        model_repo: meta-llama/Llama-3.1-8B-Instruct
        url: https://huggingface.co/meta-llama/Llama-3.1-8B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.1 8B FP8
        mad_tag: pyt_vllm_llama-3.1-8b_fp8
        model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV
        url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV
        precision: float8
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.1 405B
        mad_tag: pyt_vllm_llama-3.1-405b
        model_repo: meta-llama/Llama-3.1-405B-Instruct
        url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.1 405B FP8
        mad_tag: pyt_vllm_llama-3.1-405b_fp8
        model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV
        url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.1 405B MXFP4
        mad_tag: pyt_vllm_llama-3.1-405b_fp4
        model_repo: amd/Llama-3.1-405B-Instruct-MXFP4-Preview
        url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-MXFP4-Preview
        precision: float4
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.3 70B
        mad_tag: pyt_vllm_llama-3.3-70b
        model_repo: meta-llama/Llama-3.3-70B-Instruct
        url: https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.3 70B FP8
        mad_tag: pyt_vllm_llama-3.3-70b_fp8
        model_repo: amd/Llama-3.3-70B-Instruct-FP8-KV
        url: https://huggingface.co/amd/Llama-3.3-70B-Instruct-FP8-KV
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 3.3 70B MXFP4
        mad_tag: pyt_vllm_llama-3.3-70b_fp4
        model_repo: amd/Llama-3.3-70B-Instruct-MXFP4-Preview
        url: https://huggingface.co/amd/Llama-3.3-70B-Instruct-MXFP4-Preview
        precision: float4
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 131072
          max_model_len: 8192
      - model: Llama 4 Scout 17Bx16E
        mad_tag: pyt_vllm_llama-4-scout-17b-16e
        model_repo: meta-llama/Llama-4-Scout-17B-16E-Instruct
        url: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 32768
          max_model_len: 8192
      - model: Llama 4 Maverick 17Bx128E
        mad_tag: pyt_vllm_llama-4-maverick-17b-128e
        model_repo: meta-llama/Llama-4-Maverick-17B-128E-Instruct
        url: https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 32768
          max_model_len: 8192
      - model: Llama 4 Maverick 17Bx128E FP8
        mad_tag: pyt_vllm_llama-4-maverick-17b-128e_fp8
        model_repo: meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
        url: https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 131072
          max_model_len: 8192
  - group: DeepSeek
    tag: deepseek
    models:
      - model: DeepSeek R1 0528 FP8
        mad_tag: pyt_vllm_deepseek-r1
        model_repo: deepseek-ai/DeepSeek-R1-0528
        url: https://huggingface.co/deepseek-ai/DeepSeek-R1-0528
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_seqs: 1024
          max_num_batched_tokens: 131072
          max_model_len: 8192
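The DeepSeek R1 entry above is the only one that also pins max_num_seqs; otherwise every config key in these files has a matching vllm serve flag. A minimal sketch of turning one of these mappings into a serve command line; the helper and its name are illustrative assumptions, while the flag names are vLLM's:

# Sketch: build a `vllm serve` command from a config entry like those above.
# The FLAG_BY_KEY mapping and serve_command helper are hypothetical.
FLAG_BY_KEY = {
    "tp": "--tensor-parallel-size",
    "dtype": "--dtype",
    "kv_cache_dtype": "--kv-cache-dtype",
    "max_num_seqs": "--max-num-seqs",
    "max_num_batched_tokens": "--max-num-batched-tokens",
    "max_model_len": "--max-model-len",
}

def serve_command(model_repo: str, config: dict) -> list[str]:
    cmd = ["vllm", "serve", model_repo]
    for key, value in config.items():
        cmd += [FLAG_BY_KEY[key], str(value)]
    return cmd

# e.g. the DeepSeek R1 0528 FP8 entry:
print(" ".join(serve_command(
    "deepseek-ai/DeepSeek-R1-0528",
    {"tp": 8, "dtype": "auto", "kv_cache_dtype": "fp8",
     "max_num_seqs": 1024, "max_num_batched_tokens": 131072,
     "max_model_len": 8192},
)))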
  - group: OpenAI GPT OSS
    tag: gpt-oss
    models:
      - model: GPT OSS 20B
        mad_tag: pyt_vllm_gpt-oss-20b
        model_repo: openai/gpt-oss-20b
        url: https://huggingface.co/openai/gpt-oss-20b
        precision: bfloat16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 8192
          max_model_len: 8192
      - model: GPT OSS 120B
        mad_tag: pyt_vllm_gpt-oss-120b
        model_repo: openai/gpt-oss-120b
        url: https://huggingface.co/openai/gpt-oss-120b
        precision: bfloat16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 8192
          max_model_len: 8192
  - group: Mistral AI
    tag: mistral
    models:
      - model: Mixtral MoE 8x7B
        mad_tag: pyt_vllm_mixtral-8x7b
        model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1
        url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 32768
          max_model_len: 8192
      - model: Mixtral MoE 8x7B FP8
        mad_tag: pyt_vllm_mixtral-8x7b_fp8
        model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
        url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 32768
          max_model_len: 8192
      - model: Mixtral MoE 8x22B
        mad_tag: pyt_vllm_mixtral-8x22b
        model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1
        url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 65536
          max_model_len: 8192
      - model: Mixtral MoE 8x22B FP8
        mad_tag: pyt_vllm_mixtral-8x22b_fp8
        model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
        url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 65536
          max_model_len: 8192
  - group: Qwen
    tag: qwen
    models:
      - model: Qwen3 8B
        mad_tag: pyt_vllm_qwen3-8b
        model_repo: Qwen/Qwen3-8B
        url: https://huggingface.co/Qwen/Qwen3-8B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 32B
        mad_tag: pyt_vllm_qwen3-32b
        model_repo: Qwen/Qwen3-32B
        url: https://huggingface.co/Qwen/Qwen3-32B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 30B A3B
        mad_tag: pyt_vllm_qwen3-30b-a3b
        model_repo: Qwen/Qwen3-30B-A3B
        url: https://huggingface.co/Qwen/Qwen3-30B-A3B
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 30B A3B FP8
        mad_tag: pyt_vllm_qwen3-30b-a3b_fp8
        model_repo: Qwen/Qwen3-30B-A3B-FP8
        url: https://huggingface.co/Qwen/Qwen3-30B-A3B-FP8
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 235B A22B
        mad_tag: pyt_vllm_qwen3-235b-a22b
        model_repo: Qwen/Qwen3-235B-A22B
        url: https://huggingface.co/Qwen/Qwen3-235B-A22B
        precision: float16
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 40960
          max_model_len: 8192
      - model: Qwen3 235B A22B FP8
        mad_tag: pyt_vllm_qwen3-235b-a22b_fp8
        model_repo: Qwen/Qwen3-235B-A22B-FP8
        url: https://huggingface.co/Qwen/Qwen3-235B-A22B-FP8
        precision: float8
        config:
          tp: 8
          dtype: auto
          kv_cache_dtype: fp8
          max_num_batched_tokens: 40960
          max_model_len: 8192
  - group: Microsoft Phi
    tag: phi
    models:
      - model: Phi-4
        mad_tag: pyt_vllm_phi-4
        model_repo: microsoft/phi-4
        url: https://huggingface.co/microsoft/phi-4
        precision: float16
        config:
          tp: 1
          dtype: auto
          kv_cache_dtype: auto
          max_num_batched_tokens: 16384
          max_model_len: 8192
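Both deleted files are plain YAML, so the catalog is straightforward to consume programmatically. A minimal sketch, assuming the file above is saved as vllm-models.yaml (the filename is an assumption) and PyYAML is available; the key names come from the data itself:

# Sketch: walk the model catalog and print one line per model.
import yaml

with open("vllm-models.yaml") as f:
    data = yaml.safe_load(f)

for group in data["model_groups"]:
    for entry in group["models"]:
        cfg = entry["config"]
        print(f"{group['tag']:10} {entry['mad_tag']:40} "
              f"tp={cfg['tp']} precision={entry['precision']}")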
@@ -1,159 +0,0 @@
vllm_benchmark:
  unified_docker:
    latest:
      pull_tag: rocm/vllm:rocm6.3.1_instinct_vllm0.7.3_20250325
      docker_hub_url: https://hub.docker.com/layers/rocm/vllm/rocm6.3.1_instinct_vllm0.7.3_20250325/images/sha256-25245924f61750b19be6dcd8e787e46088a496c1fe17ee9b9e397f3d84d35640
      rocm_version: 6.3.1
      vllm_version: 0.7.3
      pytorch_version: 2.7.0 (dev nightly)
      hipblaslt_version: 0.13
  model_groups:
    - group: Llama
      tag: llama
      models:
        - model: Llama 3.1 8B
          mad_tag: pyt_vllm_llama-3.1-8b
          model_repo: meta-llama/Llama-3.1-8B-Instruct
          url: https://huggingface.co/meta-llama/Llama-3.1-8B
          precision: float16
        - model: Llama 3.1 70B
          mad_tag: pyt_vllm_llama-3.1-70b
          model_repo: meta-llama/Llama-3.1-70B-Instruct
          url: https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct
          precision: float16
        - model: Llama 3.1 405B
          mad_tag: pyt_vllm_llama-3.1-405b
          model_repo: meta-llama/Llama-3.1-405B-Instruct
          url: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
          precision: float16
        - model: Llama 3.2 11B Vision
          mad_tag: pyt_vllm_llama-3.2-11b-vision-instruct
          model_repo: meta-llama/Llama-3.2-11B-Vision-Instruct
          url: https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct
          precision: float16
        - model: Llama 2 7B
          mad_tag: pyt_vllm_llama-2-7b
          model_repo: meta-llama/Llama-2-7b-chat-hf
          url: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
          precision: float16
        - model: Llama 2 70B
          mad_tag: pyt_vllm_llama-2-70b
          model_repo: meta-llama/Llama-2-70b-chat-hf
          url: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
          precision: float16
        - model: Llama 3.1 8B FP8
          mad_tag: pyt_vllm_llama-3.1-8b_fp8
          model_repo: amd/Llama-3.1-8B-Instruct-FP8-KV
          url: https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV
          precision: float8
        - model: Llama 3.1 70B FP8
          mad_tag: pyt_vllm_llama-3.1-70b_fp8
          model_repo: amd/Llama-3.1-70B-Instruct-FP8-KV
          url: https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV
          precision: float8
        - model: Llama 3.1 405B FP8
          mad_tag: pyt_vllm_llama-3.1-405b_fp8
          model_repo: amd/Llama-3.1-405B-Instruct-FP8-KV
          url: https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV
          precision: float8
    - group: Mistral
      tag: mistral
      models:
        - model: Mixtral MoE 8x7B
          mad_tag: pyt_vllm_mixtral-8x7b
          model_repo: mistralai/Mixtral-8x7B-Instruct-v0.1
          url: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
          precision: float16
        - model: Mixtral MoE 8x22B
          mad_tag: pyt_vllm_mixtral-8x22b
          model_repo: mistralai/Mixtral-8x22B-Instruct-v0.1
          url: https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
          precision: float16
        - model: Mistral 7B
          mad_tag: pyt_vllm_mistral-7b
          model_repo: mistralai/Mistral-7B-Instruct-v0.3
          url: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3
          precision: float16
        - model: Mixtral MoE 8x7B FP8
          mad_tag: pyt_vllm_mixtral-8x7b_fp8
          model_repo: amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
          url: https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV
          precision: float8
        - model: Mixtral MoE 8x22B FP8
          mad_tag: pyt_vllm_mixtral-8x22b_fp8
          model_repo: amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
          url: https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV
          precision: float8
        - model: Mistral 7B FP8
          mad_tag: pyt_vllm_mistral-7b_fp8
          model_repo: amd/Mistral-7B-v0.1-FP8-KV
          url: https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV
          precision: float8
    - group: Qwen
      tag: qwen
      models:
        - model: Qwen2 7B
          mad_tag: pyt_vllm_qwen2-7b
          model_repo: Qwen/Qwen2-7B-Instruct
          url: https://huggingface.co/Qwen/Qwen2-7B-Instruct
          precision: float16
        - model: Qwen2 72B
          mad_tag: pyt_vllm_qwen2-72b
          model_repo: Qwen/Qwen2-72B-Instruct
          url: https://huggingface.co/Qwen/Qwen2-72B-Instruct
          precision: float16
    - group: JAIS
      tag: jais
      models:
        - model: JAIS 13B
          mad_tag: pyt_vllm_jais-13b
          model_repo: core42/jais-13b-chat
          url: https://huggingface.co/core42/jais-13b-chat
          precision: float16
        - model: JAIS 30B
          mad_tag: pyt_vllm_jais-30b
          model_repo: core42/jais-30b-chat-v3
          url: https://huggingface.co/core42/jais-30b-chat-v3
          precision: float16
    - group: DBRX
      tag: dbrx
      models:
        - model: DBRX Instruct
          mad_tag: pyt_vllm_dbrx-instruct
          model_repo: databricks/dbrx-instruct
          url: https://huggingface.co/databricks/dbrx-instruct
          precision: float16
        - model: DBRX Instruct FP8
          mad_tag: pyt_vllm_dbrx_fp8
          model_repo: amd/dbrx-instruct-FP8-KV
          url: https://huggingface.co/amd/dbrx-instruct-FP8-KV
          precision: float8
    - group: Gemma
      tag: gemma
      models:
        - model: Gemma 2 27B
          mad_tag: pyt_vllm_gemma-2-27b
          model_repo: google/gemma-2-27b
          url: https://huggingface.co/google/gemma-2-27b
          precision: float16
    - group: Cohere
      tag: cohere
      models:
        - model: C4AI Command R+ 08-2024
          mad_tag: pyt_vllm_c4ai-command-r-plus-08-2024
          model_repo: CohereForAI/c4ai-command-r-plus-08-2024
          url: https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024
          precision: float16
        - model: C4AI Command R+ 08-2024 FP8
          mad_tag: pyt_vllm_command-r-plus_fp8
          model_repo: amd/c4ai-command-r-plus-FP8-KV
          url: https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV
          precision: float8
    - group: DeepSeek
      tag: deepseek
      models:
        - model: DeepSeek MoE 16B
          mad_tag: pyt_vllm_deepseek-moe-16b-chat
          model_repo: deepseek-ai/deepseek-moe-16b-chat
          url: https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat
          precision: float16
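Each model_repo in this last catalog is a Hugging Face repository id, so weights can be prefetched before benchmarking. A minimal sketch using huggingface_hub's snapshot_download; note that gated repositories (e.g. meta-llama/*) additionally require an access token:

# Sketch: prefetch weights for one catalog entry with huggingface_hub.
# snapshot_download resolves the repo id to a local cache directory.
from huggingface_hub import snapshot_download

path = snapshot_download(repo_id="deepseek-ai/deepseek-moe-16b-chat")
print("weights cached at:", path)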