mirror of https://github.com/ROCm/ROCm.git
synced 2026-01-09 14:48:06 -05:00

Compare commits: 49 commits, rocm-7.1.1 ... update-HIP

Commit SHA1s:
af9f9f1919, 64ecf4ec07, 304d422ab2, e3d4b4836e, edf3a3975b, 4964169c82,
d734eea462, 690cc0e1d0, 3c34ba2980, f05dcf6b00, a633bef754, 2e5dad7628,
b50e861eef, 69c3ec2167, e097719ba5, 34fee2db6c, f2962df5cf, b98d9db046,
12f54bf824, faeb282e76, d1ca7ebd66, 01fd243fb8, 9a62ff8651, 92c792b623,
7e7534f5aa, 1f36fa0d3e, 3e9f3a6986, 702500dacc, 9bb63ef477, 21167b9441,
18772310d2, b71477a521, ecc4d589e0, deb4895b11, b8ce0db5b2, 5e9fea1494,
d4d03208be, 2b044f98c9, 6791382b26, 8c036531e8, 484cbefc2e, 721b60d52f,
8ebe7be283, 7e8947fdb4, 66cac5301f, 9f3a1de117, 0915fb17e8, 0d3eb1d774,
7a258cdba9
@@ -26,6 +26,7 @@ ASm
ATI
AddressSanitizer
AlexNet
Andrej
Arb
Autocast
BARs

@@ -73,6 +74,7 @@ Conda
ConnectX
CuPy
Dashboarding
DBRX
DDR
DF
DGEMM

@@ -91,6 +93,7 @@ DataFrame
DataLoader
DataParallel
Debian
DeepSeek
DeepSpeed
Dependabot
Deprecations

@@ -107,6 +110,7 @@ FFT
FFTs
FFmpeg
FHS
FIXME
FMA
FP
FX

@@ -127,10 +131,12 @@ GDS
GEMM
GEMMs
GFortran
Gemma
GiB
GIM
GL
GLXT
Gloo
GMI
GPG
GPR

@@ -149,6 +155,8 @@ HGX
HIPCC
HIPExtension
HIPIFY
HIPification
HIPify
HPC
HPCG
HPE

@@ -184,15 +192,17 @@ Interop
Intersphinx
Intra
Ioffe
JAX's
Jinja
JSON
Jupyter
KFD
KFDTest
KiB
KMD
KV
KVM
Karpathy's
KiB
Keras
Khronos
LAPACK

@@ -243,6 +253,8 @@ MyEnvironment
MyST
NBIO
NBIOs
NCCL
NCF
NIC
NICs
NLI

@@ -284,6 +296,7 @@ OpenVX
OpenXLA
Oversubscription
PagedAttention
Pallas
PCC
PCI
PCIe

@@ -401,9 +414,14 @@ TensorFlow
TensorParallel
ToC
TorchAudio
torchaudio
TorchElastic
TorchMIGraphX
torchrec
TorchScript
TorchServe
torchserve
torchtext
TorchVision
TransferBench
TrapStatus

@@ -510,6 +528,9 @@ copyable
cpp
csn
cuBLAS
cuda
cuDNN
cudnn
cuFFT
cuLIB
cuRAND

@@ -564,6 +585,7 @@ gRPC
galb
gcc
gdb
gemm
gfortran
gfx
githooks

@@ -628,6 +650,7 @@ len
libfabric
libjpeg
libs
linalg
linearized
linter
linux

@@ -650,6 +673,7 @@ mutex
mvffr
namespace
namespaces
nanoGPT
num
numref
ocl

@@ -661,7 +685,9 @@ optimizers
os
oversubscription
pageable
pallas
parallelization
parallelizing
parameterization
passthrough
perfcounter

@@ -674,6 +700,7 @@ prebuilt
precompiled
preconditioner
preconfigured
preemptible
prefetch
prefetchable
prefill

@@ -690,10 +717,13 @@ profilers
protobuf
pseudorandom
py
recommender
recommenders
quantile
quantizer
quasirandom
queueing
radeon
rccl
rdc
rdma

@@ -747,6 +777,7 @@ runtimes
sL
scalability
scalable
scipy
seealso
sendmsg
seqs

@@ -754,6 +785,7 @@ serializers
shader
sharding
sigmoid
single-node
sm
smi
softmax
LICENSE (2 changes)

@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.
+Copyright (c) 2023 - 2025 Advanced Micro Devices, Inc. All rights reserved.

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
RELEASE.md (17 changes)

@@ -275,7 +275,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
 <th rowspan="7">System management</th>
 <td><a href="https://rocm.docs.amd.com/projects/amdsmi/en/docs-6.3.1/index.html">AMD SMI</a></td>
 <td>24.7.1 ⇒ <a href="#amd-smi-24-7-1">24.7.1</a></td>
-<td><a href="https://github.com/ROCm/rocm-cmake"><i class="fab fa-github fa-lg"></i></a></td>
+<td><a href="https://github.com/ROCm/amdsmi"><i class="fab fa-github fa-lg"></i></a></td>
 </tr>
 <tr>
 <td><a href="https://rocm.docs.amd.com/projects/rdc/en/docs-6.3.1/index.html">ROCm Data Center Tool</a></td>
@@ -328,7 +328,7 @@ Click {fab}`github` to go to the component's source code on GitHub.
 </tr>
 <tr>
 <td><a href="https://rocm.docs.amd.com/projects/rocprofiler-sdk/en/docs-6.3.1/index.html">ROCprofiler-SDK</a></td>
-<td>0.5.0</td>
+<td>0.5.0 ⇒ <a href="#rocprofiler-sdk-0-5-0">0.5.0</a></td>
 <td><a href="https://github.com/ROCm/rocprofiler-sdk/"><i
 class="fab fa-github fa-lg"></i></a></td>
 </tr>
@@ -429,11 +429,11 @@ See the full [AMD SMI changelog](https://github.com/ROCm/amdsmi/blob/6.3.x/CHANG

 #### Added

-* An activeQueues set that tracks only the queues that have a command submitted to them, which allows fast iteration in ``waitActiveStreams``.
+* An activeQueues set that tracks only the queues that have a command submitted to them, which allows fast iteration in `waitActiveStreams`.

-#### Resolved issues
+#### Optimized

-* A deadlock in a specific customer application by preventing hipLaunchKernel latency degradation with number of idle streams.
+* Mechanism of preventing `hipLaunchKernel` latency degradation with number of idle streams is implemented for performance improvement.

 ### **HIPIFY** (18.0.0)

@@ -505,6 +505,13 @@ See the full [AMD SMI changelog](https://github.com/ROCm/amdsmi/blob/6.3.x/CHANG
 * Fixed a minor issue for users upgrading to ROCm 6.3 from 6.2 post-rename from `omnitrace`.
   See [ROCm Compute Profiler and ROCm Systems Profiler post-upgrade issues](#rocm-compute-profiler-and-rocm-systems-profiler-post-upgrade-issues).

+### **ROCprofiler-SDK** (0.5.0)
+
+#### Added
+
+* SIMD_UTILIZATION metric.
+* New <a href="https://rocm.docs.amd.com/projects/rdc/en/docs-6.3.1/index.html">ROCm Data Center (RDC)</a> ops metrics.
+
 ## ROCm known issues

 ROCm known issues are noted on {fab}`github` [GitHub](https://github.com/ROCm/ROCm/labels/Verified%20Issue). For known
@@ -25,15 +25,15 @@ additional licenses. Please review individual repositories for more information.
 <!-- spellcheck-disable -->
 | Component | License |
 |:---------------------|:-------------------------|
-| [AMD Compute Language Runtime (CLR)](https://github.com/ROCm/clr) | [MIT](https://github.com/ROCm/clr/blob/develop/LICENCE) |
-| [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/develop/LICENSE) |
+| [AMD Compute Language Runtime (CLR)](https://github.com/ROCm/clr) | [MIT](https://github.com/ROCm/clr/blob/amd-staging/LICENCE) |
+| [AMD SMI](https://github.com/ROCm/amdsmi) | [MIT](https://github.com/ROCm/amdsmi/blob/amd-staging/LICENSE) |
 | [aomp](https://github.com/ROCm/aomp/) | [Apache 2.0](https://github.com/ROCm/aomp/blob/aomp-dev/LICENSE) |
 | [aomp-extras](https://github.com/ROCm/aomp-extras/) | [MIT](https://github.com/ROCm/aomp-extras/blob/aomp-dev/LICENSE) |
 | [Code Object Manager (Comgr)](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/comgr) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/comgr/LICENSE.txt) |
 | [Composable Kernel](https://github.com/ROCm/composable_kernel) | [MIT](https://github.com/ROCm/composable_kernel/blob/develop/LICENSE) |
 | [half](https://github.com/ROCm/half/) | [MIT](https://github.com/ROCm/half/blob/rocm/LICENSE.txt) |
-| [HIP](https://github.com/ROCm/HIP/) | [MIT](https://github.com/ROCm/HIP/blob/develop/LICENSE.txt) |
-| [hipamd](https://github.com/ROCm/clr/tree/develop/hipamd) | [MIT](https://github.com/ROCm/clr/blob/develop/hipamd/LICENSE.txt) |
+| [HIP](https://github.com/ROCm/HIP/) | [MIT](https://github.com/ROCm/HIP/blob/amd-staging/LICENSE.txt) |
+| [hipamd](https://github.com/ROCm/clr/tree/amd-staging/hipamd) | [MIT](https://github.com/ROCm/clr/blob/amd-staging/hipamd/LICENSE.txt) |
 | [hipBLAS](https://github.com/ROCm/hipBLAS/) | [MIT](https://github.com/ROCm/hipBLAS/blob/develop/LICENSE.md) |
 | [hipBLASLt](https://github.com/ROCm/hipBLASLt/) | [MIT](https://github.com/ROCm/hipBLASLt/blob/develop/LICENSE.md) |
 | [HIPCC](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/hipcc) | [MIT](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/hipcc/LICENSE.txt) |
@@ -58,29 +58,29 @@ additional licenses. Please review individual repositories for more information.
 | [ROCdbgapi](https://github.com/ROCm/ROCdbgapi/) | [MIT](https://github.com/ROCm/ROCdbgapi/blob/amd-staging/LICENSE.txt) |
 | [rocDecode](https://github.com/ROCm/rocDecode) | [MIT](https://github.com/ROCm/rocDecode/blob/develop/LICENSE) |
 | [rocFFT](https://github.com/ROCm/rocFFT/) | [MIT](https://github.com/ROCm/rocFFT/blob/develop/LICENSE.md) |
-| [ROCgdb](https://github.com/ROCm/ROCgdb/) | [GNU General Public License v3.0](https://github.com/ROCm/ROCgdb/blob/amd-master/COPYING3) |
+| [ROCgdb](https://github.com/ROCm/ROCgdb/) | [GNU General Public License v3.0](https://github.com/ROCm/ROCgdb/blob/amd-staging/COPYING3) |
 | [rocJPEG](https://github.com/ROCm/rocJPEG/) | [MIT](https://github.com/ROCm/rocJPEG/blob/develop/LICENSE) |
 | [ROCK-Kernel-Driver](https://github.com/ROCm/ROCK-Kernel-Driver/) | [GPL 2.0 WITH Linux-syscall-note](https://github.com/ROCm/ROCK-Kernel-Driver/blob/master/COPYING) |
 | [rocminfo](https://github.com/ROCm/rocminfo/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocminfo/blob/amd-staging/License.txt) |
-| [ROCm Bandwidth Test](https://github.com/ROCm/rocm_bandwidth_test/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
+| [ROCm Bandwidth Test](https://github.com/ROCm/rocm_bandwidth_test/) | [MIT](https://github.com/ROCm/rocm_bandwidth_test/blob/master/LICENSE.txt) |
 | [ROCm CMake](https://github.com/ROCm/rocm-cmake/) | [MIT](https://github.com/ROCm/rocm-cmake/blob/develop/LICENSE) |
 | [ROCm Communication Collectives Library (RCCL)](https://github.com/ROCm/rccl/) | [Custom](https://github.com/ROCm/rccl/blob/develop/LICENSE.txt) |
 | [ROCm-Core](https://github.com/ROCm/rocm-core) | [MIT](https://github.com/ROCm/rocm-core/blob/master/copyright) |
 | [ROCm Compute Profiler](https://github.com/ROCm/rocprofiler-compute) | [MIT](https://github.com/ROCm/rocprofiler-compute/blob/amd-staging/LICENSE) |
-| [ROCm Data Center (RDC)](https://github.com/ROCm/rdc/) | [MIT](https://github.com/ROCm/rdc/blob/develop/LICENSE) |
+| [ROCm Data Center (RDC)](https://github.com/ROCm/rdc/) | [MIT](https://github.com/ROCm/rdc/blob/amd-staging/LICENSE) |
 | [ROCm-Device-Libs](https://github.com/ROCm/llvm-project/tree/amd-staging/amd/device-libs) | [The University of Illinois/NCSA](https://github.com/ROCm/llvm-project/blob/amd-staging/amd/device-libs/LICENSE.TXT) |
-| [ROCm-OpenCL-Runtime](https://github.com/ROCm/clr/tree/develop/opencl) | [MIT](https://github.com/ROCm/clr/blob/develop/opencl/LICENSE.txt) |
+| [ROCm-OpenCL-Runtime](https://github.com/ROCm/clr/tree/amd-staging/opencl) | [MIT](https://github.com/ROCm/clr/blob/amd-staging/opencl/LICENSE.txt) |
 | [ROCm Performance Primitives (RPP)](https://github.com/ROCm/rpp) | [MIT](https://github.com/ROCm/rpp/blob/develop/LICENSE) |
-| [ROCm SMI Lib](https://github.com/ROCm/rocm_smi_lib/) | [MIT](https://github.com/ROCm/rocm_smi_lib/blob/develop/License.txt) |
+| [ROCm SMI Lib](https://github.com/ROCm/rocm_smi_lib/) | [MIT](https://github.com/ROCm/rocm_smi_lib/blob/amd-staging/License.txt) |
 | [ROCm Systems Profiler](https://github.com/ROCm/rocprofiler-systems) | [MIT](https://github.com/ROCm/rocprofiler-systems/blob/amd-staging/LICENSE) |
 | [ROCm Validation Suite](https://github.com/ROCm/ROCmValidationSuite/) | [MIT](https://github.com/ROCm/ROCmValidationSuite/blob/master/LICENSE) |
 | [rocPRIM](https://github.com/ROCm/rocPRIM/) | [MIT](https://github.com/ROCm/rocPRIM/blob/develop/LICENSE.txt) |
-| [ROCProfiler](https://github.com/ROCm/rocprofiler/) | [MIT](https://github.com/ROCm/rocprofiler/blob/amd-master/LICENSE) |
+| [ROCProfiler](https://github.com/ROCm/rocprofiler/) | [MIT](https://github.com/ROCm/rocprofiler/blob/amd-staging/LICENSE) |
 | [ROCprofiler-SDK](https://github.com/ROCm/rocprofiler-sdk) | [MIT](https://github.com/ROCm/rocprofiler-sdk/blob/amd-mainline/LICENSE) |
 | [rocPyDecode](https://github.com/ROCm/rocPyDecode) | [MIT](https://github.com/ROCm/rocPyDecode/blob/develop/LICENSE) |
 | [rocRAND](https://github.com/ROCm/rocRAND/) | [MIT](https://github.com/ROCm/rocRAND/blob/develop/LICENSE.txt) |
 | [ROCr Debug Agent](https://github.com/ROCm/rocr_debug_agent/) | [The University of Illinois/NCSA](https://github.com/ROCm/rocr_debug_agent/blob/amd-staging/LICENSE.txt) |
-| [ROCR-Runtime](https://github.com/ROCm/ROCR-Runtime/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCR-Runtime/blob/master/LICENSE.txt) |
+| [ROCR-Runtime](https://github.com/ROCm/ROCR-Runtime/) | [The University of Illinois/NCSA](https://github.com/ROCm/ROCR-Runtime/blob/amd-staging/LICENSE.txt) |
 | [rocSOLVER](https://github.com/ROCm/rocSOLVER/) | [BSD-2-Clause](https://github.com/ROCm/rocSOLVER/blob/develop/LICENSE.md) |
 | [rocSPARSE](https://github.com/ROCm/rocSPARSE/) | [MIT](https://github.com/ROCm/rocSPARSE/blob/develop/LICENSE.md) |
 | [rocThrust](https://github.com/ROCm/rocThrust/) | [Apache 2.0](https://github.com/ROCm/rocThrust/blob/develop/LICENSE) |
@@ -99,7 +99,7 @@ repositories to distinguish from open sourced packages.
 The following additional terms and conditions apply to your use of ROCm technical documentation.
 ```

-©2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.
+©2023 - 2025 Advanced Micro Devices, Inc. All rights reserved.

 The information presented in this document is for informational purposes only
 and may contain technical inaccuracies, omissions, and typographical errors. The
@@ -1,128 +1,129 @@
-ROCm Version,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
-:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,
-,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
-,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
-,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
-,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
-,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
-,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
-,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,,,
-,Debian 12 [#mic300x-past-60]_,,,,,,,,,,
-,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
-,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
-,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
-,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
-,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
-,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
-,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
-,gfx942,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_
-,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
-,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
-,,,,,,,,,,,
-FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`PyTorch <rocm-install-on-linux:install/3rd-party/pytorch-install>`,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
-:doc:`TensorFlow <rocm-install-on-linux:install/3rd-party/tensorflow-install>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
-:doc:`JAX <rocm-install-on-linux:install/3rd-party/jax-install>`,0.4.35,0.4.35,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
-`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
-,,,,,,,,,,,
-THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,
-`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
-`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
-,,,,,,,,,,,
-THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,,
-Thrust,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
-CUB,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
-,,,,,,,,,,,
-,,,,,,,,,,,
-KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,
-Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
-,,,,,,,,,,,
-ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
-:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
-:doc:`MIOpen <miopen:index>`,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
-:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
-:doc:`rocAL <rocal:index>`,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
-:doc:`rocDecode <rocdecode:index>`,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
-:doc:`rocJPEG <rocjpeg:index>`,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
-:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A
-:doc:`RPP <rpp:index>`,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
-,,,,,,,,,,,
-COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`RCCL <rccl:index>`,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
-,,,,,,,,,,,
-MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,
-`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
-:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
-:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
-:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
-:doc:`hipfort <hipfort:index>`,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
-:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
-,,,,,,,,,,,
-:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
-:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
-:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
-:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
-:doc:`rocBLAS <rocblas:index>`,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
-:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
-:doc:`rocRAND <rocrand:index>`,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
-:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
-:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
-:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
-:doc:`Tensile <tensile:index>`,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
-,,,,,,,,,,,
-PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
-:doc:`hipTensor <hiptensor:index>`,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
-:doc:`rocPRIM <rocprim:index>`,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
-:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
-,,,,,,,,,,,
-SUPPORT LIBS,,,,,,,,,,,
-`hipother <https://github.com/ROCm/hipother>`_,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
-,,,,,,,,,,,
-`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
-`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
-,,,,,,,,,,,
-SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
-:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
-:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
-:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
-:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
-,,,,,,,,,,,
-PERFORMANCE TOOLS,,,,,,,,,,,
-:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
-:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A
-:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A
-:doc:`ROCProfiler <rocprofiler:index>`,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
-,,,,,,,,,,,
-:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A
-:doc:`ROCTracer <roctracer:index>`,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
-,,,,,,,,,,,
-,,,,,,,,,,,
-DEVELOPMENT TOOLS,,,,,,,,,,,
-:doc:`HIPIFY <hipify:index>`,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
-,,,,,,,,,,,
-:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
-:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
-:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
-`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A
-:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
-,,,,,,,,,,,
-COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,,
-`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
-:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
-`Flang <https://github.com/ROCm/flang>`_,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
-:doc:`llvm-project <llvm-project:index>`,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
-`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
-,,,,,,,,,,,
-,,,,,,,,,,,
-RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,,
-:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
-,,,,,,,,,,,
-:doc:`HIP <hip:index>`,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
-,,,,,,,,,,,
-`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
-:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0
+ROCm Version,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.5, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0
+:ref:`Operating systems & kernels <OS-kernel-versions>`,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,,,
+,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2"
+,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5"
+,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2"
+,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8"
+,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4"
+,,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9
+,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,,,
+,Debian 12 [#single-node-past-60]_,,,,,,,,,,,
+,Azure Linux 3.0 [#mic300x-past-60]_,,,,,,,,,,,
+,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3
+,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2
+,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA
+,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3
+,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2
+,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`GPU / LLVM target <rocm-install-on-linux:reference/system-requirements>`,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100
+,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030
+,gfx942,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_
+,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a
+,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908
+,,,,,,,,,,,,
+FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`PyTorch <../compatibility/ml-compatibility/pytorch-compatibility>`,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13"
+:doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1"
+:doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.4.31,0.4.31,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26
+`ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1
+,,,,,,,,,,,,
+THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,,,
+`UCC <https://github.com/ROCm/ucc>`_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0
+`UCX <https://github.com/ROCm/ucx>`_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1
+,,,,,,,,,,,,
+THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,,,
+Thrust,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
+CUB,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1
+,,,,,,,,,,,,
+,,,,,,,,,,,,
+KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,,,,
+Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x"
+,,,,,,,,,,,,
+ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`Composable Kernel <composable_kernel:index>`,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0
+:doc:`MIGraphX <amdmigraphx:index>`,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0
+:doc:`MIOpen <miopen:index>`,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
+:doc:`MIVisionX <mivisionx:index>`,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0
+:doc:`rocAL <rocal:index>`,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
+:doc:`rocDecode <rocdecode:index>`,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A
+:doc:`rocJPEG <rocjpeg:index>`,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A
+:doc:`rocPyDecode <rocpydecode:index>`,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A,N/A
+:doc:`RPP <rpp:index>`,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0
+,,,,,,,,,,,,
+COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`RCCL <rccl:index>`,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3
+,,,,,,,,,,,,
+MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
+`half <https://github.com/ROCm/half>`_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0
+:doc:`hipBLAS <hipblas:index>`,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0
+:doc:`hipBLASLt <hipblaslt:index>`,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0
+:doc:`hipFFT <hipfft:index>`,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13
+:doc:`hipfort <hipfort:index>`,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0
+:doc:`hipRAND <hiprand:index>`,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16
+,,,,,,,,,,,,
+:doc:`hipSOLVER <hipsolver:index>`,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0
+:doc:`hipSPARSE <hipsparse:index>`,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
+:doc:`hipSPARSELt <hipsparselt:index>`,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0
+:doc:`rocALUTION <rocalution:index>`,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3
+:doc:`rocBLAS <rocblas:index>`,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0
+:doc:`rocFFT <rocfft:index>`,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23
+:doc:`rocRAND <rocrand:index>`,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17
+:doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0
+:doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2
+:doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0
+:doc:`Tensile <tensile:src/index>`,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0
+,,,,,,,,,,,,
+PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
+:doc:`hipTensor <hiptensor:index>`,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0
+:doc:`rocPRIM <rocprim:index>`,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0
+:doc:`rocThrust <rocthrust:index>`,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0
+,,,,,,,,,,,,
+SUPPORT LIBS,,,,,,,,,,,,
+`hipother <https://github.com/ROCm/hipother>`_,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
+,,,,,,,,,,,,
+`rocm-core <https://github.com/ROCm/rocm-core>`_,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.5,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0
+`ROCT-Thunk-Interface <https://github.com/ROCm/ROCT-Thunk-Interface>`_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245
+,,,,,,,,,,,,
+SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`AMD SMI <amdsmi:index>`,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2
+:doc:`ROCm Data Center Tool <rdc:index>`,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0
+:doc:`rocminfo <rocminfo:index>`,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
+:doc:`ROCm SMI <rocm_smi_lib:index>`,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0
+:doc:`ROCm Validation Suite <rocmvalidationsuite:index>`,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60105,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000
+,,,,,,,,,,,,
+PERFORMANCE TOOLS,,,,,,,,,,,,
+:doc:`ROCm Bandwidth Test <rocm_bandwidth_test:index>`,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0
+:doc:`ROCm Compute Profiler <rocprofiler-compute:index>`,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A,N/A
+:doc:`ROCm Systems Profiler <rocprofiler-systems:index>`,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A,N/A
+:doc:`ROCProfiler <rocprofiler:index>`,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60105,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000
+,,,,,,,,,,,,
+:doc:`ROCprofiler-SDK <rocprofiler-sdk:index>`,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A,N/A
+:doc:`ROCTracer <roctracer:index>`,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60105,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000
+,,,,,,,,,,,,
+,,,,,,,,,,,,
+DEVELOPMENT TOOLS,,,,,,,,,,,,
+:doc:`HIPIFY <hipify:index>`,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
+,,,,,,,,,,,,
+:doc:`ROCm CMake <rocmcmakebuildtools:index>`,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0
+:doc:`ROCdbgapi <rocdbgapi:index>`,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0
+:doc:`ROCm Debugger (ROCgdb) <rocgdb:index>`,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0
+`rocprofiler-register <https://github.com/ROCm/rocprofiler-register>`_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,0.3.0,N/A,N/A
+:doc:`ROCr Debug Agent <rocr_debug_agent:index>`,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3
+,,,,,,,,,,,,
+COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,,,
+`clang-ocl <https://github.com/ROCm/clang-ocl>`_,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0
+:doc:`hipCC <hipcc:index>`,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0
+`Flang <https://github.com/ROCm/flang>`_,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
+:doc:`llvm-project <llvm-project:index>`,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
+`OpenMP <https://github.com/ROCm/llvm-project/tree/amd-staging/openmp>`_,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483
+,,,,,,,,,,,,
+,,,,,,,,,,,,
+RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,,,
+:doc:`AMD CLR <hip:understand/amd_clr>`,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
+,,,,,,,,,,,,
+:doc:`HIP <hip:index>`,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830
+,,,,,,,,,,,,
+`OpenCL Runtime <https://github.com/ROCm/clr/tree/develop/opencl>`_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0
+:doc:`ROCr Runtime <rocr-runtime:index>`,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0
@@ -32,7 +32,8 @@ compatibility and system requirements.
 ,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9"
 ,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5"
 ,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.9 [#mi300x]_
-,Debian 12 [#mi300x]_,,
+,Debian 12 [#single-node]_,,
+,Azure Linux 3.0 [#mi300x]_,,
 ,.. _architecture-support-compatibility-matrix:,,
 :doc:`Architecture <rocm-install-on-linux:reference/system-requirements>`,CDNA3,CDNA3,CDNA3
 ,CDNA2,CDNA2,CDNA2
@@ -47,9 +48,9 @@ compatibility and system requirements.
 ,gfx908,gfx908,gfx908
 ,,,
 FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix:,,
-:doc:`PyTorch <rocm-install-on-linux:install/3rd-party/pytorch-install>`,"2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13"
-:doc:`TensorFlow <rocm-install-on-linux:install/3rd-party/tensorflow-install>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1"
-:doc:`JAX <rocm-install-on-linux:install/3rd-party/jax-install>`,0.4.35,0.4.35,0.4.26
+:doc:`PyTorch <../compatibility/ml-compatibility/pytorch-compatibility>`,"2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13"
+:doc:`TensorFlow <../compatibility/ml-compatibility/tensorflow-compatibility>`,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1"
+:doc:`JAX <../compatibility/ml-compatibility/jax-compatibility>`,0.4.31,0.4.31,0.4.26
 `ONNX Runtime <https://onnxruntime.ai/docs/build/eps.html#amd-migraphx>`_,1.17.3,1.17.3,1.17.3
 ,,,
 THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix:,,
@@ -94,7 +95,7 @@ compatibility and system requirements.
 :doc:`rocSOLVER <rocsolver:index>`,3.27.0,3.27.0,3.26.0
 :doc:`rocSPARSE <rocsparse:index>`,3.3.0,3.3.0,3.2.0
 :doc:`rocWMMA <rocwmma:index>`,1.6.0,1.6.0,1.5.0
-:doc:`Tensile <tensile:index>`,4.42.0,4.42.0,4.41.0
+:doc:`Tensile <tensile:src/index>`,4.42.0,4.42.0,4.41.0
 ,,,
 PRIMITIVES,.. _primitivelibs-support-compatibility-matrix:,,
 :doc:`hipCUB <hipcub:index>`,3.3.0,3.3.0,3.2.0
@@ -146,7 +147,8 @@ compatibility and system requirements.

 .. rubric:: Footnotes

-.. [#mi300x] Oracle Linux and Debian are supported only on AMD Instinct MI300X.
+.. [#mi300x] Oracle Linux and Azure Linux are supported only on AMD Instinct MI300X.
+.. [#single-node] Debian 12 is supported only on AMD Instinct MI300X for single-node functionality.
 .. [#mi300_620] **For ROCm 6.2.0** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
 .. [#kfd_support] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported.
 .. [#ROCT-rocr] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package.
@@ -183,6 +185,7 @@ Use this lookup table to confirm which operating system and kernel versions are
 ,8.9, 5.15.0
 ,,
 `Debian <https://www.debian.org/download>`_,12, 6.1
+`Azure Linux <https://techcommunity.microsoft.com/blog/linuxandopensourceblog/azure-linux-3-0-now-in-preview-on-azure-kubernetes-service-v1-31/4287229>`_,3.0, 6.6

 ..
   Footnotes and ref anchors in below historical tables should be appended with "-past-60", to differentiate from the
@@ -210,7 +213,8 @@ Expand for full historical view of:

 .. rubric:: Footnotes

-.. [#mic300x-past-60] Oracle Linux and Debian are supported only on AMD Instinct MI300X.
+.. [#mic300x-past-60] Oracle Linux and Azure Linux are supported only on AMD Instinct MI300X.
+.. [#single-node-past-60] Debian 12 is supported only on AMD Instinct MI300X for single-node functionality.
 .. [#mi300_624-past-60] **For ROCm 6.2.4** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
 .. [#mi300_622-past-60] **For ROCm 6.2.2** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
 .. [#mi300_621-past-60] **For ROCm 6.2.1** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE].
||||
663
docs/compatibility/ml-compatibility/jax-compatibility.rst
Normal file
663
docs/compatibility/ml-compatibility/jax-compatibility.rst
Normal file
@@ -0,0 +1,663 @@
|
||||
.. meta::
   :description: JAX compatibility
   :keywords: GPU, JAX compatibility

*******************************************************************************
JAX compatibility
*******************************************************************************

JAX provides a NumPy-like API, which combines automatic differentiation and the
Accelerated Linear Algebra (XLA) compiler to achieve high-performance machine
learning at scale.

JAX uses composable transformations of Python and NumPy through just-in-time (JIT) compilation,
automatic vectorization, and parallelization. To learn about JAX, including profiling and
optimizations, see the official `JAX documentation
<https://jax.readthedocs.io/en/latest/notebooks/quickstart.html>`_.
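These transformations compose on ordinary Python functions. The following
minimal sketch (an illustration added here, not part of the upstream JAX
documentation) JIT-compiles a toy loss with ``jax.jit``, differentiates it with
``jax.grad``, and batches it with ``jax.vmap``:

.. code-block:: python

   import jax
   import jax.numpy as jnp

   def loss(w, x):
       # Toy scalar loss: mean squared activation of a linear layer.
       return jnp.mean(jnp.tanh(x @ w) ** 2)

   grad_loss = jax.jit(jax.grad(loss))               # XLA-compiled gradient w.r.t. w
   batched_loss = jax.vmap(loss, in_axes=(None, 0))  # vectorize over a batch of x

   kw, kx = jax.random.split(jax.random.PRNGKey(0))
   w = jax.random.normal(kw, (8, 4))
   x = jax.random.normal(kx, (16, 8))                # batch of 16 inputs

   print(grad_loss(w, x[0]).shape)                   # (8, 4)
   print(batched_loss(w, x).shape)                   # (16,)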
ROCm support for JAX is upstreamed and users can build the official source code with ROCm
support:

- ROCm JAX release:

  - Offers AMD-validated and community :ref:`Docker images <jax-docker-compat>` with ROCm and JAX pre-installed.

  - ROCm JAX repository: `<https://github.com/ROCm/jax>`__

  - See the :doc:`ROCm JAX installation guide <rocm-install-on-linux:install/3rd-party/jax-install>`
    to get started.

- Official JAX release:

  - Official JAX repository: `<https://github.com/jax-ml/jax>`__

  - See the `AMD GPU (Linux) installation section
    <https://jax.readthedocs.io/en/latest/installation.html#amd-gpu-linux>`_ in the JAX
    documentation.

.. note::

   AMD releases official `ROCm JAX Docker images <https://hub.docker.com/r/rocm/jax>`_
   quarterly alongside new ROCm releases. These images undergo full AMD testing.
   `Community ROCm JAX Docker images <https://hub.docker.com/r/rocm/jax-community>`_
   follow upstream JAX releases and use the latest available ROCm version.
.. _jax-docker-compat:

Docker image compatibility
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes ready-made `JAX <https://hub.docker.com/r/rocm/jax/>`_
images with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are validated for
`ROCm 6.3.1 <https://repo.radeon.com/rocm/apt/6.3.1/>`_. Click the |docker-icon|
icon to view the image on Docker Hub.

.. list-table:: JAX Docker image components
   :header-rows: 1

   * - Docker image
     - JAX
     - Linux
     - Python
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/jax/rocm6.3.1-jax0.4.31-py3.12/images/sha256-085a0cd5207110922f1fca684933a9359c66d42db6c5aba4760ed5214fdabde0"><i class="fab fa-docker fa-lg"></i> rocm/jax</a>

     - `0.4.31 <https://github.com/ROCm/jax/releases/tag/rocm-jax-v0.4.31>`_
     - Ubuntu 24.04
     - `3.12.7 <https://www.python.org/downloads/release/python-3127/>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/jax/rocm6.3.1-jax0.4.31-py3.10/images/sha256-f88eddad8f47856d8640b694da4da347ffc1750d7363175ab7dc872e82b43324"><i class="fab fa-docker fa-lg"></i> rocm/jax</a>

     - `0.4.31 <https://github.com/ROCm/jax/releases/tag/rocm-jax-v0.4.31>`_
     - Ubuntu 22.04
     - `3.10.14 <https://www.python.org/downloads/release/python-31014/>`_

AMD publishes community `JAX <https://hub.docker.com/r/rocm/jax-community>`_
images with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are tested for `ROCm 6.2.4 <https://repo.radeon.com/rocm/apt/6.2.4/>`_.

.. list-table:: JAX community Docker image components
   :header-rows: 1

   * - Docker image
     - JAX
     - Linux
     - Python
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/jax-community/rocm6.2.4-jax0.4.35-py3.12.7/images/sha256-a6032d89c07573b84c44e42c637bf9752b1b7cd2a222d39344e603d8f4c63beb?context=explore"><i class="fab fa-docker fa-lg"></i> rocm/jax-community</a>

     - `0.4.35 <https://github.com/ROCm/jax/releases/tag/rocm-jax-v0.4.35>`_
     - Ubuntu 22.04
     - `3.12.7 <https://www.python.org/downloads/release/python-3127/>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/jax-community/rocm6.2.4-jax0.4.35-py3.11.10/images/sha256-d462f7e445545fba2f3b92234a21beaa52fe6c5f550faabcfdcd1bf53486d991?context=explore"><i class="fab fa-docker fa-lg"></i> rocm/jax-community</a>

     - `0.4.35 <https://github.com/ROCm/jax/releases/tag/rocm-jax-v0.4.35>`_
     - Ubuntu 22.04
     - `3.11.10 <https://www.python.org/downloads/release/python-31110/>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/jax-community/rocm6.2.4-jax0.4.35-py3.10.15/images/sha256-6f2d4d0f529378d9572f0e8cfdcbc101d1e1d335bd626bb3336fff87814e9d60?context=explore"><i class="fab fa-docker fa-lg"></i> rocm/jax-community</a>

     - `0.4.35 <https://github.com/ROCm/jax/releases/tag/rocm-jax-v0.4.35>`_
     - Ubuntu 22.04
     - `3.10.15 <https://www.python.org/downloads/release/python-31015/>`_
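A quick way to confirm that one of these containers (or any local ROCm JAX
install) actually runs on the GPU backend is to query the devices. This is a
generic sanity check, not an AMD-documented procedure:

.. code-block:: python

   import jax

   # On a working ROCm build this lists the AMD GPUs; the exact device
   # representation varies by JAX version.
   print(jax.devices())
   print(jax.default_backend())  # expected to be "gpu" when ROCm devices are visible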
Critical ROCm libraries for JAX
================================================================================

The functionality of JAX with ROCm is determined by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers.

.. list-table::
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
     - Used in
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - 2.3.0
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Matrix multiplication in ``jax.numpy.matmul``, ``jax.lax.dot`` and
       ``jax.lax.dot_general``; operations like ``jax.numpy.dot`` that involve
       vector and matrix computations; and batched matrix multiplications such
       as ``jax.numpy.einsum`` with matrix-multiplication patterns.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - 0.10.0
     - hipBLASLt is an extension of hipBLAS, providing additional features
       like epilogues fused into the matrix multiplication kernel or use of
       integer tensor cores.
     - Matrix multiplication in ``jax.numpy.matmul`` or ``jax.lax.dot``. XLA
       (Accelerated Linear Algebra) uses hipBLASLt for optimized matrix
       operations, mixed-precision support, and hardware-specific
       optimizations.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort and select.
     - Reduction functions (``jax.numpy.sum``, ``jax.numpy.mean``,
       ``jax.numpy.prod``, ``jax.numpy.max`` and ``jax.numpy.min``), prefix
       sums (``jax.numpy.cumsum``, ``jax.numpy.cumprod``) and sorting
       (``jax.numpy.sort``, ``jax.numpy.argsort``).
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - 1.0.17
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
     - Used in functions like ``jax.numpy.fft``.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - 2.11.0
     - Provides fast random number generation for GPUs.
     - ``jax.random.uniform``, ``jax.random.normal``, ``jax.random.randint``
       and ``jax.random.split``.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - 2.3.0
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
       singular value decompositions (SVD).
     - Solving linear systems (``jax.numpy.linalg.solve``), matrix
       factorizations, SVD (``jax.numpy.linalg.svd``) and eigenvalue problems
       (``jax.numpy.linalg.eig``).
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - 3.1.2
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse matrix multiplication (``jax.numpy.matmul``), sparse
       matrix-vector and matrix-matrix products
       (``jax.experimental.sparse.dot``), sparse linear system solvers and
       sparse data handling.
   * - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
     - 0.2.2
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse matrix multiplication (``jax.numpy.matmul``), sparse
       matrix-vector and matrix-matrix products
       (``jax.experimental.sparse.dot``) and sparse linear system solvers.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - 3.3.0
     - Optimized for deep learning primitives such as convolutions, pooling,
       normalization, and activation functions.
     - Speeds up convolutional neural networks (CNNs), recurrent neural
       networks (RNNs), and other layers. Used in operations like
       ``jax.nn.conv``, ``jax.nn.relu``, and ``jax.nn.batch_norm``.
   * - `RCCL <https://github.com/ROCm/rccl>`_
     - 2.21.5
     - Optimized for multi-GPU communication for operations like all-reduce,
       broadcast, and scatter.
     - Distributes computations across multiple GPUs with ``pmap`` and
       ``jax.distributed``. XLA automatically uses RCCL when executing
       operations across multiple GPUs on AMD hardware.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
     - Reduction operations like ``jax.numpy.sum``, ``jax.pmap`` for
       distributed training (which involves parallel reductions) and
       operations like ``jax.numpy.cumsum`` can use rocThrust.

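As a quick orientation, the following minimal sketch (an illustration, not part
of the validated tables above) shows everyday JAX calls that dispatch to these
libraries when run on a ROCm-enabled GPU; the shapes are arbitrary:

.. code-block:: python

   import jax
   import jax.numpy as jnp

   key = jax.random.PRNGKey(0)
   a = jax.random.normal(key, (1024, 1024))   # hipRAND-backed random numbers
   b = jnp.matmul(a, a.T)                     # GEMM via hipBLAS/hipBLASLt
   s = jnp.sum(b)                             # reduction via hipCUB/rocThrust
   f = jnp.fft.fft(a[0])                      # FFT via hipFFT
   x = jnp.linalg.solve(b + 1024.0 * jnp.eye(1024), a[:, 0])  # hipSOLVER
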
Supported and unsupported features
================================================================================

The following table maps GPU-accelerated JAX modules to their supported
ROCm and JAX versions.

.. list-table::
   :header-rows: 1

   * - Module
     - Description
     - Since JAX
     - Since ROCm
   * - ``jax.numpy``
     - Implements the NumPy API, using the primitives in ``jax.lax``.
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy``
     - Provides GPU-accelerated and differentiable implementations of many
       functions from the SciPy library, leveraging JAX's transformations
       (e.g., ``grad``, ``jit``, ``vmap``).
     - 0.1.56
     - 5.0.0
   * - ``jax.lax``
     - A library of primitive operations that underpins libraries such as
       ``jax.numpy``. Transformation rules, such as Jacobian-vector product
       (JVP) and batching rules, are typically defined as transformations on
       ``jax.lax`` primitives.
     - 0.1.57
     - 5.0.0
   * - ``jax.random``
     - Provides a number of routines for deterministic generation of sequences
       of pseudorandom numbers.
     - 0.1.58
     - 5.0.0
   * - ``jax.sharding``
     - Lets you define how arrays are partitioned and distributed across
       multiple devices.
     - 0.3.20
     - 5.1.0
   * - ``jax.dlpack``
     - For exchanging tensor data between JAX and other libraries that support
       the DLPack standard.
     - 0.1.57
     - 5.0.0
   * - ``jax.distributed``
     - Enables the scaling of computations across multiple devices on a single
       machine or across multiple machines.
     - 0.1.74
     - 5.0.0
   * - ``jax.dtypes``
     - Provides utilities for working with and managing data types in JAX
       arrays and computations.
     - 0.1.66
     - 5.0.0
   * - ``jax.image``
     - Contains image manipulation functions like resize, scale and
       translation.
     - 0.1.57
     - 5.0.0
   * - ``jax.nn``
     - Contains common functions for neural network libraries.
     - 0.1.56
     - 5.0.0
   * - ``jax.ops``
     - Computes the minimum, maximum, sum or product within segments of an
       array.
     - 0.1.57
     - 5.0.0
   * - ``jax.profiler``
     - Contains JAX's tracing and time profiling features.
     - 0.1.57
     - 5.0.0
   * - ``jax.stages``
     - Contains interfaces to stages of the compiled execution process.
     - 0.3.4
     - 5.0.0
   * - ``jax.tree``
     - Provides utilities for working with tree-like container data
       structures.
     - 0.4.26
     - 5.6.0
   * - ``jax.tree_util``
     - Provides utilities for working with nested data structures, or
       ``pytrees``.
     - 0.1.65
     - 5.0.0
   * - ``jax.typing``
     - Provides JAX-specific static type annotations.
     - 0.3.18
     - 5.1.0
   * - ``jax.extend``
     - Provides access to JAX internal machinery. The ``jax.extend`` module
       defines a library view of some of JAX's internal components.
     - 0.4.15
     - 5.5.0
   * - ``jax.example_libraries``
     - Serves as a collection of example code and libraries that demonstrate
       various capabilities of JAX.
     - 0.1.74
     - 5.0.0
   * - ``jax.experimental``
     - Namespace for experimental features and APIs that are in development or
       are not yet fully stable for production use.
     - 0.1.56
     - 5.0.0
   * - ``jax.lib``
     - Set of internal tools and types for bridging between JAX's Python
       frontend and its XLA backend.
     - 0.4.6
     - 5.3.0
   * - ``jax_triton``
     - Library that integrates the Triton deep learning compiler with JAX.
     - jax_triton 0.2.0
     - 6.2.4

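To make the module table concrete, here is a short sketch (illustrative only,
not from the validated list above) of ``jax.numpy`` composed with the ``jit``,
``grad`` and ``vmap`` transformations:

.. code-block:: python

   import jax
   import jax.numpy as jnp

   @jax.jit
   def loss(w, x):
       return jnp.sum((x @ w) ** 2)

   w = jnp.ones((4,))
   x = jnp.arange(12.0).reshape(3, 4)
   g = jax.grad(loss)(w, x)              # reverse-mode gradient of the loss
   rows = jax.vmap(lambda v: v @ w)(x)   # vectorized map over rows of x
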
jax.scipy module
--------------------------------------------------------------------------------

A SciPy-like API for scientific computing.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.scipy.cluster``
     - 0.3.11
     - 5.1.0
   * - ``jax.scipy.fft``
     - 0.1.71
     - 5.0.0
   * - ``jax.scipy.integrate``
     - 0.4.15
     - 5.5.0
   * - ``jax.scipy.interpolate``
     - 0.1.76
     - 5.0.0
   * - ``jax.scipy.linalg``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.ndimage``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.optimize``
     - 0.1.57
     - 5.0.0
   * - ``jax.scipy.signal``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.spatial.transform``
     - 0.4.12
     - 5.4.0
   * - ``jax.scipy.sparse.linalg``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.special``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats``
     - 0.1.56
     - 5.0.0

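As a brief, hedged example of this GPU-accelerated SciPy-like API (the matrix
values are illustrative), ``jax.scipy.linalg`` mirrors ``scipy.linalg``:

.. code-block:: python

   import jax.numpy as jnp
   from jax.scipy import linalg

   a = jnp.array([[3.0, 1.0], [1.0, 2.0]])
   c = linalg.cholesky(a, lower=True)                       # hipSOLVER-backed
   y = linalg.solve_triangular(c, jnp.ones(2), lower=True)  # triangular solve
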
jax.scipy.stats module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.scipy.stats.bernoulli``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.beta``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.betabinom``
     - 0.1.61
     - 5.0.0
   * - ``jax.scipy.stats.binom``
     - 0.4.14
     - 5.4.0
   * - ``jax.scipy.stats.cauchy``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.chi2``
     - 0.1.61
     - 5.0.0
   * - ``jax.scipy.stats.dirichlet``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.expon``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.gamma``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.gennorm``
     - 0.3.15
     - 5.2.0
   * - ``jax.scipy.stats.geom``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.laplace``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.logistic``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.multinomial``
     - 0.3.18
     - 5.1.0
   * - ``jax.scipy.stats.multivariate_normal``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.nbinom``
     - 0.1.72
     - 5.0.0
   * - ``jax.scipy.stats.norm``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.pareto``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.poisson``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.t``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.truncnorm``
     - 0.4.0
     - 5.3.0
   * - ``jax.scipy.stats.uniform``
     - 0.1.56
     - 5.0.0
   * - ``jax.scipy.stats.vonmises``
     - 0.4.2
     - 5.3.0
   * - ``jax.scipy.stats.wrapcauchy``
     - 0.4.20
     - 5.6.0

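For instance, the distributions above follow the familiar SciPy calling
convention; a minimal sketch using ``jax.scipy.stats.norm``:

.. code-block:: python

   import jax.numpy as jnp
   from jax.scipy.stats import norm

   x = jnp.linspace(-3.0, 3.0, 7)
   log_p = norm.logpdf(x, loc=0.0, scale=1.0)   # evaluated on the GPU
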
jax.extend module
--------------------------------------------------------------------------------

Modules for JAX extensions.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.extend.ffi``
     - 0.4.30
     - 6.0.0
   * - ``jax.extend.linear_util``
     - 0.4.17
     - 5.6.0
   * - ``jax.extend.mlir``
     - 0.4.26
     - 5.6.0
   * - ``jax.extend.random``
     - 0.4.15
     - 5.5.0

jax.experimental module
--------------------------------------------------------------------------------

Experimental modules and APIs.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.checkify``
     - 0.1.75
     - 5.0.0
   * - ``jax.experimental.compilation_cache.compilation_cache``
     - 0.1.68
     - 5.0.0
   * - ``jax.experimental.custom_partitioning``
     - 0.4.0
     - 5.3.0
   * - ``jax.experimental.jet``
     - 0.1.56
     - 5.0.0
   * - ``jax.experimental.key_reuse``
     - 0.4.26
     - 5.6.0
   * - ``jax.experimental.mesh_utils``
     - 0.1.76
     - 5.0.0
   * - ``jax.experimental.multihost_utils``
     - 0.3.2
     - 5.0.0
   * - ``jax.experimental.pallas``
     - 0.4.15
     - 5.5.0
   * - ``jax.experimental.pjit``
     - 0.1.61
     - 5.0.0
   * - ``jax.experimental.serialize_executable``
     - 0.4.0
     - 5.3.0
   * - ``jax.experimental.shard_map``
     - 0.4.3
     - 5.3.0
   * - ``jax.experimental.sparse``
     - 0.1.75
     - 5.0.0

.. list-table::
   :header-rows: 1

   * - API
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.enable_x64``
     - 0.1.60
     - 5.0.0
   * - ``jax.experimental.disable_x64``
     - 0.1.60
     - 5.0.0

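As an example of the ``enable_x64``/``disable_x64`` APIs listed above (a
sketch; the array contents are arbitrary, and the context-manager form may
vary across JAX versions), 64-bit types can be enabled within a scope:

.. code-block:: python

   import jax.numpy as jnp
   from jax.experimental import enable_x64

   with enable_x64():
       x = jnp.arange(3, dtype=jnp.float64)
       print(x.dtype)   # float64 inside the context
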
jax.experimental.pallas module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Module for Pallas, a JAX extension for custom kernels.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.pallas.mosaic_gpu``
     - 0.4.31
     - 6.1.3
   * - ``jax.experimental.pallas.tpu``
     - 0.4.15
     - 5.5.0
   * - ``jax.experimental.pallas.triton``
     - 0.4.32
     - 6.1.3

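A minimal Pallas kernel, adapted from the canonical introductory example (the
kernel body and shapes are illustrative), looks as follows:

.. code-block:: python

   import jax
   import jax.numpy as jnp
   from jax.experimental import pallas as pl

   def add_kernel(x_ref, y_ref, o_ref):
       # Refs are read and written in place inside the kernel.
       o_ref[...] = x_ref[...] + y_ref[...]

   @jax.jit
   def add(x, y):
       return pl.pallas_call(
           add_kernel,
           out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
       )(x, y)

   x = jnp.arange(8.0)
   print(add(x, x))
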
jax.experimental.sparse module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Experimental support for sparse matrix operations.

.. list-table::
   :header-rows: 1

   * - Module
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.sparse.linalg``
     - 0.3.15
     - 5.2.0
   * - ``jax.experimental.sparse.sparsify``
     - 0.3.25
     - ❌

.. list-table::
   :header-rows: 1

   * - ``sparse`` data structure API
     - Since JAX
     - Since ROCm
   * - ``jax.experimental.sparse.BCOO``
     - 0.1.72
     - 5.0.0
   * - ``jax.experimental.sparse.BCSR``
     - 0.3.20
     - 5.1.0
   * - ``jax.experimental.sparse.CSR``
     - 0.1.75
     - 5.0.0
   * - ``jax.experimental.sparse.NM``
     - 0.4.27
     - 5.6.0
   * - ``jax.experimental.sparse.COO``
     - 0.1.75
     - 5.0.0

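As a short, hedged illustration of the ``BCOO`` data structure listed above
(the values are arbitrary):

.. code-block:: python

   import jax.numpy as jnp
   from jax.experimental import sparse

   dense = jnp.array([[0.0, 1.0], [2.0, 0.0]])
   m = sparse.BCOO.fromdense(dense)   # batched-COO sparse representation
   print(m @ jnp.ones(2))             # sparse matrix-vector product
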
Unsupported JAX features
------------------------

The following are GPU-accelerated JAX features not currently supported by
ROCm.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since JAX
   * - Mixed precision with TF32
     - Mixed precision with TF32 is used for matrix multiplications,
       convolutions, and other linear algebra operations, particularly in
       deep learning workloads like CNNs and transformers.
     - 0.2.25
   * - RNN support
     - Currently, only LSTM with double bias is supported, with float32 input
       and weight.
     - 0.3.25
   * - XLA int4 support
     - 4-bit integer (int4) precision in the XLA compiler.
     - 0.4.0
   * - ``jax.experimental.sparse.sparsify``
     - Converts a dense matrix to a sparse matrix representation.
     - Experimental

Use cases and recommendations
================================================================================

* The `nanoGPT in JAX <https://rocm.blogs.amd.com/artificial-intelligence/nanoGPT-JAX/README.html>`_
  blog explores the implementation and training of a Generative Pre-trained
  Transformer (GPT) model in JAX, inspired by Andrej Karpathy's PyTorch-based
  nanoGPT. By comparing how essential GPT components, such as self-attention
  mechanisms and optimizers, are realized in PyTorch and JAX, the blog also
  highlights JAX's unique features.

* The `Optimize GPT Training: Enabling Mixed Precision Training in JAX using
  ROCm on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-mixed-precision/README.html>`_
  blog post provides a comprehensive guide on enhancing the training efficiency
  of GPT models by implementing mixed precision techniques in JAX, specifically
  tailored for AMD GPUs utilizing the ROCm platform.

* The `Supercharging JAX with Triton Kernels on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/jax-triton/README.html>`_
  blog demonstrates how to develop a custom fused dropout-activation kernel for
  matrices using Triton, integrate it with JAX, and benchmark its performance
  using ROCm.

* The `Distributed fine-tuning with JAX on AMD GPUs <https://rocm.blogs.amd.com/artificial-intelligence/distributed-sft-jax/README.html>`_
  blog post outlines the process of fine-tuning a Bidirectional Encoder
  Representations from Transformers (BERT)-based large language model (LLM)
  using JAX for a text classification task. It discusses techniques for
  parallelizing the fine-tuning across multiple AMD GPUs and assesses the
  model's performance on a holdout dataset. During fine-tuning, a
  BERT-base-cased transformer model and the General Language Understanding
  Evaluation (GLUE) benchmark dataset were used on a multi-GPU setup.

* The `MI300X workload optimization guide <https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html>`_
  provides detailed guidance on optimizing workloads for the AMD Instinct
  MI300X accelerator using ROCm. The page is aimed at helping users achieve
  optimal performance for deep learning and other high-performance computing
  tasks on the MI300X GPU.

For more use cases and recommendations, see `ROCm JAX blog posts <https://rocm.blogs.amd.com/blog/tag/jax.html>`_.

922 docs/compatibility/ml-compatibility/pytorch-compatibility.rst Normal file
@@ -0,0 +1,922 @@
.. meta::
   :description: PyTorch compatibility
   :keywords: GPU, PyTorch compatibility

********************************************************************************
PyTorch compatibility
********************************************************************************

`PyTorch <https://pytorch.org/>`_ is an open-source tensor library designed for
deep learning. PyTorch on ROCm provides mixed-precision and large-scale training
using the `MIOpen <https://github.com/ROCm/MIOpen>`_ and
`RCCL <https://github.com/ROCm/rccl>`_ libraries.

ROCm support for PyTorch is upstreamed into the official PyTorch repository. Due
to independent compatibility considerations, this results in two distinct
release cycles for PyTorch on ROCm:

- ROCm PyTorch release:

  - Provides the latest version of ROCm but doesn't immediately support the
    latest stable PyTorch version.

  - Offers :ref:`Docker images <pytorch-docker-compat>` with ROCm and PyTorch
    pre-installed.

  - ROCm PyTorch repository: `<https://github.com/ROCm/pytorch>`__

  - See the :doc:`ROCm PyTorch installation guide <rocm-install-on-linux:install/3rd-party/pytorch-install>` to get started.

- Official PyTorch release:

  - Provides the latest stable version of PyTorch but doesn't immediately
    support the latest ROCm version.

  - Official PyTorch repository: `<https://github.com/pytorch/pytorch>`__

  - See the `Nightly and latest stable version installation guide <https://pytorch.org/get-started/locally/>`_
    or `Previous versions <https://pytorch.org/get-started/previous-versions/>`_ to get started.

Upstream PyTorch includes an automatic HIPification solution that generates HIP
source code from the CUDA backend. This approach allows PyTorch to support ROCm
without requiring manual code modifications.

Development of ROCm is aligned with the stable release of PyTorch, while
upstream PyTorch testing uses the stable release of ROCm to maintain
consistency.

.. _pytorch-docker-compat:

Docker image compatibility
================================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes ready-made `PyTorch <https://hub.docker.com/r/rocm/pytorch>`_
images with ROCm backends on Docker Hub. The following Docker image tags and
associated inventories are validated for `ROCm 6.3.0 <https://repo.radeon.com/rocm/apt/6.3/>`_.
Click the |docker-icon| icon to view the image on Docker Hub.

.. list-table:: PyTorch Docker image components
   :header-rows: 1
   :class: docker-image-compatibility

   * - Docker
     - PyTorch
     - Ubuntu
     - Python
     - Apex
     - torchvision
     - TensorBoard
     - MAGMA
     - UCX
     - OMPI
     - OFED
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu24.04_py3.12_pytorch_release_2.4.0/images/sha256-98ddf20333bd01ff749b8092b1190ee369a75d3b8c71c2fac80ffdcb1a98d529?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 24.04
     - `3.12 <https://www.python.org/downloads/release/python-3128/>`_
     - `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
     - `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.4.0/images/sha256-402c9b4f1a6b5a81c634a1932b56cbe01abb699cfcc7463d226276997c6cf8ea?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 22.04
     - `3.10 <https://www.python.org/downloads/release/python-31016/>`_
     - `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
     - `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_2.4.0/images/sha256-e0608b55d408c3bfe5c19fdd57a4ced3e0eb3a495b74c309980b60b156c526dd?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.4.0 <https://github.com/ROCm/pytorch/tree/release/2.4>`_
     - 22.04
     - `3.9 <https://www.python.org/downloads/release/python-3918/>`_
     - `1.4.0 <https://github.com/ROCm/apex/tree/release/1.4.0>`_
     - `0.19.0 <https://github.com/pytorch/vision/tree/v0.19.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.7 <https://github.com/open-mpi/ompi/tree/v4.0.7>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.3.0/images/sha256-652cf25263d05b1de548222970aeb76e60b12de101de66751264709c0d0ff9d8?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.3.0 <https://github.com/ROCm/pytorch/tree/release/2.3>`_
     - 22.04
     - `3.10 <https://www.python.org/downloads/release/python-31016/>`_
     - `1.3.0 <https://github.com/ROCm/apex/tree/release/1.3.0>`_
     - `0.18.0 <https://github.com/pytorch/vision/tree/v0.18.0>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
     - `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.10_pytorch_release_2.2.1/images/sha256-051976f26beab8f9aa65d999e3ad546c027b39240a0cc3ee81b114a9024f2912?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
     - 22.04
     - `3.10 <https://www.python.org/downloads/release/python-31016/>`_
     - `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
     - `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
     - `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_2.2.1/images/sha256-88c839a364d109d3748c100385bfa100d28090d25118cc723fd0406390ab2f7e?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `2.2.1 <https://github.com/ROCm/pytorch/tree/release/2.2>`_
     - 20.04
     - `3.9 <https://www.python.org/downloads/release/python-3921/>`_
     - `1.2.0 <https://github.com/ROCm/apex/tree/release/1.2.0>`_
     - `0.17.1 <https://github.com/pytorch/vision/tree/v0.17.1>`_
     - `2.13.0 <https://github.com/tensorflow/tensorboard/tree/2.13.0>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu22.04_py3.9_pytorch_release_1.13.1/images/sha256-994424ed07a63113f79dd9aa72159124c00f5fbfe18127151e6658f7d0b6f821?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
     - 22.04
     - `3.9 <https://www.python.org/downloads/release/python-3921/>`_
     - `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
     - `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
     - `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.14.1 <https://github.com/openucx/ucx/tree/v1.14.1>`_
     - `4.1.5 <https://github.com/open-mpi/ompi/tree/v4.1.5>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_
   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/pytorch/rocm6.3_ubuntu20.04_py3.9_pytorch_release_1.13.1/images/sha256-7b8139fe40a9aeb4bca3aecd15c22c1fa96e867d93479fa3a24fdeeeeafa1219?context=explore"><i class="fab fa-docker fa-lg"></i></a>

     - `1.13.1 <https://github.com/ROCm/pytorch/tree/release/1.13>`_
     - 20.04
     - `3.9 <https://www.python.org/downloads/release/python-3921/>`_
     - `1.0.0 <https://github.com/ROCm/apex/tree/release/1.0.0>`_
     - `0.14.0 <https://github.com/pytorch/vision/tree/v0.14.0>`_
     - `2.18.0 <https://github.com/tensorflow/tensorboard/tree/2.18>`_
     - `master <https://bitbucket.org/icl/magma/src/master/>`_
     - `1.10.0 <https://github.com/openucx/ucx/tree/v1.10.0>`_
     - `4.0.3 <https://github.com/open-mpi/ompi/tree/v4.0.3>`_
     - `5.3-1.0.5.0 <https://content.mellanox.com/ofed/MLNX_OFED-5.3-1.0.5.0/MLNX_OFED_LINUX-5.3-1.0.5.0-ubuntu20.04-x86_64.tgz>`_

Critical ROCm libraries for PyTorch
================================================================================

The functionality of PyTorch with ROCm is determined by its underlying library
dependencies. These critical ROCm components affect the capabilities,
performance, and feature set available to developers.

.. list-table::
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
     - Used in
   * - `Composable Kernel <https://github.com/ROCm/composable_kernel>`_
     - 1.1.0
     - Enables faster execution of core operations like matrix multiplication
       (GEMM), convolutions and transformations.
     - Speeds up ``torch.permute``, ``torch.view``, ``torch.matmul``,
       ``torch.mm``, ``torch.bmm``, ``torch.nn.Conv2d``, ``torch.nn.Conv3d``
       and ``torch.nn.MultiheadAttention``.
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - 2.3.0
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Supports operations like matrix multiplication, matrix-vector products,
       and tensor contractions. Utilized in both dense and batched linear
       algebra operations.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - 0.10.0
     - hipBLASLt is an extension of the hipBLAS library, providing additional
       features like epilogues fused into the matrix multiplication kernel or
       use of integer tensor cores.
     - Accelerates operations like ``torch.matmul``, ``torch.mm``, and the
       matrix multiplications used in convolutional and linear layers.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort and select.
     - Supports operations like ``torch.sum``, ``torch.cumsum``, ``torch.sort``
       and ``torch.topk``. Operations on sparse tensors or tensors with
       irregular shapes often involve scanning, sorting, and filtering, which
       hipCUB handles efficiently.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - 1.0.17
     - Provides GPU-accelerated Fast Fourier Transform (FFT) operations.
     - Used in functions like the ``torch.fft`` module.
   * - `hipRAND <https://github.com/ROCm/hipRAND>`_
     - 2.11.0
     - Provides fast random number generation for GPUs.
     - ``torch.rand``, ``torch.randn`` and stochastic layers like
       ``torch.nn.Dropout``.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - 2.3.0
     - Provides GPU-accelerated solvers for linear systems, eigenvalues, and
       singular value decompositions (SVD).
     - Supports functions like ``torch.linalg.solve``,
       ``torch.linalg.eig``, and ``torch.linalg.svd``.
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - 3.1.2
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse tensor operations (``torch.sparse``).
   * - `hipSPARSELt <https://github.com/ROCm/hipSPARSELt>`_
     - 0.2.2
     - Accelerates operations on sparse matrices, such as sparse matrix-vector
       or matrix-matrix products.
     - Sparse tensor operations (``torch.sparse``).
   * - `hipTensor <https://github.com/ROCm/hipTensor>`_
     - 1.4.0
     - Optimized for high-performance tensor operations, such as contractions.
     - Accelerates tensor algebra, especially in deep learning and scientific
       computing.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - 3.3.0
     - Optimizes deep learning primitives such as convolutions, pooling,
       normalization, and activation functions.
     - Speeds up convolutional neural networks (CNNs), recurrent neural
       networks (RNNs), and other layers. Used in operations like
       ``torch.nn.Conv2d``, ``torch.nn.ReLU``, and ``torch.nn.LSTM``.
   * - `MIGraphX <https://github.com/ROCm/AMDMIGraphX>`_
     - 2.11.0
     - Adds graph-level optimizations, ONNX model and mixed-precision support,
       and enables ahead-of-time (AOT) compilation.
     - Speeds up inference models and executes ONNX models for
       compatibility with other frameworks.
   * - `MIVisionX <https://github.com/ROCm/MIVisionX>`_
     - 3.1.0
     - Optimizes acceleration for computer vision and AI workloads like
       preprocessing, augmentation, and inferencing.
     - Faster data preprocessing and augmentation pipelines for datasets like
       ImageNet or COCO; easy to integrate into PyTorch's ``torch.utils.data``
       and ``torchvision`` workflows.
   * - `rocAL <https://github.com/ROCm/rocAL>`_
     - 2.1.0
     - Accelerates the data pipeline by offloading intensive preprocessing and
       augmentation tasks. rocAL is part of MIVisionX.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data loading workloads.
   * - `RCCL <https://github.com/ROCm/rccl>`_
     - 2.21.5
     - Optimized for multi-GPU communication for operations like AllReduce and
       Broadcast.
     - Distributed data parallel training (``torch.nn.parallel.DistributedDataParallel``).
       Handles communication in multi-GPU setups.
   * - `rocDecode <https://github.com/ROCm/rocDecode>`_
     - 0.8.0
     - Provides hardware-accelerated data decoding capabilities, particularly
       for image, video, and other dataset formats.
     - Can be integrated in ``torch.utils.data``, ``torchvision.transforms``
       and ``torch.distributed``.
   * - `rocJPEG <https://github.com/ROCm/rocJPEG>`_
     - 0.6.0
     - Provides hardware-accelerated JPEG image decoding and encoding.
     - GPU-accelerated ``torchvision.io.decode_jpeg`` and
       ``torchvision.io.encode_jpeg``; can be integrated in
       ``torch.utils.data`` and ``torchvision``.
   * - `RPP <https://github.com/ROCm/RPP>`_
     - 1.9.1
     - Speeds up data augmentation, transformation, and other preprocessing
       steps.
     - Easy to integrate into PyTorch's ``torch.utils.data`` and
       ``torchvision`` data loading workloads.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
     - Utilized in backend operations for tensor computations requiring
       parallel processing.
   * - `rocWMMA <https://github.com/ROCm/rocWMMA>`_
     - 1.6.0
     - Accelerates warp-level matrix-multiply and matrix-accumulate operations
       to speed up matrix multiplication (GEMM) and accumulation with
       mixed-precision support.
     - Linear layers (``torch.nn.Linear``), convolutional layers
       (``torch.nn.Conv2d``), attention layers, and general tensor operations
       that involve matrix products, such as ``torch.matmul``, ``torch.bmm``,
       and more.

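The following minimal sketch (illustrative only; the shapes and modules are
arbitrary) shows common PyTorch calls that dispatch to these libraries on a
ROCm system, where AMD GPUs are exposed through the ``cuda`` device type:

.. code-block:: python

   import torch

   device = "cuda"                              # maps to ROCm/HIP on AMD GPUs
   a = torch.randn(512, 512, device=device)     # hipRAND
   b = a @ a.T                                  # GEMM via hipBLAS/hipBLASLt
   conv = torch.nn.Conv2d(3, 8, 3).to(device)   # MIOpen convolution
   y = conv(torch.randn(1, 3, 32, 32, device=device))
   s = torch.fft.rfft(a[0])                     # hipFFT
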
Supported and unsupported features
================================================================================

The following section maps GPU-accelerated PyTorch features to their supported
ROCm and PyTorch versions.

torch
--------------------------------------------------------------------------------

`torch <https://pytorch.org/docs/stable/index.html>`_ is the central module of
PyTorch, providing data structures for multi-dimensional tensors and
implementing mathematical operations on them. It also includes utilities for
efficient serialization of tensors and arbitrary data types, along with various
other tools.

Tensor data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The data type of a tensor is specified using the ``dtype`` attribute or
argument. PyTorch supports a wide range of data types for different use cases.

The following table lists the data types supported by `torch.Tensor <https://pytorch.org/docs/stable/tensors.html>`_:

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.float8_e4m3fn``
     - 8-bit floating point, e4m3
     - 2.3
     - 5.5
   * - ``torch.float8_e5m2``
     - 8-bit floating point, e5m2
     - 2.3
     - 5.5
   * - ``torch.float16`` or ``torch.half``
     - 16-bit floating point
     - 0.1.6
     - 2.0
   * - ``torch.bfloat16``
     - 16-bit floating point
     - 1.6
     - 2.6
   * - ``torch.float32`` or ``torch.float``
     - 32-bit floating point
     - 0.1.12_2
     - 2.0
   * - ``torch.float64`` or ``torch.double``
     - 64-bit floating point
     - 0.1.12_2
     - 2.0
   * - ``torch.complex32`` or ``torch.chalf``
     - PyTorch provides native support for 32-bit complex numbers
     - 1.6
     - 2.0
   * - ``torch.complex64`` or ``torch.cfloat``
     - PyTorch provides native support for 64-bit complex numbers
     - 1.6
     - 2.0
   * - ``torch.complex128`` or ``torch.cdouble``
     - PyTorch provides native support for 128-bit complex numbers
     - 1.6
     - 2.0
   * - ``torch.uint8``
     - 8-bit integer (unsigned)
     - 0.1.12_2
     - 2.0
   * - ``torch.uint16``
     - 16-bit integer (unsigned)
     - 2.3
     - Not natively supported
   * - ``torch.uint32``
     - 32-bit integer (unsigned)
     - 2.3
     - Not natively supported
   * - ``torch.uint64``
     - 64-bit integer (unsigned)
     - 2.3
     - Not natively supported
   * - ``torch.int8``
     - 8-bit integer (signed)
     - 1.12
     - 5.0
   * - ``torch.int16`` or ``torch.short``
     - 16-bit integer (signed)
     - 0.1.12_2
     - 2.0
   * - ``torch.int32`` or ``torch.int``
     - 32-bit integer (signed)
     - 0.1.12_2
     - 2.0
   * - ``torch.int64`` or ``torch.long``
     - 64-bit integer (signed)
     - 0.1.12_2
     - 2.0
   * - ``torch.bool``
     - Boolean
     - 1.2
     - 2.0
   * - ``torch.quint8``
     - Quantized 8-bit integer (unsigned)
     - 1.8
     - 5.0
   * - ``torch.qint8``
     - Quantized 8-bit integer (signed)
     - 1.8
     - 5.0
   * - ``torch.qint32``
     - Quantized 32-bit integer (signed)
     - 1.8
     - 5.0
   * - ``torch.quint4x2``
     - Quantized 4-bit integer (unsigned)
     - 1.8
     - 5.0

.. note::

   Unsigned types aside from ``uint8`` currently have only limited support in
   eager mode (they primarily exist to assist usage with ``torch.compile``).

The :doc:`ROCm precision support page <rocm:reference/precision-support>`
lists the native hardware support for the different data types.

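For example (a sketch; the shapes are arbitrary), the ``dtype`` argument and
``Tensor.to`` control the precision used on the GPU:

.. code-block:: python

   import torch

   x = torch.ones(2, 2, dtype=torch.bfloat16, device="cuda")
   y = x.to(torch.float32)      # upcast for numerically sensitive operations
   print(x.dtype, y.dtype)      # torch.bfloat16 torch.float32
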
torch.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.cuda`` in PyTorch is a module that provides utilities and functions for
managing and utilizing AMD and NVIDIA GPUs. It enables GPU-accelerated
computations, memory management, and efficient execution of tensor operations,
leveraging ROCm and CUDA as the underlying frameworks.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - Device management
     - Utilities for managing and interacting with GPUs.
     - 0.4.0
     - 3.8
   * - Tensor operations on GPU
     - Performs tensor operations such as additions and matrix multiplications
       on the GPU.
     - 0.4.0
     - 3.8
   * - Streams and events
     - Streams allow overlapping computation and communication for optimized
       performance. Events enable synchronization.
     - 1.6.0
     - 3.8
   * - Memory management
     - Functions to manage and inspect memory usage, like
       ``torch.cuda.memory_allocated()``, ``torch.cuda.max_memory_allocated()``,
       ``torch.cuda.memory_reserved()`` and ``torch.cuda.empty_cache()``.
     - 0.3.0
     - 1.9.2
   * - Running process lists of memory management
     - Returns a human-readable printout of the running processes and their
       GPU memory use for a given device, with functions like
       ``torch.cuda.memory_stats()`` and ``torch.cuda.memory_summary()``.
     - 1.8.0
     - 4.0
   * - Communication collectives
     - Set of APIs that enable efficient communication between multiple GPUs,
       allowing for distributed computing and data parallelism.
     - 1.9.0
     - 5.0
   * - ``torch.cuda.CUDAGraph``
     - Graphs capture sequences of GPU operations to minimize kernel launch
       overhead and improve performance.
     - 1.10.0
     - 5.3
   * - TunableOp
     - A mechanism that allows certain operations to be more flexible and
       optimized for performance. It enables automatic tuning of kernel
       configurations and other settings to achieve the best possible
       performance based on the specific hardware (GPU) and workload.
     - 2.0
     - 5.4
   * - NVIDIA Tools Extension (NVTX)
     - Integration with NVTX for profiling and debugging GPU performance using
       NVIDIA's Nsight tools.
     - 1.8.0
     - ❌
   * - Lazy loading NVRTC
     - Delays JIT compilation with NVRTC until the code is explicitly needed.
     - 1.13.0
     - ❌
   * - Jiterator (beta)
     - Jiterator (``torch.cuda.jiterator``) lets you define and JIT-compile
       custom elementwise kernels from Python strings at runtime.
     - 1.13.0
     - 5.2

.. Need to validate and extend.

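A brief sketch of the device-management, stream, and memory-inspection
utilities above (illustrative; the workload is arbitrary):

.. code-block:: python

   import torch

   if torch.cuda.is_available():              # True on ROCm builds as well
       print(torch.cuda.get_device_name(0))
       stream = torch.cuda.Stream()
       with torch.cuda.stream(stream):        # queue work on a side stream
           x = torch.randn(1024, 1024, device="cuda")
           y = x @ x
       torch.cuda.synchronize()
       print(torch.cuda.memory_allocated())   # bytes currently allocated
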
torch.backends.cuda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.backends.cuda`` is a PyTorch module that provides configuration options
and flags to control the behavior of ROCm or CUDA operations. It is part of the
PyTorch backend configuration system, which allows users to fine-tune how
PyTorch interacts with the ROCm or CUDA environment.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``cufft_plan_cache``
     - Manages caching of GPU FFT plans to optimize repeated FFT computations.
     - 1.7.0
     - 5.0
   * - ``matmul.allow_tf32``
     - Enables or disables the use of TensorFloat-32 (TF32) precision for
       faster matrix multiplications on GPUs with Tensor Cores.
     - 1.10.0
     - ❌
   * - ``matmul.allow_fp16_reduced_precision_reduction``
     - Reduced-precision reductions (e.g., with an fp16 accumulation type) are
       allowed with fp16 GEMMs.
     - 2.0
     - ❌
   * - ``matmul.allow_bf16_reduced_precision_reduction``
     - Reduced-precision reductions are allowed with bf16 GEMMs.
     - 2.0
     - ❌
   * - ``enable_cudnn_sdp``
     - Globally enables cuDNN SDPA's kernels within SDPA.
     - 2.0
     - ❌
   * - ``enable_flash_sdp``
     - Globally enables or disables FlashAttention for SDPA.
     - 2.1
     - ❌
   * - ``enable_mem_efficient_sdp``
     - Globally enables or disables Memory-Efficient Attention for SDPA.
     - 2.1
     - ❌
   * - ``enable_math_sdp``
     - Globally enables or disables the PyTorch C++ implementation within SDPA.
     - 2.1
     - ❌

.. Need to validate and extend.

torch.backends.cudnn
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Supported ``torch`` options include:

.. list-table::
   :header-rows: 1

   * - Option
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``allow_tf32``
     - TensorFloat-32 tensor cores may be used in cuDNN convolutions on NVIDIA
       Ampere or newer GPUs.
     - 1.12.0
     - ❌
   * - ``deterministic``
     - A bool that, if True, causes cuDNN to only use deterministic
       convolution algorithms.
     - 1.12.0
     - 6.0

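For example, deterministic convolution algorithms can be requested through the
same flags (a sketch, not a performance recommendation):

.. code-block:: python

   import torch

   torch.backends.cudnn.deterministic = True  # restrict to deterministic algos
   torch.backends.cudnn.benchmark = False     # disable algorithm autotuning
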
Automatic mixed precision: torch.amp
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``torch.amp`` automates the use of both 16-bit (half-precision, ``float16``)
and 32-bit (single-precision, ``float32``) floating-point types in model
training and inference.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - Autocasting
     - Instances of autocast serve as context managers or decorators that
       allow regions of your script to run in mixed precision.
     - 1.9
     - 2.5
   * - Gradient scaling
     - To prevent underflow, "gradient scaling" multiplies the network's
       loss(es) by a scale factor and invokes a backward pass on the scaled
       loss(es). Gradients flowing backward through the network are then
       scaled by the same factor. In other words, gradient values have a
       larger magnitude, so they don't flush to zero.
     - 1.9
     - 2.5
   * - CUDA op-specific behavior
     - These ops always go through autocasting, whether they are invoked as
       part of a ``torch.nn.Module``, as a function, or as a ``torch.Tensor``
       method. If functions are exposed in multiple namespaces, they go
       through autocasting regardless of the namespace.
     - 1.9
     - 2.5

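A minimal mixed-precision training step combining autocasting and gradient
scaling (a sketch; the model and data are placeholders):

.. code-block:: python

   import torch

   model = torch.nn.Linear(64, 8).cuda()
   opt = torch.optim.SGD(model.parameters(), lr=0.1)
   scaler = torch.cuda.amp.GradScaler()

   x = torch.randn(32, 64, device="cuda")
   with torch.autocast(device_type="cuda", dtype=torch.float16):
       loss = model(x).square().mean()   # forward pass in fp16 regions
   scaler.scale(loss).backward()         # scaled backward avoids underflow
   scaler.step(opt)
   scaler.update()
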
Distributed library features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The PyTorch distributed library includes a collective of parallelism modules, a
communications layer, and infrastructure for launching and debugging large
training jobs. See :ref:`rocm-for-ai-pytorch-distributed` for more information.

The distributed library feature in PyTorch provides tools and APIs for building
and running distributed machine learning workflows. It allows training models
across multiple processes, GPUs, or nodes in a cluster, enabling efficient use
of computational resources and scalability for large-scale tasks.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - TensorPipe
     - A point-to-point communication library integrated into PyTorch for
       distributed training. It is designed to handle tensor data transfers
       efficiently between different processes or devices, including those on
       separate machines.
     - 1.8
     - 5.4
   * - Gloo
     - Designed for multi-machine and multi-GPU setups, enabling efficient
       communication and synchronization between processes. Gloo is one of the
       default backends for PyTorch's Distributed Data Parallel (DDP) and RPC
       frameworks, alongside other backends like NCCL and MPI.
     - 1.0
     - 2.0

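A minimal DDP setup is sketched below, assuming a ``torchrun``-style launcher
has set the rank and world-size environment variables; on ROCm, the ``nccl``
backend name maps to RCCL:

.. code-block:: python

   import torch
   import torch.distributed as dist

   dist.init_process_group(backend="nccl")   # RCCL on AMD hardware
   model = torch.nn.parallel.DistributedDataParallel(
       torch.nn.Linear(16, 4).cuda()
   )
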
torch.compiler
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
     - Since ROCm
   * - ``torch.compiler`` (AOT Autograd)
     - AOT Autograd captures not only the user-level code, but also
       backpropagation, which results in capturing the backwards pass ahead of
       time. This enables acceleration of both the forwards and backwards pass
       using ``TorchInductor``.
     - 2.0
     - 5.3
   * - ``torch.compiler`` (TorchInductor)
     - The default ``torch.compile`` deep learning compiler that generates fast
       code for multiple accelerators and backends. You need to use a backend
       compiler to make speedups through ``torch.compile`` possible. For AMD,
       NVIDIA, and Intel GPUs, it leverages OpenAI Triton as the key building
       block.
     - 2.0
     - 5.3

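For example (a sketch; the function is arbitrary), ``torch.compile`` invokes
TorchInductor, which generates Triton kernels for AMD GPUs:

.. code-block:: python

   import torch

   @torch.compile
   def gelu_matmul(x, w):
       return torch.nn.functional.gelu(x @ w)

   x = torch.randn(64, 64, device="cuda")
   w = torch.randn(64, 64, device="cuda")
   out = gelu_matmul(x, w)   # the first call triggers compilation
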
torchaudio
--------------------------------------------------------------------------------

The `torchaudio <https://pytorch.org/audio/stable/index.html>`_ library provides
utilities for processing audio data in PyTorch, such as audio loading,
transformations, and feature extraction.

To ensure GPU acceleration with ``torchaudio.transforms``, you need to move the
audio data (the waveform tensor) explicitly to the GPU using ``.to('cuda')``.

The following ``torchaudio`` features are GPU-accelerated.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since torchaudio version
     - Since ROCm
   * - ``torchaudio.transforms.Spectrogram``
     - Generates the spectrogram of an input waveform using STFT.
     - 0.6.0
     - 4.5
   * - ``torchaudio.transforms.MelSpectrogram``
     - Generates the mel-scale spectrogram of raw audio signals.
     - 0.9.0
     - 4.5
   * - ``torchaudio.transforms.MFCC``
     - Extracts MFCC features.
     - 0.9.0
     - 4.5
   * - ``torchaudio.transforms.Resample``
     - Resamples a signal from one frequency to another.
     - 0.9.0
     - 4.5

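For example (a sketch with synthetic audio), moving both the transform and the
waveform to the GPU keeps the whole pipeline accelerated:

.. code-block:: python

   import torch
   import torchaudio

   waveform = torch.randn(1, 16000, device="cuda")   # one second at 16 kHz
   mel = torchaudio.transforms.MelSpectrogram(sample_rate=16000).to("cuda")
   spec = mel(waveform)                              # computed on the GPU
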
torchvision
--------------------------------------------------------------------------------

The `torchvision <https://pytorch.org/vision/stable/index.html>`_ library
provides datasets, model architectures, and common image transformations for
computer vision.

The following ``torchvision`` features are GPU-accelerated.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since torchvision version
     - Since ROCm
   * - ``torchvision.transforms.functional``
     - Provides GPU-compatible transformations for image preprocessing like
       resize, normalize, rotate and crop.
     - 0.2.0
     - 4.0
   * - ``torchvision.ops``
     - GPU-accelerated operations for object detection and segmentation tasks,
       such as ``torchvision.ops.roi_align``, ``torchvision.ops.nms`` and
       ``box_convert``.
     - 0.6.0
     - 3.3
   * - ``torchvision.models`` with ``.to('cuda')``
     - ``torchvision`` provides several pre-trained models (ResNet, Faster
       R-CNN, Mask R-CNN, ...) that can run on CUDA for faster inference and
       training.
     - 0.1.6
     - 2.x
   * - ``torchvision.io``
     - Enables video decoding and frame extraction using GPU acceleration with
       NVIDIA's NVDEC and nvJPEG (rocJPEG on ROCm) on CUDA-enabled GPUs.
     - 0.4.0
     - 6.3

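A short sketch of GPU-accelerated ``torchvision.ops`` (the boxes and scores
are made up):

.. code-block:: python

   import torch
   from torchvision.ops import nms

   boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                         [1.0, 1.0, 11.0, 11.0]], device="cuda")
   scores = torch.tensor([0.9, 0.8], device="cuda")
   keep = nms(boxes, scores, iou_threshold=0.5)   # GPU non-maximum suppression
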
torchtext
--------------------------------------------------------------------------------

The `torchtext <https://pytorch.org/text/stable/index.html>`_ library provides
utilities for processing and working with text data in PyTorch, including
tokenization, vocabulary management, and text embeddings. torchtext supports
preprocessing pipelines and integration with PyTorch models, simplifying the
implementation of natural language processing (NLP) tasks.

To leverage GPU acceleration in torchtext, you need to move tensors
explicitly to the GPU using ``.to('cuda')``.

* torchtext does not implement its own kernels. ROCm support is enabled by
  linking against ROCm libraries.

* Only the official release exists.

torchtune
--------------------------------------------------------------------------------

The `torchtune <https://pytorch.org/torchtune/stable/index.html>`_ library is
for authoring, fine-tuning and experimenting with LLMs.

* Usage: It works out of the box, enabling developers to fine-tune ROCm
  PyTorch solutions.

* Only the official release exists.

torchserve
--------------------------------------------------------------------------------

`torchserve <https://pytorch.org/torchserve/>`_ is a tool for serving and
scaling PyTorch models in production.

* torchserve does not implement its own kernels. ROCm support is enabled by
  linking against ROCm libraries.

* Only the official release exists.

torchrec
--------------------------------------------------------------------------------

`torchrec <https://pytorch.org/torchrec/>`_ is a PyTorch domain library for
common sparsity and parallelism primitives needed for large-scale recommender
systems.

* torchrec does not implement its own kernels. ROCm support is enabled by
  linking against ROCm libraries.

* Only the official release exists.

Unsupported PyTorch features
----------------------------

The following are GPU-accelerated PyTorch features not currently supported by
ROCm.

.. list-table::
   :widths: 30, 60, 10
   :header-rows: 1

   * - Feature
     - Description
     - Since PyTorch
   * - APEX batch norm
     - Use APEX batch norm instead of PyTorch batch norm.
     - 1.6.0
   * - ``torch.backends.cuda`` / ``matmul.allow_tf32``
     - A bool that controls whether TensorFloat-32 tensor cores may be used in
       matrix multiplications.
     - 1.7
   * - ``torch.cuda`` / NVIDIA Tools Extension (NVTX)
     - Integration with NVTX for profiling and debugging GPU performance using
       NVIDIA's Nsight tools.
     - 1.7.0
   * - ``torch.cuda`` / Lazy loading NVRTC
     - Delays JIT compilation with NVRTC until the code is explicitly needed.
     - 1.8.0
   * - ``torch-tensorrt``
     - Integrates the TensorRT library for optimizing and deploying PyTorch
       models. ROCm does not have an equivalent library for TensorRT.
     - 1.9.0
   * - ``torch.backends`` / ``cudnn.allow_tf32``
     - TensorFloat-32 tensor cores may be used in cuDNN convolutions.
     - 1.10.0
   * - ``torch.backends.cuda`` / ``matmul.allow_fp16_reduced_precision_reduction``
     - Reduced-precision reductions with an fp16 accumulation type are allowed
       with fp16 GEMMs.
     - 2.0
   * - ``torch.backends.cuda`` / ``matmul.allow_bf16_reduced_precision_reduction``
     - Reduced-precision reductions are allowed with bf16 GEMMs.
     - 2.0
   * - ``torch.nn.functional`` / ``scaled_dot_product_attention``
     - Flash attention backend for SDPA to accelerate attention computation in
       transformer-based models.
     - 2.0
   * - ``torch.backends.cuda`` / ``enable_cudnn_sdp``
     - Globally enables cuDNN SDPA's kernels within SDPA.
     - 2.0
   * - ``torch.backends.cuda`` / ``enable_flash_sdp``
     - Globally enables or disables FlashAttention for SDPA.
     - 2.1
   * - ``torch.backends.cuda`` / ``enable_mem_efficient_sdp``
     - Globally enables or disables Memory-Efficient Attention for SDPA.
     - 2.1
   * - ``torch.backends.cuda`` / ``enable_math_sdp``
     - Globally enables or disables the PyTorch C++ implementation within SDPA.
     - 2.1
   * - Dynamic parallelism
     - PyTorch itself does not directly expose dynamic parallelism as a core
       feature. Dynamic parallelism allows GPU threads to launch additional
       threads, which can be accessed through custom operations via the
       ``torch.utils.cpp_extension`` module.
     - Not a core feature
   * - Unified memory support in PyTorch
     - Unified memory is not directly exposed in PyTorch's core API, but it
       can be utilized effectively through custom CUDA extensions or advanced
       workflows.
     - Not a core feature

Use cases and recommendations
================================================================================

* :doc:`Using ROCm for AI: training a model </how-to/rocm-for-ai/training/train-a-model>` provides
  guidance on how to leverage the ROCm platform for training AI models. It
  covers the steps, tools, and best practices for optimizing training
  workflows on AMD GPUs using PyTorch features.

* :doc:`Single-GPU fine-tuning and inference </how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference>`
  describes and demonstrates how to use the ROCm platform for the fine-tuning
  and inference of machine learning models, particularly large language models
  (LLMs), on systems with a single AMD Instinct MI300X accelerator. This page
  provides a detailed guide for setting up, optimizing, and executing
  fine-tuning and inference workflows in such environments.

* :doc:`Multi-GPU fine-tuning and inference optimization </how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference>`
  describes and demonstrates the fine-tuning and inference of machine learning
  models on systems with multiple MI300X accelerators.

* The :doc:`Instinct MI300X workload optimization guide </how-to/rocm-for-ai/inference-optimization/workload>`
  provides detailed guidance on optimizing workloads for the AMD Instinct
  MI300X accelerator using ROCm. This guide is aimed at helping users achieve
  optimal performance for deep learning and other high-performance computing
  tasks on the MI300X accelerator.

* The :doc:`Inception with PyTorch documentation </conceptual/ai-pytorch-inception>`
  describes how PyTorch integrates with ROCm for AI workloads. It outlines the
  use of PyTorch on the ROCm platform and focuses on how to efficiently
  leverage AMD GPU hardware for training and inference tasks in AI
  applications.

For more use cases and recommendations, see `ROCm PyTorch blog posts <https://rocm.blogs.amd.com/blog/tag/pytorch.html>`_.

489
docs/compatibility/ml-compatibility/tensorflow-compatibility.rst
Normal file
489
docs/compatibility/ml-compatibility/tensorflow-compatibility.rst
Normal file
@@ -0,0 +1,489 @@
|
||||
.. meta::
   :description: TensorFlow compatibility
   :keywords: GPU, TensorFlow compatibility

*******************************************************************************
TensorFlow compatibility
*******************************************************************************

`TensorFlow <https://www.tensorflow.org/>`_ is an open-source library for
solving machine learning, deep learning, and AI problems. It can solve many
problems across different sectors and industries but primarily focuses on
neural network training and inference. It is one of the most popular and
in-demand frameworks and is very active in open-source contribution and
development.

The `official TensorFlow repository <http://github.com/tensorflow/tensorflow>`_
includes full ROCm support. AMD maintains a TensorFlow `ROCm repository
<http://github.com/rocm/tensorflow-upstream>`_ in order to quickly add bug
fixes, updates, and support for the latest ROCm versions.

- ROCm TensorFlow release:

  - Offers :ref:`Docker images <tensorflow-docker-compat>` with
    ROCm and TensorFlow pre-installed.

  - ROCm TensorFlow repository: `<https://github.com/ROCm/tensorflow-upstream>`_

  - See the :doc:`ROCm TensorFlow installation guide <rocm-install-on-linux:install/3rd-party/tensorflow-install>`
    to get started.

- Official TensorFlow release:

  - Official TensorFlow repository: `<https://github.com/tensorflow/tensorflow>`_

  - See the `TensorFlow API versions <https://www.tensorflow.org/versions>`_ list.

.. note::

   The official TensorFlow documentation does not cover ROCm support. Use the
   ROCm documentation for installation instructions for TensorFlow on ROCm.
   See :doc:`rocm-install-on-linux:install/3rd-party/tensorflow-install`.
.. _tensorflow-docker-compat:

Docker image compatibility
===============================================================================

.. |docker-icon| raw:: html

   <i class="fab fa-docker"></i>

AMD validates and publishes ready-made `TensorFlow
<https://hub.docker.com/r/rocm/tensorflow>`_ images with ROCm backends on
Docker Hub. The following Docker image tags and associated inventories are
validated for `ROCm 6.3.1 <https://repo.radeon.com/rocm/apt/6.3.1/>`_. Click
the |docker-icon| icon to view the image on Docker Hub.

.. list-table:: TensorFlow Docker image components
   :header-rows: 1

   * - Docker image
     - TensorFlow
     - Dev
     - Python
     - TensorBoard

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.12-tf2.17.0-dev/images/sha256-804121ee4985718277ba7dcec53c57bdade130a1ef42f544b6c48090ad379c17"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

     - `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.17.0-cp312-cp312-manylinux_2_28_x86_64.whl>`_
     - dev
     - `Python 3.12 <https://www.python.org/downloads/release/python-3124/>`_
     - `TensorBoard 2.17.1 <https://github.com/tensorflow/tensorboard/tree/2.17.1>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.10-tf2.17.0-dev/images/sha256-776837ffa945913f6c466bfe477810a11453d21d5b6afb200be1c36e48fbc08e"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

     - `tensorflow-rocm 2.17.0 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.17.0-cp310-cp310-manylinux_2_28_x86_64.whl>`_
     - dev
     - `Python 3.10 <https://www.python.org/downloads/release/python-31012/>`_
     - `TensorBoard 2.17.0 <https://github.com/tensorflow/tensorboard/tree/2.17.0>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.12-tf2.16.2-dev/images/sha256-c793e1483e30809c3c28fc5d7805bedc033c73da224f839fff370717cb100944"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

     - `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.16.2-cp312-cp312-manylinux_2_28_x86_64.whl>`_
     - dev
     - `Python 3.12 <https://www.python.org/downloads/release/python-3124/>`_
     - `TensorBoard 2.16.2 <https://github.com/tensorflow/tensorboard/tree/2.16.2>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.10-tf2.16.0-dev/images/sha256-263e78414ae85d7bcd52a025a94131d0a279872a45ed632b9165336dfdcd4443"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

     - `tensorflow-rocm 2.16.2 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.16.2-cp310-cp310-manylinux_2_28_x86_64.whl>`_
     - dev
     - `Python 3.10 <https://www.python.org/downloads/release/python-31012/>`_
     - `TensorBoard 2.16.2 <https://github.com/tensorflow/tensorboard/tree/2.16.2>`_

   * - .. raw:: html

          <a href="https://hub.docker.com/layers/rocm/tensorflow/rocm6.3.1-py3.10-tf2.15.0-dev/images/sha256-479046a8477ca701a9494a813ab17e8ab4f6baa54641e65dc8d07629f1e6a880"><i class="fab fa-docker fa-lg"></i> rocm/tensorflow</a>

     - `tensorflow-rocm 2.15.1 <https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3/tensorflow_rocm-2.15.1-cp310-cp310-manylinux_2_28_x86_64.whl>`_
     - dev
     - `Python 3.10 <https://www.python.org/downloads/release/python-31012/>`_
     - `TensorBoard 2.15.2 <https://github.com/tensorflow/tensorboard/tree/2.15.2>`_
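After starting one of these containers, a quick way to confirm the ROCm
backend is active is to list the devices TensorFlow can see (a minimal sketch;
the reported count depends on your hardware):

.. code-block:: python

   # Sanity check inside a rocm/tensorflow container: list visible GPUs.
   import tensorflow as tf

   gpus = tf.config.list_physical_devices("GPU")
   print(f"TensorFlow {tf.__version__} sees {len(gpus)} GPU(s): {gpus}")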
Critical ROCm libraries for TensorFlow
===============================================================================

TensorFlow depends on multiple components, and the supported features of those
components can affect the TensorFlow ROCm supported feature set. The versions
in the following table refer to the first TensorFlow version where the ROCm
library was introduced as a dependency.

.. list-table::
   :widths: 25, 10, 35, 30
   :header-rows: 1

   * - ROCm library
     - Version
     - Purpose
     - Used in
   * - `hipBLAS <https://github.com/ROCm/hipBLAS>`_
     - 2.3.0
     - Provides GPU-accelerated Basic Linear Algebra Subprograms (BLAS) for
       matrix and vector operations.
     - Accelerates operations like ``tf.matmul``, ``tf.linalg.matmul``, and
       other matrix multiplications commonly used in neural network layers.
   * - `hipBLASLt <https://github.com/ROCm/hipBLASLt>`_
     - 0.10.0
     - Extends hipBLAS with additional optimizations like fused kernels and
       integer tensor cores.
     - Optimizes matrix multiplications and linear algebra operations used in
       layers like dense, convolutional, and RNNs in TensorFlow.
   * - `hipCUB <https://github.com/ROCm/hipCUB>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms for reduction,
       scan, sort, and select.
     - Supports operations like ``tf.reduce_sum``, ``tf.cumsum``, ``tf.sort``,
       and other tensor operations in TensorFlow, especially those involving
       scanning, sorting, and filtering.
   * - `hipFFT <https://github.com/ROCm/hipFFT>`_
     - 1.0.17
     - Accelerates Fast Fourier Transforms (FFT) for signal processing tasks.
     - Used for operations like signal processing, image filtering, and
       certain types of neural networks requiring FFT-based transformations.
   * - `hipSOLVER <https://github.com/ROCm/hipSOLVER>`_
     - 2.3.0
     - Provides GPU-accelerated direct linear solvers for dense and sparse
       systems.
     - Optimizes linear algebra functions such as solving systems of linear
       equations, often used in optimization and training tasks.
   * - `hipSPARSE <https://github.com/ROCm/hipSPARSE>`_
     - 3.1.2
     - Optimizes sparse matrix operations for efficient computations on sparse
       data.
     - Accelerates sparse matrix operations in models with sparse weight
       matrices or activations, commonly used in neural networks.
   * - `MIOpen <https://github.com/ROCm/MIOpen>`_
     - 3.3.0
     - Provides optimized deep learning primitives such as convolutions,
       pooling, normalization, and activation functions.
     - Speeds up convolutional neural networks (CNNs) and other layers. Used
       in TensorFlow for layers like ``tf.nn.conv2d``, ``tf.nn.relu``, and
       ``tf.nn.lstm_cell``.
   * - `RCCL <https://github.com/ROCm/rccl>`_
     - 2.21.5
     - Optimizes multi-GPU communication for operations like AllReduce and
       Broadcast.
     - Distributed data parallel training (``tf.distribute.MirroredStrategy``).
       Handles communication in multi-GPU setups.
   * - `rocThrust <https://github.com/ROCm/rocThrust>`_
     - 3.3.0
     - Provides a C++ template library for parallel algorithms like sorting,
       reduction, and scanning.
     - Reduction operations like ``tf.reduce_sum``, ``tf.cumsum`` for computing
       the cumulative sum of elements along a given axis, or ``tf.unique`` to
       find unique elements in a tensor, can use rocThrust.
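As a rough illustration of how these dependencies surface in user code, the
following sketch exercises operations that dispatch to the libraries above on
a ROCm build: matrix multiplication (hipBLAS/hipBLASLt), 2D convolution
(MIOpen), and a reduction (hipCUB/rocThrust):

.. code-block:: python

   import tensorflow as tf

   with tf.device("/GPU:0"):
       a = tf.random.normal([1024, 1024])
       b = tf.random.normal([1024, 1024])
       c = tf.linalg.matmul(a, b)                  # BLAS path (hipBLAS/hipBLASLt)

       images = tf.random.normal([8, 32, 32, 3])   # NHWC batch of images
       kernel = tf.random.normal([3, 3, 3, 16])    # 3x3 conv, 16 filters
       features = tf.nn.conv2d(images, kernel, strides=1, padding="SAME")  # MIOpen

       total = tf.reduce_sum(c)                    # reduction (hipCUB/rocThrust)

   print(total.numpy())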
Supported and unsupported features
===============================================================================

The following section maps supported data types and GPU-accelerated TensorFlow
features to their minimum supported ROCm and TensorFlow versions.

Data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The data type of a tensor is specified using the ``dtype`` attribute or
argument, and TensorFlow supports a wide range of data types for different use
cases.

The basic, single data types of `tf.dtypes <https://www.tensorflow.org/api_docs/python/tf/dtypes>`_
are as follows:

.. list-table::
   :header-rows: 1

   * - Data type
     - Description
     - Since TensorFlow
     - Since ROCm
   * - ``bfloat16``
     - 16-bit bfloat (brain floating point).
     - 1.0.0
     - 1.7
   * - ``bool``
     - Boolean.
     - 1.0.0
     - 1.7
   * - ``complex128``
     - 128-bit complex.
     - 1.0.0
     - 1.7
   * - ``complex64``
     - 64-bit complex.
     - 1.0.0
     - 1.7
   * - ``double``
     - 64-bit (double precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``float16``
     - 16-bit (half precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``float32``
     - 32-bit (single precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``float64``
     - 64-bit (double precision) floating-point.
     - 1.0.0
     - 1.7
   * - ``half``
     - 16-bit (half precision) floating-point.
     - 2.0.0
     - 2.0
   * - ``int16``
     - Signed 16-bit integer.
     - 1.0.0
     - 1.7
   * - ``int32``
     - Signed 32-bit integer.
     - 1.0.0
     - 1.7
   * - ``int64``
     - Signed 64-bit integer.
     - 1.0.0
     - 1.7
   * - ``int8``
     - Signed 8-bit integer.
     - 1.0.0
     - 1.7
   * - ``qint16``
     - Signed quantized 16-bit integer.
     - 1.0.0
     - 1.7
   * - ``qint32``
     - Signed quantized 32-bit integer.
     - 1.0.0
     - 1.7
   * - ``qint8``
     - Signed quantized 8-bit integer.
     - 1.0.0
     - 1.7
   * - ``quint16``
     - Unsigned quantized 16-bit integer.
     - 1.0.0
     - 1.7
   * - ``quint8``
     - Unsigned quantized 8-bit integer.
     - 1.0.0
     - 1.7
   * - ``resource``
     - Handle to a mutable, dynamically allocated resource.
     - 1.0.0
     - 1.7
   * - ``string``
     - Variable-length string, represented as byte array.
     - 1.0.0
     - 1.7
   * - ``uint16``
     - Unsigned 16-bit (word) integer.
     - 1.0.0
     - 1.7
   * - ``uint32``
     - Unsigned 32-bit (dword) integer.
     - 1.5.0
     - 1.7
   * - ``uint64``
     - Unsigned 64-bit (qword) integer.
     - 1.5.0
     - 1.7
   * - ``uint8``
     - Unsigned 8-bit (byte) integer.
     - 1.0.0
     - 1.7
   * - ``variant``
     - Data of arbitrary type (known at runtime).
     - 1.4.0
     - 1.7
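For example, the ``dtype`` argument selects one of the types in this table
(a minimal sketch):

.. code-block:: python

   import tensorflow as tf

   x = tf.constant([1.0, 2.0, 3.0], dtype=tf.bfloat16)  # 16-bit brain float
   y = tf.cast(x, tf.float32)                            # widen for accumulation
   print(x.dtype, y.dtype)  # <dtype: 'bfloat16'> <dtype: 'float32'>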
Features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This table provides an overview of key features in TensorFlow and their
availability in ROCm.

.. list-table::
   :header-rows: 1

   * - Module
     - Description
     - Since TensorFlow
     - Since ROCm
   * - ``tf.linalg`` (Linear Algebra)
     - Operations for matrix and tensor computations, such as
       ``tf.linalg.matmul`` (matrix multiplication), ``tf.linalg.inv``
       (matrix inversion), and ``tf.linalg.cholesky`` (Cholesky decomposition).
       These leverage GPUs for high-performance linear algebra operations.
     - 1.4
     - 1.8.2
   * - ``tf.nn`` (Neural Network Operations)
     - GPU-accelerated building blocks for deep learning models, such as 2D
       convolutions with ``tf.nn.conv2d``, max pooling operations with
       ``tf.nn.max_pool``, activation functions like ``tf.nn.relu``, or softmax
       for output layers with ``tf.nn.softmax``.
     - 1.0
     - 1.8.2
   * - ``tf.image`` (Image Processing)
     - GPU-accelerated functions for image preprocessing and augmentation,
       such as resizing images with ``tf.image.resize``, flipping images
       horizontally with ``tf.image.flip_left_right``, and adjusting image
       brightness randomly with ``tf.image.random_brightness``.
     - 1.1
     - 1.8.2
   * - ``tf.keras`` (High-Level API)
     - GPU acceleration for Keras layers and models, including dense layers
       (``tf.keras.layers.Dense``), convolutional layers
       (``tf.keras.layers.Conv2D``), and recurrent layers
       (``tf.keras.layers.LSTM``).
     - 1.4
     - 1.8.2
   * - ``tf.math`` (Mathematical Operations)
     - GPU-accelerated mathematical operations, such as summing across
       dimensions with ``tf.math.reduce_sum``, elementwise exponentiation with
       ``tf.math.exp``, and sigmoid activation (``tf.math.sigmoid``).
     - 1.5
     - 1.8.2
   * - ``tf.signal`` (Signal Processing)
     - Functions for spectral analysis and signal transformations.
     - 1.13
     - 2.1
   * - ``tf.data`` (Data Input Pipeline)
     - GPU-accelerated data preprocessing for efficient input pipelines, such
       as prefetching with ``tf.data.experimental.AUTOTUNE`` and GPU-enabled
       transformations like map and batch.
     - 1.4
     - 1.8.2
   * - ``tf.distribute`` (Distributed Training)
     - Enables scaling computations across multiple devices on a single
       machine or across multiple machines.
     - 1.13
     - 2.1
   * - ``tf.random`` (Random Number Generation)
     - GPU-accelerated random number generation.
     - 1.12
     - 1.9.2
   * - ``tf.TensorArray`` (Dynamic Array Operations)
     - Enables dynamic tensor manipulation on GPUs.
     - 1.0
     - 1.8.2
   * - ``tf.sparse`` (Sparse Tensor Operations)
     - GPU-accelerated sparse matrix manipulations.
     - 1.9
     - 1.9.0
   * - ``tf.experimental.numpy``
     - GPU-accelerated NumPy-like API for numerical computations.
     - 2.4
     - 4.1.1
   * - ``tf.RaggedTensor``
     - Handling of variable-length sequences and ragged tensors with GPU
       support.
     - 1.13
     - 2.1
   * - ``tf.function`` with XLA (Accelerated Linear Algebra)
     - Enables XLA compilation of functions for GPU acceleration.
     - 1.14
     - 2.4
   * - ``tf.quantization``
     - Quantized operations for inference, accelerated on GPUs.
     - 1.12
     - 1.9.2
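For instance, the XLA path in this table is opted into per function with the
``jit_compile`` argument (a minimal sketch):

.. code-block:: python

   import tensorflow as tf

   @tf.function(jit_compile=True)  # compile the function with XLA
   def dense_step(x, w):
       return tf.nn.relu(tf.linalg.matmul(x, w))

   out = dense_step(tf.random.normal([64, 128]), tf.random.normal([128, 256]))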
Distributed library features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Enables developers to scale computations across multiple devices on a single machine or
across multiple machines.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since TensorFlow
     - Since ROCm
   * - ``MultiWorkerMirroredStrategy``
     - Synchronous training across multiple workers using mirrored variables.
     - 2.0
     - 3.0
   * - ``MirroredStrategy``
     - Synchronous training across multiple GPUs on one machine.
     - 1.5
     - 2.5
   * - ``TPUStrategy``
     - Efficiently trains models on Google TPUs.
     - 1.9
     - ❌
   * - ``ParameterServerStrategy``
     - Asynchronous training using parameter servers for variable management.
     - 2.1
     - 4.0
   * - ``CentralStorageStrategy``
     - Keeps variables on a single device and performs computation on multiple
       devices.
     - 2.3
     - 4.1
   * - ``CollectiveAllReduceStrategy``
     - Synchronous training across multiple devices and hosts.
     - 1.14
     - 3.5
   * - Distribution Strategies API
     - High-level API to simplify distributed training configuration and
       execution.
     - 1.10
     - 3.0
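As a short, hedged example of the single-machine strategy in this table,
``MirroredStrategy`` replicates a Keras model across all local GPUs; on ROCm,
the underlying all-reduce communication is handled by RCCL:

.. code-block:: python

   import tensorflow as tf

   strategy = tf.distribute.MirroredStrategy()  # all local GPUs
   print("Devices in sync:", strategy.num_replicas_in_sync)

   with strategy.scope():
       # Variables created here are mirrored across replicas.
       model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
       model.compile(optimizer="sgd", loss="mse")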
Unsupported TensorFlow features
===============================================================================

The following are GPU-accelerated TensorFlow features not currently supported by
ROCm.

.. list-table::
   :header-rows: 1

   * - Feature
     - Description
     - Since TensorFlow
   * - Mixed precision with TF32
     - Mixed precision with TF32 is used for matrix multiplications,
       convolutions, and other linear algebra operations, particularly in
       deep learning workloads like CNNs and transformers.
     - 2.4
   * - ``tf.distribute.TPUStrategy``
     - Efficiently trains models on Google TPUs.
     - 1.9

Use cases and recommendations
===============================================================================

* The `Training a Neural Collaborative Filtering (NCF) Recommender on an AMD
  GPU <https://rocm.blogs.amd.com/artificial-intelligence/ncf/README.html>`_
  blog post discusses training an NCF recommender system using TensorFlow. It
  explains how NCF improves traditional collaborative filtering methods by
  leveraging neural networks to model non-linear user-item interactions. The
  post outlines the implementation using the recommenders library, focusing on
  the use of implicit data (for example, user interactions like viewing or
  purchasing) and how it addresses challenges like the lack of negative values.

* The `Creating a PyTorch/TensorFlow code environment on AMD GPUs
  <https://rocm.blogs.amd.com/software-tools-optimization/pytorch-tensorflow-env/README.html>`_
  blog post provides instructions for creating a machine learning environment
  for PyTorch and TensorFlow on AMD GPUs using ROCm. It covers steps like
  installing the libraries, cloning code repositories, installing dependencies,
  and troubleshooting potential issues with CUDA-based code. Additionally, it
  explains how to HIPify code (port CUDA code to HIP) and manage Docker images
  for a better experience on AMD GPUs. This guide aims to help data scientists
  and ML practitioners adapt their code for AMD GPUs.

For more use cases and recommendations, see the `ROCm TensorFlow blog posts <https://rocm.blogs.amd.com/blog/tag/tensorflow.html>`_.
@@ -615,7 +615,6 @@ The following table shows the hardware counters *by* all texture addressing unit

   "``TA_FLAT_READ_WAVEFRONTS_sum``", "Sum of flat opcode reads processed"
   "``TA_FLAT_WRITE_WAVEFRONTS_sum``", "Sum of flat opcode writes processed"
   "``TA_FLAT_WAVEFRONTS_sum``", "Total number of flat opcode wavefronts processed"
   "``TA_FLAT_READ_WAVEFRONTS_sum``", "Total number of flat opcode read wavefronts processed"
   "``TA_FLAT_ATOMIC_WAVEFRONTS_sum``", "Total number of flat opcode atomic wavefronts processed"
   "``TA_TOTAL_WAVEFRONTS_sum``", "Total number of wavefronts processed"

77	docs/conf.py
@@ -29,7 +29,7 @@ if os.environ.get("READTHEDOCS", "") == "True":
    # configurations for PDF output by Read the Docs
project = "ROCm Documentation"
author = "Advanced Micro Devices, Inc."
copyright = "Copyright (c) 2024 Advanced Micro Devices, Inc. All rights reserved."
copyright = "Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved."
version = "6.3.1"
release = "6.3.1"
setting_all_article_info = True
@@ -39,50 +39,38 @@ all_article_info_author = ""
# pages with specific settings
article_pages = [
    {"file": "about/release-notes", "os": ["linux", "windows"], "date": "2024-12-20"},
    {"file": "compatibility/ml-compatibility/pytorch-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/tensorflow-compatibility", "os": ["linux"]},
    {"file": "compatibility/ml-compatibility/jax-compatibility", "os": ["linux"]},
    {"file": "how-to/deep-learning-rocm", "os": ["linux"]},

    {"file": "how-to/rocm-for-ai/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/install", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/train-a-model", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/accelerate-training", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/deploy-your-model", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/hugging-face-models", "os": ["linux"]},
    {"file": "how-to/rocm-for-hpc/index", "os": ["linux"]},
    {"file": "how-to/llm-fine-tuning-optimization/index", "os": ["linux"]},
    {"file": "how-to/llm-fine-tuning-optimization/overview", "os": ["linux"]},
    {
        "file": "how-to/llm-fine-tuning-optimization/fine-tuning-and-inference",
        "os": ["linux"],
    },
    {
        "file": "how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference",
        "os": ["linux"],
    },
    {
        "file": "how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference",
        "os": ["linux"],
    },
    {
        "file": "how-to/llm-fine-tuning-optimization/llm-inference-frameworks",
        "os": ["linux"],
    },
    {
        "file": "how-to/llm-fine-tuning-optimization/model-acceleration-libraries",
        "os": ["linux"],
    },
    {"file": "how-to/llm-fine-tuning-optimization/model-quantization", "os": ["linux"]},
    {
        "file": "how-to/llm-fine-tuning-optimization/optimizing-with-composable-kernel",
        "os": ["linux"],
    },
    {
        "file": "how-to/llm-fine-tuning-optimization/optimizing-triton-kernel",
        "os": ["linux"],
    },
    {
        "file": "how-to/llm-fine-tuning-optimization/profiling-and-debugging",
        "os": ["linux"],
    },
    {"file": "how-to/performance-validation/mi300x/vllm-benchmark", "os": ["linux"]},

    {"file": "how-to/rocm-for-ai/training/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/train-a-model", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/training/scale-model-training", "os": ["linux"]},

    {"file": "how-to/rocm-for-ai/fine-tuning/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/overview", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/fine-tuning-and-inference", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference", "os": ["linux"]},

    {"file": "how-to/rocm-for-ai/inference/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/install", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/hugging-face-models", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/llm-inference-frameworks", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/vllm-benchmark", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference/deploy-your-model", "os": ["linux"]},

    {"file": "how-to/rocm-for-ai/inference-optimization/index", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/model-quantization", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/model-acceleration-libraries", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/optimizing-with-composable-kernel", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/optimizing-triton-kernel", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/profiling-and-debugging", "os": ["linux"]},
    {"file": "how-to/rocm-for-ai/inference-optimization/workload", "os": ["linux"]},

    {"file": "how-to/system-optimization/index", "os": ["linux"]},
    {"file": "how-to/system-optimization/mi300x", "os": ["linux"]},
    {"file": "how-to/system-optimization/mi200", "os": ["linux"]},
@@ -101,6 +89,9 @@ extensions = ["rocm_docs", "sphinx_reredirects", "sphinx_sitemap"]

external_projects_current_project = "rocm"

# Uncomment if facing rate limit exceed issue with local build
# external_projects_remote_repository = ""

html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "https://rocm-stg.amd.com/")
html_context = {}
if os.environ.get("READTHEDOCS", "") == "True":
Binary file not shown. Before: 13 KiB
BIN	docs/data/rocm-software-stack-6_3_2.jpg	Normal file
Binary file not shown. After: 346 KiB
@@ -11,20 +11,24 @@ ROCm provides a comprehensive ecosystem for deep learning development, including

deep learning frameworks and libraries such as PyTorch, TensorFlow, and JAX. ROCm works closely with these
frameworks to ensure that framework-specific optimizations take advantage of AMD accelerator and GPU architectures.

The following guides cover installation processes for ROCm-aware deep learning frameworks.
The following guides provide information on compatibility and supported
features for these ROCm-enabled deep learning frameworks.

* :doc:`PyTorch for ROCm <rocm-install-on-linux:install/3rd-party/pytorch-install>`
* :doc:`TensorFlow for ROCm <rocm-install-on-linux:install/3rd-party/tensorflow-install>`
* :doc:`JAX for ROCm <rocm-install-on-linux:install/3rd-party/jax-install>`
* :doc:`PyTorch compatibility <../compatibility/ml-compatibility/pytorch-compatibility>`
* :doc:`TensorFlow compatibility <../compatibility/ml-compatibility/tensorflow-compatibility>`
* :doc:`JAX compatibility <../compatibility/ml-compatibility/jax-compatibility>`

The following chart steps through typical installation workflows for installing deep learning frameworks for ROCm.
This chart steps through typical installation workflows for installing deep learning frameworks for ROCm.

.. image:: ../data/how-to/framework_install_2024_07_04.png
   :alt: Flowchart for installing ROCm-aware machine learning frameworks
   :align: center

Find information on version compatibility and framework release notes in :doc:`Third-party support matrix
<rocm-install-on-linux:reference/3rd-party-support-matrix>`.
See the installation instructions to get started.

* :doc:`PyTorch for ROCm <rocm-install-on-linux:install/3rd-party/pytorch-install>`
* :doc:`TensorFlow for ROCm <rocm-install-on-linux:install/3rd-party/tensorflow-install>`
* :doc:`JAX for ROCm <rocm-install-on-linux:install/3rd-party/jax-install>`

.. note::

@@ -35,4 +39,11 @@ through the following guides.

* :doc:`rocm-for-ai/index`

* :doc:`llm-fine-tuning-optimization/index`
* :doc:`Training <rocm-for-ai/training/index>`

* :doc:`Fine-tuning LLMs <rocm-for-ai/fine-tuning/index>`

* :doc:`Inference <rocm-for-ai/inference/index>`

* :doc:`Inference optimization <rocm-for-ai/inference-optimization/index>`
@@ -1,264 +0,0 @@

.. meta::
   :description: GPU-enabled Message Passing Interface
   :keywords: Message Passing Interface, MPI, AMD, ROCm

***************************************************************************************************
GPU-enabled Message Passing Interface
***************************************************************************************************

The Message Passing Interface (`MPI <https://www.mpi-forum.org>`_) is a standard API for distributed
and parallel application development that can scale to multi-node clusters. To facilitate the porting of
applications to clusters with GPUs, ROCm enables various technologies. You can use these
technologies to add GPU pointers to MPI calls and enable ROCm-aware MPI libraries to deliver optimal
performance for both intra-node and inter-node GPU-to-GPU communication.

The AMD kernel driver exposes remote direct memory access (RDMA) through *PeerDirect* interfaces.
This allows network interface cards (NICs) to directly read and write to RDMA-capable GPU device
memory, resulting in high-speed direct memory access (DMA) transfers between GPU and NIC. These
interfaces are used to optimize inter-node MPI message communication.
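As an illustration of passing GPU pointers to MPI calls, consider the
following hypothetical sketch. It assumes ``mpi4py`` compiled against a
ROCm-aware Open MPI (as built in the sections below) and a ROCm build of CuPy;
neither is covered by this guide, so treat this as a sketch rather than a
supported recipe:

.. code-block:: python

   # Hypothetical sketch: with a ROCm-aware MPI, device buffers can be handed
   # to Send/Recv directly, without staging through host memory.
   from mpi4py import MPI
   import cupy as cp

   comm = MPI.COMM_WORLD
   rank = comm.Get_rank()

   buf = cp.arange(1 << 20, dtype=cp.float32)   # buffer in GPU device memory
   if rank == 0:
       comm.Send(buf, dest=1, tag=11)           # GPU pointer passed straight to MPI
   elif rank == 1:
       comm.Recv(buf, source=0, tag=11)

Launched with ``mpirun -np 2 python3 pingpong.py`` (a hypothetical script
name), the transfer can use PeerDirect for direct NIC-to-GPU DMA.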
The Open MPI project is an open source implementation of the MPI standard. It's developed and maintained by
a consortium of academic, research, and industry partners. To compile Open MPI with ROCm support,
refer to the following sections:

* :ref:`open-mpi-ucx`
* :ref:`open-mpi-libfabric`

.. _open-mpi-ucx:

ROCm-aware Open MPI on InfiniBand and RoCE networks using UCX
================================================================

The `Unified Communication Framework <https://www.openucx.org/documentation>`_ (UCX) is an
open source, cross-platform framework designed to provide a common set of communication
interfaces for various network programming models and interfaces. UCX uses ROCm technologies to
implement various network operation primitives. UCX is the standard communication library for
InfiniBand and RDMA over Converged Ethernet (RoCE) network interconnects. To optimize data
transfer operations, many MPI libraries, including Open MPI, can leverage UCX internally.

UCX and Open MPI have a compile option to enable ROCm support. To install and configure UCX to compile Open MPI for ROCm, use the following instructions.

1. Set environment variables to install all software components in the same base directory. We use the
   home directory in our example, but you can specify a different location if you want.

   .. code-block:: shell

      export INSTALL_DIR=$HOME/ompi_for_gpu
      export BUILD_DIR=/tmp/ompi_for_gpu_build
      mkdir -p $BUILD_DIR

2. Install UCX. To view UCX and ROCm version compatibility, refer to the
   `communication libraries tables <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/3rd-party-support-matrix.html>`_.

   .. code-block:: shell

      export UCX_DIR=$INSTALL_DIR/ucx
      cd $BUILD_DIR
      git clone https://github.com/openucx/ucx.git -b v1.15.x
      cd ucx
      ./autogen.sh
      mkdir build
      cd build
      ../configure --prefix=$UCX_DIR \
          --with-rocm=/opt/rocm
      make -j $(nproc)
      make -j $(nproc) install

3. Install Open MPI.

   .. code-block:: shell

      export OMPI_DIR=$INSTALL_DIR/ompi
      cd $BUILD_DIR
      git clone --recursive https://github.com/open-mpi/ompi.git \
          -b v5.0.x
      cd ompi
      ./autogen.pl
      mkdir build
      cd build
      ../configure --prefix=$OMPI_DIR --with-ucx=$UCX_DIR \
          --with-rocm=/opt/rocm
      make -j $(nproc)
      make install

.. _rocm-enabled-osu:

ROCm-enabled OSU benchmarks
---------------------------------------------------------------------------------------------------------------

You can use OSU Micro Benchmarks (OMB) to evaluate the performance of various primitives on
ROCm-supported AMD GPUs. The ``--enable-rocm`` option exposes this functionality.

.. code-block:: shell

   export OSU_DIR=$INSTALL_DIR/osu
   cd $BUILD_DIR
   wget http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-7.2.tar.gz
   tar xfz osu-micro-benchmarks-7.2.tar.gz
   cd osu-micro-benchmarks-7.2
   ./configure --enable-rocm \
       --with-rocm=/opt/rocm \
       CC=$OMPI_DIR/bin/mpicc CXX=$OMPI_DIR/bin/mpicxx \
       LDFLAGS="-L$OMPI_DIR/lib/ -lmpi -L/opt/rocm/lib/ \
       $(hipconfig -C) -lamdhip64" CXXFLAGS="-std=c++11"
   make -j $(nproc)

Intra-node run
----------------------------------------------------------------------------------------------------------------

Before running an Open MPI job, you must set the following environment variables to ensure that
you're using the correct versions of Open MPI and UCX.

.. code-block:: shell

   export LD_LIBRARY_PATH=$OMPI_DIR/lib:$UCX_DIR/lib:/opt/rocm/lib
   export PATH=$OMPI_DIR/bin:$PATH

To run the OSU bandwidth benchmark between the first two GPU devices (``GPU 0`` and ``GPU 1``)
inside the same node, use the following code.

.. code-block:: shell

   $OMPI_DIR/bin/mpirun -np 2 \
       -x UCX_TLS=sm,self,rocm \
       --mca pml ucx \
       ./c/mpi/pt2pt/standard/osu_bw D D

This measures the unidirectional bandwidth from the first device (``GPU 0``) to the second device
(``GPU 1``). To select specific devices, for example ``GPU 2`` and ``GPU 3``, include the following
command:

.. code-block:: shell

   export HIP_VISIBLE_DEVICES=2,3

To force using a copy kernel instead of a DMA engine for the data transfer, use the following
command:

.. code-block:: shell

   export HSA_ENABLE_SDMA=0

The following output shows the effective transfer bandwidth measured for inter-die data transfer
between ``GPU 2`` and ``GPU 3`` on a system with MI250 GPUs. For messages larger than 67 MB, an effective
utilization of about 150 GB/sec is achieved:

.. image:: ../data/how-to/gpu-enabled-mpi-1.png
   :width: 400
   :alt: Inter-GPU bandwidth for various payload sizes

Collective operations
----------------------------------------------------------------------------------------------------------------

Collective operations on GPU buffers are best handled through the Unified Collective Communication
(UCC) library component in Open MPI. To accomplish this, you must configure and compile the UCC
library with ROCm support.

.. note::

   You can verify UCC and ROCm version compatibility using the
   `communication libraries tables <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/3rd-party-support-matrix.html>`_.

.. code-block:: shell

   export UCC_DIR=$INSTALL_DIR/ucc
   git clone https://github.com/openucx/ucc.git -b v1.2.x
   cd ucc
   ./autogen.sh
   ./configure --with-rocm=/opt/rocm \
       --with-ucx=$UCX_DIR \
       --prefix=$UCC_DIR
   make -j && make install

   # Configure and compile Open MPI with UCX, UCC, and ROCm support
   cd ompi
   ./configure --with-rocm=/opt/rocm \
       --with-ucx=$UCX_DIR \
       --with-ucc=$UCC_DIR \
       --prefix=$OMPI_DIR

To use the UCC component with an MPI application, you must set additional parameters:

.. code-block:: shell

   mpirun --mca pml ucx --mca osc ucx \
       --mca coll_ucc_enable 1 \
       --mca coll_ucc_priority 100 -np 64 ./my_mpi_app

.. _open-mpi-libfabric:

ROCm-aware Open MPI using libfabric
================================================================

For network interconnects that are not covered in the previous category, such as HPE Slingshot,
ROCm-aware communication can often be achieved through the libfabric library. For more information,
refer to the `libfabric documentation <https://github.com/ofiwg/libfabric/wiki>`_.

.. note::

   When using Open MPI v5.0.x with libfabric support, shared memory communication between
   processes on the same node goes through the *ob1/sm* component. This component has
   fundamental support for GPU memory that is accomplished by using a staging host buffer.
   Consequently, the performance of device-to-device shared memory communication is lower than
   the theoretical peak performance allowed by the GPU-to-GPU interconnect.

1. Install libfabric. Note that libfabric is often pre-installed. To determine if it's already installed, run:

   .. code-block:: shell

      module avail libfabric

   Alternatively, you can download and compile libfabric with ROCm support. Note that not all
   components required to support some networks (for example, HPE Slingshot) are available in the open source
   repository. Therefore, using a pre-installed libfabric library is strongly recommended over compiling
   libfabric manually.

   If a pre-compiled libfabric library is available on your system, you can skip the following step.

2. Compile libfabric with ROCm support.

   .. code-block:: shell

      export OFI_DIR=$INSTALL_DIR/ofi
      cd $BUILD_DIR
      git clone https://github.com/ofiwg/libfabric.git -b v1.19.x
      cd libfabric
      ./autogen.sh
      ./configure --prefix=$OFI_DIR \
          --with-rocr=/opt/rocm
      make -j $(nproc)
      make install

Installing Open MPI with libfabric support
----------------------------------------------------------------------------------------------------------------

To build Open MPI with libfabric, use the following code:

.. code-block:: shell

   export OMPI_DIR=$INSTALL_DIR/ompi
   cd $BUILD_DIR
   git clone --recursive https://github.com/open-mpi/ompi.git \
       -b v5.0.x
   cd ompi
   ./autogen.pl
   mkdir build
   cd build
   ../configure --prefix=$OMPI_DIR --with-ofi=$OFI_DIR \
       --with-rocm=/opt/rocm
   make -j $(nproc)
   make install

ROCm-aware OSU with Open MPI and libfabric
----------------------------------------------------------------------------------------------------------------

Compiling a ROCm-aware version of OSU benchmarks with Open MPI and libfabric uses the same
process described in :ref:`rocm-enabled-osu`.

To run an OSU benchmark using multiple nodes, use the following code:

.. code-block:: shell

   export LD_LIBRARY_PATH=$OMPI_DIR/lib:$OFI_DIR/lib64:/opt/rocm/lib
   $OMPI_DIR/bin/mpirun --mca pml ob1 --mca btl_ofi_mode 2 -np 2 \
       ./c/mpi/pt2pt/standard/osu_bw D D
@@ -1,37 +0,0 @@

.. meta::
   :description: How to fine-tune LLMs with ROCm
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial

*******************************************
Fine-tuning LLMs and inference optimization
*******************************************

ROCm empowers the fine-tuning and optimization of large language models, making them accessible and efficient for
specialized tasks. ROCm supports the broader AI ecosystem to ensure seamless integration with open frameworks,
models, and tools.

For more information, see `What is ROCm? <https://rocm.docs.amd.com/en/latest/what-is-rocm.html>`_

Throughout the following topics, this guide discusses the goals and :ref:`challenges of fine-tuning a large language
model <fine-tuning-llms-concept-challenge>` like Llama 2. Then, it introduces :ref:`common methods of optimizing your
fine-tuning <fine-tuning-llms-concept-optimizations>` using techniques like LoRA with libraries like PEFT. In the
sections that follow, you'll find practical guides on libraries and tools to accelerate your fine-tuning.

- :doc:`Conceptual overview of fine-tuning LLMs <overview>`

- :doc:`Fine-tuning and inference <fine-tuning-and-inference>` using a
  :doc:`single-accelerator <single-gpu-fine-tuning-and-inference>` or
  :doc:`multi-accelerator <multi-gpu-fine-tuning-and-inference>` system.

- :doc:`Model quantization <model-quantization>`

- :doc:`Model acceleration libraries <model-acceleration-libraries>`

- :doc:`LLM inference frameworks <llm-inference-frameworks>`

- :doc:`Optimizing with Composable Kernel <optimizing-with-composable-kernel>`

- :doc:`Optimizing Triton kernels <optimizing-triton-kernel>`

- :doc:`Profiling and debugging <profiling-and-debugging>`
@@ -1,407 +0,0 @@

.. meta::
   :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and the unified
                 ROCm Docker image.
   :keywords: model, MAD, automation, dashboarding, validate

***********************************************************
LLM inference performance validation on AMD Instinct MI300X
***********************************************************

.. _vllm-benchmark-unified-docker:

The `ROCm vLLM Docker <https://hub.docker.com/r/rocm/vllm/tags>`_ image offers
a prebuilt, optimized environment designed for validating large language model
(LLM) inference performance on the AMD Instinct™ MI300X accelerator. This
ROCm vLLM Docker image integrates vLLM and PyTorch tailored specifically for the
MI300X accelerator and includes the following components:

* `ROCm 6.2.1 <https://github.com/ROCm/ROCm>`_

* `vLLM 0.6.4 <https://docs.vllm.ai/en/latest>`_

* `PyTorch 2.5.0 <https://github.com/pytorch/pytorch>`_

* Tuning files (in CSV format)

With this Docker image, you can quickly validate the expected inference
performance numbers on the MI300X accelerator. This topic also provides tips on
optimizing performance with popular AI models.

.. hlist::
   :columns: 6

   * Llama 3.1 8B

   * Llama 3.1 70B

   * Llama 3.1 405B

   * Llama 2 7B

   * Llama 2 70B

   * Mixtral 8x7B

   * Mixtral 8x22B

   * Mistral 7B

   * Qwen2 7B

   * Qwen2 72B

   * JAIS 13B

   * JAIS 30B

.. _vllm-benchmark-vllm:

.. note::

   vLLM is a toolkit and library for LLM inference and serving. AMD implements
   high-performance custom kernels and modules in vLLM to enhance performance.
   See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for
   more information.

Getting started
===============

Use the following procedures to reproduce the benchmark results on an
MI300X accelerator with the prebuilt vLLM Docker image.

.. _vllm-benchmark-get-started:

1. Disable NUMA auto-balancing.

   To optimize performance, disable automatic NUMA balancing. Otherwise, the GPU
   might hang until the periodic balancing is finalized. For more information,
   see :ref:`AMD Instinct MI300X system optimization <mi300x-disable-numa>`.

   .. code-block:: shell

      # disable automatic NUMA balancing
      sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
      # check if NUMA balancing is disabled (returns 0 if disabled)
      cat /proc/sys/kernel/numa_balancing
      0
2. Download the :ref:`ROCm vLLM Docker image <vllm-benchmark-unified-docker>`.

   Use the following command to pull the Docker image from Docker Hub.

   .. code-block:: shell

      docker pull rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

Once setup is complete, you can choose between two options to reproduce the
benchmark results:

- :ref:`MAD-integrated benchmarking <vllm-benchmark-mad>`

- :ref:`Standalone benchmarking <vllm-benchmark-standalone>`

.. _vllm-benchmark-mad:

MAD-integrated benchmarking
===========================

Clone the ROCm Model Automation and Dashboarding (`<https://github.com/ROCm/MAD>`__) repository to a local
directory and install the required packages on the host machine.

.. code-block:: shell

   git clone https://github.com/ROCm/MAD
   cd MAD
   pip install -r requirements.txt

Use this command to run a performance benchmark test of the Llama 3.1 8B model
on one GPU with the ``float16`` data type on the host machine.

.. code-block:: shell

   export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models"
   python3 tools/run_models.py --tags pyt_vllm_llama-3.1-8b --keep-model-dir --live-output --timeout 28800

ROCm MAD launches a Docker container with the name
``container_ci-pyt_vllm_llama-3.1-8b``. The latency and throughput reports of the
model are collected in the following path: ``~/MAD/reports_float16/``.

Although the following models are preconfigured to collect latency and
throughput performance data, you can also change the benchmarking parameters.
Refer to the :ref:`Standalone benchmarking <vllm-benchmark-standalone>` section.

Available models
----------------

.. hlist::
   :columns: 3

   * ``pyt_vllm_llama-3.1-8b``

   * ``pyt_vllm_llama-3.1-70b``

   * ``pyt_vllm_llama-3.1-405b``

   * ``pyt_vllm_llama-2-7b``

   * ``pyt_vllm_llama-2-70b``

   * ``pyt_vllm_mixtral-8x7b``

   * ``pyt_vllm_mixtral-8x22b``

   * ``pyt_vllm_mistral-7b``

   * ``pyt_vllm_qwen2-7b``

   * ``pyt_vllm_qwen2-72b``

   * ``pyt_vllm_jais-13b``

   * ``pyt_vllm_jais-30b``

   * ``pyt_vllm_llama-3.1-8b_fp8``

   * ``pyt_vllm_llama-3.1-70b_fp8``

   * ``pyt_vllm_llama-3.1-405b_fp8``

   * ``pyt_vllm_mixtral-8x7b_fp8``

   * ``pyt_vllm_mixtral-8x22b_fp8``
.. _vllm-benchmark-standalone:

Standalone benchmarking
=======================

You can run the vLLM benchmark tool independently by starting the
:ref:`Docker container <vllm-benchmark-get-started>` as shown in the following
snippet.

.. code-block:: shell

   docker pull rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
   docker run -it --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128G --security-opt seccomp=unconfined --security-opt apparmor=unconfined --cap-add=SYS_PTRACE -v $(pwd):/workspace --env HUGGINGFACE_HUB_CACHE=/workspace --name vllm_v0.6.4 rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

In the Docker container, clone the ROCm MAD repository and navigate to the
benchmark scripts directory at ``~/MAD/scripts/vllm``.

.. code-block:: shell

   git clone https://github.com/ROCm/MAD
   cd MAD/scripts/vllm

Command
-------

To start the benchmark, use the following command with the appropriate options.
See :ref:`Options <vllm-benchmark-standalone-options>` for the list of
options and their descriptions.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s $test_option -m $model_repo -g $num_gpu -d $datatype

See the :ref:`examples <vllm-benchmark-run-benchmark>` for more information.

.. note::

   The input sequence length, output sequence length, and tensor parallel (TP) size are
   already configured. You don't need to specify them with this script.

.. note::

   If you encounter the following error, pass your access-authorized Hugging
   Face token to the gated models.

   .. code-block:: shell

      OSError: You are trying to access a gated repo.

      # pass your HF_TOKEN
      export HF_TOKEN=$your_personal_hf_token
.. _vllm-benchmark-standalone-options:

Options
-------

.. list-table::
   :header-rows: 1
   :align: center

   * - Name
     - Options
     - Description

   * - ``$test_option``
     - latency
     - Measure decoding token latency

   * -
     - throughput
     - Measure token generation throughput

   * -
     - all
     - Measure both throughput and latency

   * - ``$model_repo``
     - ``meta-llama/Meta-Llama-3.1-8B-Instruct``
     - Llama 3.1 8B

   * - (``float16``)
     - ``meta-llama/Meta-Llama-3.1-70B-Instruct``
     - Llama 3.1 70B

   * -
     - ``meta-llama/Meta-Llama-3.1-405B-Instruct``
     - Llama 3.1 405B

   * -
     - ``meta-llama/Llama-2-7b-chat-hf``
     - Llama 2 7B

   * -
     - ``meta-llama/Llama-2-70b-chat-hf``
     - Llama 2 70B

   * -
     - ``mistralai/Mixtral-8x7B-Instruct-v0.1``
     - Mixtral 8x7B

   * -
     - ``mistralai/Mixtral-8x22B-Instruct-v0.1``
     - Mixtral 8x22B

   * -
     - ``mistralai/Mistral-7B-Instruct-v0.3``
     - Mistral 7B

   * -
     - ``Qwen/Qwen2-7B-Instruct``
     - Qwen2 7B

   * -
     - ``Qwen/Qwen2-72B-Instruct``
     - Qwen2 72B

   * -
     - ``core42/jais-13b-chat``
     - JAIS 13B

   * -
     - ``core42/jais-30b-chat-v3``
     - JAIS 30B

   * - ``$model_repo``
     - ``amd/Meta-Llama-3.1-8B-Instruct-FP8-KV``
     - Llama 3.1 8B

   * - (``float8``)
     - ``amd/Meta-Llama-3.1-70B-Instruct-FP8-KV``
     - Llama 3.1 70B

   * -
     - ``amd/Meta-Llama-3.1-405B-Instruct-FP8-KV``
     - Llama 3.1 405B

   * -
     - ``amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV``
     - Mixtral 8x7B

   * -
     - ``amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV``
     - Mixtral 8x22B

   * - ``$num_gpu``
     - 1 or 8
     - Number of GPUs

   * - ``$datatype``
     - ``float16`` or ``float8``
     - Data type
.. _vllm-benchmark-run-benchmark:

Running the benchmark on the MI300X accelerator
-----------------------------------------------

Here are some examples of running the benchmark with various options.
See :ref:`Options <vllm-benchmark-standalone-options>` for the list of
options and their descriptions.

Example 1: latency benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to benchmark the latency of the Llama 3.1 8B model on one GPU with the ``float16`` and ``float8`` data types.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s latency -m meta-llama/Meta-Llama-3.1-8B-Instruct -g 1 -d float16
   ./vllm_benchmark_report.sh -s latency -m amd/Meta-Llama-3.1-8B-Instruct-FP8-KV -g 1 -d float8

Find the latency reports at:

- ``./reports_float16/summary/Meta-Llama-3.1-8B-Instruct_latency_report.csv``

- ``./reports_float8/summary/Meta-Llama-3.1-8B-Instruct-FP8-KV_latency_report.csv``

Example 2: throughput benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to benchmark the throughput of the Llama 3.1 8B model on one GPU with the ``float16`` and ``float8`` data types.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s throughput -m meta-llama/Meta-Llama-3.1-8B-Instruct -g 1 -d float16
   ./vllm_benchmark_report.sh -s throughput -m amd/Meta-Llama-3.1-8B-Instruct-FP8-KV -g 1 -d float8

Find the throughput reports at:

- ``./reports_float16/summary/Meta-Llama-3.1-8B-Instruct_throughput_report.csv``

- ``./reports_float8/summary/Meta-Llama-3.1-8B-Instruct-FP8-KV_throughput_report.csv``

.. raw:: html

   <style>
   mjx-container[jax="CHTML"][display="true"] {
       text-align: left;
       margin: 0;
   }
   </style>

.. note::

   Throughput is calculated as:

   - .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time

   - .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time

Further reading
===============

- For application performance optimization strategies for HPC and AI workloads,
  including inference with vLLM, see :doc:`/how-to/tuning-guides/mi300x/workload`.

- To learn more about the options for latency and throughput benchmark scripts,
  see `<https://github.com/ROCm/vllm/tree/main/benchmarks>`_.

- To learn more about system settings and management practices to configure your system for
  MI300X accelerators, see :doc:`/how-to/system-optimization/mi300x`.

- To learn how to run LLM models from Hugging Face or your own model, see
  :doc:`Using ROCm for AI </how-to/rocm-for-ai/index>`.

- To learn how to optimize inference on LLMs, see
  :doc:`Fine-tuning LLMs and inference optimization </how-to/llm-fine-tuning-optimization/index>`.

- For a list of other ready-made Docker images for ROCm, see the
  :doc:`Docker image support matrix <rocm-install-on-linux:reference/docker-image-support-matrix>`.

- To compare with the previous version of the ROCm vLLM Docker image for performance validation, refer to
  `LLM inference performance validation on AMD Instinct MI300X (ROCm 6.2.0) <https://rocm.docs.amd.com/en/docs-6.2.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_.
@@ -1,6 +1,6 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
-   :keywords: ROCm, LLM, fine-tuning, inference, usage, tutorial
+   :description: How to fine-tune models with ROCm
+   :keywords: ROCm, LLM, fine-tuning, inference, usage, tutorial, deep learning, PyTorch, TensorFlow, JAX

 *************************
 Fine-tuning and inference
@@ -9,7 +9,7 @@ Fine-tuning and inference
 Fine-tuning using ROCm involves leveraging AMD's GPU-accelerated :doc:`libraries <rocm:reference/api-libraries>` and
 :doc:`tools <rocm:reference/rocm-tools>` to optimize and train deep learning models. ROCm provides a comprehensive
 ecosystem for deep learning development, including open-source libraries for optimized deep learning operations and
-ROCm-aware versions of :doc:`deep learning frameworks <../deep-learning-rocm>` such as PyTorch, TensorFlow, and JAX.
+ROCm-aware versions of :doc:`deep learning frameworks <../../deep-learning-rocm>` such as PyTorch, TensorFlow, and JAX.

 Single-accelerator systems, such as a machine equipped with a single accelerator or GPU, are commonly used for
 smaller-scale deep learning tasks, including fine-tuning pre-trained models and running inference on moderately
docs/how-to/rocm-for-ai/fine-tuning/index.rst (new file, 25 lines)
@@ -0,0 +1,25 @@
.. meta::
   :description: How to fine-tune LLMs with ROCm
   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, GPUs, Llama, accelerators

*******************************************
Use ROCm for fine-tuning LLMs
*******************************************

Fine-tuning is an essential technique in machine learning, where a pre-trained model, typically trained on a large-scale dataset, is further refined to achieve better performance and adapt to a particular task or dataset of interest.

With AMD GPUs, the fine-tuning process benefits from the parallel processing capabilities and efficient resource management, ultimately leading to improved performance and faster model adaptation to the target domain.

The ROCm™ software platform helps you optimize this fine-tuning process by supporting various optimization techniques tailored for AMD GPUs. It empowers the fine-tuning of large language models, making them accessible and efficient for specialized tasks. ROCm supports the broader AI ecosystem to ensure seamless integration with open frameworks, models, and tools.

Throughout the following topics, this guide discusses the goals and :ref:`challenges of fine-tuning a large language
model <fine-tuning-llms-concept-challenge>` like Llama 2. In the
sections that follow, you'll find practical guides on libraries and tools to accelerate your fine-tuning.

- :doc:`Conceptual overview of fine-tuning LLMs <overview>`

- :doc:`Fine-tuning and inference <fine-tuning-and-inference>` using a
  :doc:`single-accelerator <single-gpu-fine-tuning-and-inference>` or
  :doc:`multi-accelerator <multi-gpu-fine-tuning-and-inference>` system.

@@ -1,6 +1,6 @@
 .. meta::
    :description: Model fine-tuning and inference on a multi-GPU system
-   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, multi-GPU, distributed, inference
+   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, multi-GPU, distributed, inference, accelerators, PyTorch, HuggingFace, torchtune

 *****************************************************
 Fine-tuning and inference using multiple accelerators
@@ -233,4 +233,4 @@ GPU model fine-tuning and inference with LLMs.
    INFO:torchtune.utils.logging:Learning rate scheduler is initialized.
    1|111|Loss: 1.5790324211120605:   7%|█         | 114/1618

-Read more about inference frameworks in :doc:`LLM inference frameworks <llm-inference-frameworks>`.
+Read more about inference frameworks in :doc:`LLM inference frameworks <../inference/llm-inference-frameworks>`.
@@ -1,6 +1,6 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
-   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, optimization, LoRA, walkthrough
+   :description: Conceptual overview of fine-tuning LLMs
+   :keywords: ROCm, LLM, Llama, fine-tuning, usage, tutorial, optimization, LoRA, walkthrough, PEFT, Reinforcement

 ***************************************
 Conceptual overview of fine-tuning LLMs
@@ -41,7 +41,7 @@ The weight update is as follows: :math:`W_{updated} = W + ΔW`.
 If the weight matrix :math:`W` contains 7B parameters, then the weight update matrix :math:`ΔW` should also
 contain 7B parameters. Therefore, the :math:`ΔW` calculation is computationally and memory intensive.

-.. figure:: ../../data/how-to/llm-fine-tuning-optimization/weight-update.png
+.. figure:: ../../../data/how-to/llm-fine-tuning-optimization/weight-update.png
    :alt: Weight update diagram

    (a) Weight update in regular fine-tuning. (b) Weight update in LoRA where the product of matrix A (:math:`M\times K`)
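
To make the parameter savings behind LoRA concrete, here is a minimal NumPy
sketch of the low-rank update described above, using the common
:math:`ΔW = BA` factorization. The dimensions and rank are toy assumptions,
far smaller than the 7B-parameter case.

.. code-block:: python

   import numpy as np

   d, k, r = 4096, 4096, 8                  # toy weight shape and LoRA rank (assumed)

   rng = np.random.default_rng(0)
   W = rng.standard_normal((d, k))          # frozen base weight
   B = np.zeros((d, r))                     # LoRA factor, initialized to zero
   A = rng.standard_normal((r, k)) * 0.01   # LoRA factor

   delta_W = B @ A                          # low-rank weight update (d x k)
   W_updated = W + delta_W                  # W_updated = W + delta_W

   full_params = d * k                      # parameters in a full update
   lora_params = d * r + r * k              # parameters LoRA actually trains
   print(full_params, lora_params)          # 16777216 vs. 65536 (~0.4%)
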
@@ -1,6 +1,6 @@
 .. meta::
    :description: Model fine-tuning and inference on a single-GPU system
-   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, single-GPU, LoRA, PEFT, inference
+   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, single-GPU, LoRA, PEFT, inference, SFTTrainer

 ****************************************************
 Fine-tuning and inference using a single accelerator
@@ -80,7 +80,7 @@ Setting up the base implementation environment
 #. Install the required dependencies.

    bitsandbytes is a library that facilitates quantization to improve the efficiency of deep learning models. Learn more
-   about its use in :doc:`model-quantization`.
+   about its use in :doc:`../inference-optimization/model-quantization`.

    See the :ref:`Optimizations for model fine-tuning <fine-tuning-llms-concept-optimizations>` section for a brief discussion on
    PEFT and TRL.
@@ -507,4 +507,4 @@ If using multiple accelerators, see
 popular libraries that simplify fine-tuning and inference in a multi-accelerator system.

 Read more about inference frameworks like vLLM and Hugging Face TGI in
-:doc:`LLM inference frameworks <llm-inference-frameworks>`.
+:doc:`LLM inference frameworks <../inference/llm-inference-frameworks>`.
@@ -1,28 +1,27 @@
 .. meta::
-   :description: How to use ROCm for AI
+   :description: Learn how to use ROCm for AI.
    :keywords: ROCm, AI, machine learning, LLM, usage, tutorial

-*****************
-Using ROCm for AI
-*****************
+**************************
+Use ROCm for AI
+**************************

-ROCm offers a suite of optimizations for AI workloads from large language models (LLMs) to image and video detection and
-recognition, life sciences and drug discovery, autonomous driving, robotics, and more. ROCm proudly supports the broader
-AI software ecosystem, including open frameworks, models, and tools.
+ROCm™ is an open-source software platform that enables high-performance computing and machine learning applications. It features the ability to accelerate training, fine-tuning, and inference for AI application development. With ROCm, you can access the full power of AMD GPUs, which can significantly improve the performance and efficiency of AI workloads.

-For more information, see `What is ROCm? <https://rocm.docs.amd.com/en/latest/what-is-rocm.html>`_
+You can use ROCm to perform distributed training, which enables you to train models across multiple GPUs or nodes simultaneously. Additionally, ROCm supports mixed-precision training, which can help reduce the memory and compute requirements of training workloads. For fine-tuning, ROCm provides access to various algorithms and optimization techniques. In terms of inference, ROCm provides several techniques that can help you optimize your models for deployment, such as quantization, GEMM tuning, and optimization with Composable Kernel.
+
+Overall, ROCm can be used to improve the performance and efficiency of your AI applications. With its training, fine-tuning, and inference support, ROCm provides a complete solution for optimizing AI workflows and achieving the best possible results on AMD GPUs.

-In this guide, you'll learn about:
+In this guide, you'll learn how to use ROCm for AI:

-- :doc:`Installing ROCm and machine learning frameworks <install>`
+- :doc:`Training <training/index>`

-- :doc:`Scaling model training <scale-model-training>`
+- :doc:`Fine-tuning LLMs <fine-tuning/index>`

-- :doc:`Training a model <train-a-model>`
+- :doc:`Inference <inference/index>`

-- :doc:`Running models from Hugging Face <hugging-face-models>`
+- :doc:`Inference optimization <inference-optimization/index>`

-- :doc:`Deploying your model <deploy-your-model>`

 To learn about ROCm for HPC applications and scientific computing, see
 :doc:`../rocm-for-hpc/index`.

docs/how-to/rocm-for-ai/inference-optimization/index.rst (new file, 36 lines)
@@ -0,0 +1,36 @@
.. meta::
   :description: How to use ROCm for AI inference optimization
   :keywords: ROCm, LLM, AI inference, Optimization, GPUs, usage, tutorial

*******************************************
Use ROCm for AI inference optimization
*******************************************

AI inference optimization is the process of improving the performance of machine learning models and speeding up the inference process. It includes:

- **Quantization**: This involves reducing the precision of model weights and activations while maintaining acceptable accuracy levels. Reduced precision improves inference efficiency because lower precision data requires less storage and better utilizes the hardware's computation power. (A minimal sketch follows after this list.)

- **Kernel optimization**: This technique involves optimizing computation kernels to exploit the underlying hardware capabilities. For example, the kernels can be optimized to use multiple GPU cores or utilize specialized hardware like tensor cores to accelerate the computations.

- **Libraries**: Libraries such as Flash Attention, xFormers, and PyTorch TunableOp are used to accelerate deep learning models and improve the performance of inference workloads.

- **Hardware acceleration**: Hardware acceleration techniques, like GPUs for AI inference, can significantly improve performance due to their parallel processing capabilities.

- **Pruning**: This involves removing unnecessary connections, layers, or weights from a pre-trained model while maintaining acceptable accuracy levels, resulting in a smaller model that requires fewer computational resources to run inference.

Utilizing these optimization techniques with the ROCm™ software platform can significantly reduce inference time, improve performance, and reduce the cost of your AI applications.
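
As a hedged illustration of the quantization idea above, the following sketch
applies symmetric int8 post-training quantization to a single tensor. Real
toolchains (GPTQ, bitsandbytes, and others) are far more sophisticated; this
only shows the core scale-and-round step, with toy values.

.. code-block:: python

   import numpy as np

   rng = np.random.default_rng(0)
   w = rng.standard_normal(8).astype(np.float32)   # toy weights

   scale = np.abs(w).max() / 127.0                 # map the largest value to the int8 range
   q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
   w_hat = q.astype(np.float32) * scale            # dequantize to compare

   print("int8 codes:", q)
   print("max abs error:", np.abs(w - w_hat).max())
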
Throughout the following topics, this guide discusses optimization techniques for inference workloads.

- :doc:`Model quantization <model-quantization>`

- :doc:`Model acceleration libraries <model-acceleration-libraries>`

- :doc:`Optimizing with Composable Kernel <optimizing-with-composable-kernel>`

- :doc:`Optimizing Triton kernels <optimizing-triton-kernel>`

- :doc:`Profiling and debugging <profiling-and-debugging>`

- :doc:`Workload tuning <workload>`

@@ -1,5 +1,5 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
+   :description: How to use model acceleration techniques and libraries to improve memory efficiency and performance.
    :keywords: ROCm, LLM, fine-tuning, usage, tutorial, Flash Attention, Hugging Face, xFormers, vLLM, PyTorch

 ****************************
@@ -20,7 +20,7 @@ Attention (GQA), and Multi-Query Attention (MQA). This reduction in memory movem
 time-to-first-token (TTFT) latency for large batch sizes and long prompt sequences, thereby enhancing overall
 performance.

-.. image:: ../../data/how-to/llm-fine-tuning-optimization/attention-module.png
+.. image:: ../../../data/how-to/llm-fine-tuning-optimization/attention-module.png
    :alt: Attention module of a large language model utilizing tiling
    :align: center

@@ -245,7 +245,7 @@ page describes the options.
    Validator,ROCBLAS_VERSION,4.1.0-cefa4a9b-dirty
    GemmTunableOp_float_TN,tn_200_100_20,Gemm_Rocblas_32323,0.00669595

-.. image:: ../../data/how-to/llm-fine-tuning-optimization/tunableop.png
+.. image:: ../../../data/how-to/llm-fine-tuning-optimization/tunableop.png
    :alt: GEMM and TunableOp
    :align: center

@@ -277,7 +277,7 @@ Installing FBGEMM_GPU
 Installing FBGEMM_GPU consists of the following steps:

 * Set up an isolated Miniconda environment
-* Install ROCm using Docker or the :doc:`package manager <rocm-install-on-linux:install/native-install/index>`
+* Install ROCm using Docker or the :doc:`package manager <rocm-install-on-linux:install/install-methods/package-manager-index>`
 * Install the nightly `PyTorch <https://pytorch.org/>`_ build
 * Complete the pre-build and build tasks

@@ -1,5 +1,5 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
+   :description: How to use model quantization techniques to speed up inference.
    :keywords: ROCm, LLM, fine-tuning, usage, tutorial, quantization, GPTQ, transformers, bitsandbytes

 *****************************
@@ -1,6 +1,6 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
-   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, Triton, kernel, performance, optimization
+   :description: How to optimize Triton kernels for ROCm.
+   :keywords: ROCm, LLM, fine-tuning, usage, MI300X, tutorial, Triton, kernel, performance, optimization

 *************************
 Optimizing Triton kernels
@@ -13,7 +13,7 @@ and CUDA kernel optimization.

 Refer to the
 :ref:`Triton kernel performance optimization <mi300x-triton-kernel-performance-optimization>`
-section of the :doc:`/how-to/tuning-guides/mi300x/workload` guide
+section of the :doc:`workload` guide
 for detailed information.

 Triton kernel performance optimization includes the following topics.
@@ -1,8 +1,9 @@
-<head>
-  <meta charset="UTF-8">
-  <meta name="description" content="SmoothQuant model inference on AMD Instinct MI300X using Composable Kernel">
-  <meta name="keywords" content="Mixed Precision, Kernel, Inference, Linear Algebra">
-</head>
+---
+myst:
+  html_meta:
+    "description": "How to optimize machine learning workloads with Composable Kernel (CK)."
+    "keywords": "mixed, precision, kernel, inference, linear, algebra, ck, GEMM"
+---

 # Optimizing with Composable Kernel

@@ -32,7 +33,7 @@ The template parameters of the instance are grouped into four parameter types:
 ================
 ### Figure 2
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-template_parameters.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-template_parameters.jpg
 The template parameters of the selected GEMM kernel are classified into four groups. These template parameter groups should be defined properly before running the instance.
 ```

@@ -126,7 +127,7 @@ The row and column, and stride information of input matrices are also passed to
 ================
 ### Figure 3
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-kernel_launch.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-kernel_launch.jpg
 Templated kernel launching consists of kernel instantiation, making arguments by passing in actual application parameters, creating an invoker, and running the instance through the invoker.
 ```

@@ -155,7 +156,7 @@ The first operation in the process is to perform the multiplication of input mat
 ================
 ### Figure 4
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-operation_flow.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-operation_flow.jpg
 Operation flow.
 ```

@@ -171,7 +172,7 @@ Here, we use [DeviceBatchedGemmMultiD_Xdl](https://github.com/ROCm/composable_ke
 ================
 ### Figure 5
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-root_instance.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-root_instance.jpg
 Use the ‘DeviceBatchedGemmMultiD_Xdl’ instance as a root.
 ```

@@ -421,7 +422,7 @@ Run `python setup.py install` to build and install the extension. It should look
 ================
 ### Figure 6
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-compilation.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-compilation.jpg
 Compilation and installation of the INT8 kernels.
 ```

@@ -433,7 +434,7 @@ The implementation architecture of running SmoothQuant models on MI300X GPUs is
 ================
 ### Figure 7
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-inference_flow.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-inference_flow.jpg
 The implementation architecture of running SmoothQuant models on AMD MI300X accelerators.
 ```

@@ -459,7 +460,7 @@ Figure 8 shows the performance comparisons between the original FP16 and the Smo
 ================
 ### Figure 8
 ================ -->
-```{figure} ../../data/how-to/llm-fine-tuning-optimization/ck-comparisons.jpg
+```{figure} ../../../data/how-to/llm-fine-tuning-optimization/ck-comparisons.jpg
 Performance comparisons between the original FP16 and the SmoothQuant-quantized INT8 models on a single MI300X accelerator.
 ```

@@ -1,12 +1,12 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
-   :keywords: ROCm, LLM, fine-tuning, usage, tutorial, profiling, debugging, performance, Triton
+   :description: How to use ROCm profiling and debugging tools.
+   :keywords: ROCm, LLM, fine-tuning, usage, MI300X, tutorial, profiling, debugging, performance, Triton

 ***********************
 Profiling and debugging
 ***********************

-This section provides an index for further documentation on profiling and
+This section provides an index for further documentation on profiling and
 debugging tools and their common usage patterns.

 See :ref:`AMD Instinct MI300X™ workload optimization <mi300x-profiling-start>`
@@ -92,7 +92,7 @@ involves configuring tensor parallelism, leveraging advanced features, and
 ensuring efficient execution. Here’s how to optimize vLLM performance:

 * Tensor parallelism: Configure the
-  :ref:`tensor-parallel-size parameter <mi300x-vllm-optimize-tp-gemm>` to distribute
+  :ref:`tensor-parallel-size parameter <mi300x-vllm-multiple-gpus>` to distribute
   tensor computations across multiple GPUs. Adjust parameters such as
   ``batch-size``, ``input-len``, and ``output-len`` based on your workload.
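
As a hedged aside, the same knob is exposed as ``tensor_parallel_size`` in the
vLLM Python API; the model name and generation settings below are placeholders,
not values taken from this guide.

.. code-block:: python

   from vllm import LLM, SamplingParams

   # Illustrative only: shard the model across 8 GPUs with tensor parallelism.
   llm = LLM(model="meta-llama/Llama-3.1-70B-Instruct", tensor_parallel_size=8)

   params = SamplingParams(max_tokens=128)
   outputs = llm.generate(["Explain tensor parallelism in one sentence."], params)
   print(outputs[0].outputs[0].text)
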
@@ -152,7 +152,7 @@ address any new bottlenecks that may emerge.

 ROCm provides a prebuilt optimized Docker image that has everything required to implement
 the tips in this section. It includes ROCm, vLLM, PyTorch, and tuning files in the CSV
-format. For more information, see :doc:`/how-to/performance-validation/mi300x/vllm-benchmark`.
+format. For more information, see :doc:`../inference/vllm-benchmark`.

 .. _mi300x-profiling-tools:

@@ -173,7 +173,7 @@ tools available depending on their specific profiling needs.
 For more information, see
 :doc:`ROCm Compute Profiler documentation <rocprofiler-compute:index>`.

-Refer to :doc:`/how-to/llm-fine-tuning-optimization/profiling-and-debugging`
+Refer to :doc:`profiling-and-debugging`
 to explore commonly used profiling tools and their usage patterns.

 Once performance bottlenecks are identified, you can implement an informed workload
@@ -412,7 +412,7 @@ usage with ROCm.
 ROCm provides a prebuilt optimized Docker image for validating the performance
 of LLM inference with vLLM on the MI300X accelerator. The Docker image includes
 ROCm, vLLM, PyTorch, and tuning files in the CSV format. For more information,
-see :doc:`/how-to/performance-validation/mi300x/vllm-benchmark`.
+see :doc:`../inference/vllm-benchmark`.

 .. _mi300x-vllm-throughput-measurement:

@@ -1304,7 +1304,7 @@ performance (reduce latency) and improve benchmarking stability.
 CK provides a rich set of template parameters for generating flexible accelerated
 computing kernels for different application scenarios.

-See :doc:`/how-to/llm-fine-tuning-optimization/optimizing-with-composable-kernel`
+See :doc:`optimizing-with-composable-kernel`
 for an overview of Composable Kernel GEMM kernels, information on tunable
 parameters, and examples.

@@ -1,5 +1,5 @@
 .. meta::
-   :description: How to use ROCm for AI
+   :description: How to deploy your model for AI inference using vLLM and Hugging Face TGI.
    :keywords: ROCm, AI, LLM, train, fine-tune, deploy, FSDP, DeepSpeed, LLaMA, tutorial

 ********************
@@ -119,4 +119,4 @@ TGI walkthrough
 vLLM and Hugging Face TGI are robust solutions for anyone looking to deploy LLMs for applications that demand high
 performance, low latency, and scalability.

-Visit the topics in :doc:`Using ROCm for AI <index>` to learn about other ROCm-aware solutions for AI development.
+Visit the topics in :doc:`Using ROCm for AI <../index>` to learn about other ROCm-aware solutions for AI development.
@@ -1,5 +1,5 @@
 .. meta::
-   :description: How to use ROCm for AI
+   :description: How to run models from Hugging Face on AMD GPUs.
    :keywords: ROCm, AI, LLM, Hugging Face, Optimum, Flash Attention, GPTQ, ONNX, tutorial

 ********************************
docs/how-to/rocm-for-ai/inference/index.rst (new file, 22 lines)
@@ -0,0 +1,22 @@
.. meta::
   :description: How to use ROCm for AI inference workloads.
   :keywords: ROCm, AI, machine learning, LLM, AI inference, NLP, GPUs, usage, tutorial

****************************
Use ROCm for AI inference
****************************

AI inference is the process of deploying a trained machine learning model to make predictions or classifications on new data. This commonly involves using the model with real-time data and making quick decisions based on the predictions made by the model.

Understanding the ROCm™ software platform’s architecture and capabilities is vital for running AI inference. By leveraging the ROCm platform's capabilities, you can harness the power of high-performance computing and efficient resource management to run inference workloads, leading to faster predictions and classifications on real-time data.

Throughout the following topics, this section provides a comprehensive guide to setting up and deploying AI inference on AMD GPUs. This includes instructions on how to install ROCm, how to use Hugging Face Transformers to manage pre-trained models for natural language processing (NLP) tasks, how to validate vLLM on AMD Instinct™ MI300X accelerators, and how to deploy trained models in production environments.

- :doc:`Installing ROCm and machine learning frameworks <install>`

- :doc:`Running models from Hugging Face <hugging-face-models>`

- :doc:`LLM inference frameworks <llm-inference-frameworks>`

- :doc:`Performance validation <vllm-benchmark>`

- :doc:`Deploying your model <deploy-your-model>`
@@ -1,5 +1,5 @@
 .. meta::
-   :description: How to use ROCm for AI
+   :description: How to install ROCm and popular machine learning frameworks.
    :keywords: ROCm, AI, LLM, train, fine-tune, FSDP, DeepSpeed, LLaMA, tutorial

 .. _rocm-for-ai-install:
@@ -26,7 +26,7 @@ If you’re using a Radeon GPU for graphics-accelerated applications, refer to t
 ROCm supports multiple :doc:`installation methods <rocm-install-on-linux:install/install-overview>`:

-* :doc:`Using your Linux distribution's package manager <rocm-install-on-linux:install/native-install/index>`
+* :doc:`Using your Linux distribution's package manager <rocm-install-on-linux:install/install-methods/package-manager-index>`

 * :doc:`Using the AMDGPU installer <rocm-install-on-linux:install/amdgpu-install>`

@@ -59,4 +59,4 @@ images with the framework pre-installed.

 * :doc:`JAX for ROCm <rocm-install-on-linux:install/3rd-party/jax-install>`

-The sections that follow in :doc:`Training a model <train-a-model>` are geared for a ROCm with PyTorch installation.
+The sections that follow in :doc:`Training a model <../training/train-a-model>` are geared for a ROCm with PyTorch installation.
@@ -1,5 +1,5 @@
 .. meta::
-   :description: How to fine-tune LLMs with ROCm
+   :description: How to implement the LLM inference frameworks with ROCm acceleration.
    :keywords: ROCm, LLM, fine-tuning, usage, tutorial, inference, vLLM, TGI, text generation inference

 ************************
@@ -8,8 +8,8 @@ LLM inference frameworks

 This section discusses how to implement `vLLM <https://docs.vllm.ai/en/latest>`_ and `Hugging Face TGI
 <https://huggingface.co/docs/text-generation-inference/en/index>`_ using
-:doc:`single-accelerator <single-gpu-fine-tuning-and-inference>` and
-:doc:`multi-accelerator <multi-gpu-fine-tuning-and-inference>` systems.
+:doc:`single-accelerator <../fine-tuning/single-gpu-fine-tuning-and-inference>` and
+:doc:`multi-accelerator <../fine-tuning/multi-gpu-fine-tuning-and-inference>` systems.

 .. _fine-tuning-llms-vllm:

@@ -68,7 +68,7 @@ Installing vLLM

 The following log message displayed in your command line indicates that the server is listening for requests.

-.. image:: ../../data/how-to/llm-fine-tuning-optimization/vllm-single-gpu-log.png
+.. image:: ../../../data/how-to/llm-fine-tuning-optimization/vllm-single-gpu-log.png
    :alt: vLLM API server log message
    :align: center

@@ -141,7 +141,7 @@ Installing vLLM

 ROCm provides a prebuilt optimized Docker image for validating the performance of LLM inference with vLLM
 on the MI300X accelerator. The Docker image includes ROCm, vLLM, PyTorch, and tuning files in CSV
-format. For more information, see :doc:`/how-to/performance-validation/mi300x/vllm-benchmark`.
+format. For more information, see :doc:`vllm-benchmark`.

 .. _fine-tuning-llms-tgi:

docs/how-to/rocm-for-ai/inference/vllm-benchmark.rst (new file, 478 lines)
@@ -0,0 +1,478 @@
.. meta::
   :description: Learn how to validate LLM inference performance on MI300X accelerators using AMD MAD and the
                 ROCm vLLM Docker image.
   :keywords: model, MAD, automation, dashboarding, validate

***********************************************************
LLM inference performance validation on AMD Instinct MI300X
***********************************************************

.. _vllm-benchmark-unified-docker:

The `ROCm vLLM Docker <https://hub.docker.com/r/rocm/vllm/tags>`_ image offers
a prebuilt, optimized environment for validating large language model (LLM)
inference performance on the AMD Instinct™ MI300X accelerator. This ROCm vLLM
Docker image integrates vLLM and PyTorch tailored specifically for the MI300X
accelerator and includes the following components:

* `ROCm 6.3.1 <https://github.com/ROCm/ROCm>`_

* `vLLM 0.6.6 <https://docs.vllm.ai/en/latest>`_

* `PyTorch 2.7.0 (2.7.0a0+git3a58512) <https://github.com/pytorch/pytorch>`_

With this Docker image, you can quickly validate the expected inference
performance numbers for the MI300X accelerator. This topic also provides tips on
optimizing performance with popular AI models. For more information, see the lists of
:ref:`available models for MAD-integrated benchmarking <vllm-benchmark-mad-models>`
and :ref:`standalone benchmarking <vllm-benchmark-standalone-options>`.

.. _vllm-benchmark-vllm:

.. note::

   vLLM is a toolkit and library for LLM inference and serving. AMD implements
   high-performance custom kernels and modules in vLLM to enhance performance.
   See :ref:`fine-tuning-llms-vllm` and :ref:`mi300x-vllm-optimization` for
   more information.

Getting started
===============
Use the following procedures to reproduce the benchmark results on an
MI300X accelerator with the prebuilt vLLM Docker image.

.. _vllm-benchmark-get-started:

1. Disable NUMA auto-balancing.

   To optimize performance, disable automatic NUMA balancing. Otherwise, the GPU
   might hang until the periodic balancing is finalized. For more information,
   see :ref:`AMD Instinct MI300X system optimization <mi300x-disable-numa>`.

   .. code-block:: shell

      # disable automatic NUMA balancing
      sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
      # check if NUMA balancing is disabled (returns 0 if disabled)
      cat /proc/sys/kernel/numa_balancing
      0

2. Download the :ref:`ROCm vLLM Docker image <vllm-benchmark-unified-docker>`.

   Use the following command to pull the Docker image from Docker Hub.

   .. code-block:: shell

      docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6

Once the setup is complete, choose between two options to reproduce the
benchmark results:

- :ref:`MAD-integrated benchmarking <vllm-benchmark-mad>`

- :ref:`Standalone benchmarking <vllm-benchmark-standalone>`

.. _vllm-benchmark-mad:

MAD-integrated benchmarking
===========================

Clone the ROCm Model Automation and Dashboarding (`<https://github.com/ROCm/MAD>`__) repository to a local
directory and install the required packages on the host machine.

.. code-block:: shell

   git clone https://github.com/ROCm/MAD
   cd MAD
   pip install -r requirements.txt

Use this command to run a performance benchmark test of the Llama 3.1 8B model
on one GPU with ``float16`` data type in the host machine.

.. code-block:: shell

   export MAD_SECRETS_HFTOKEN="your personal Hugging Face token to access gated models"
   python3 tools/run_models.py --tags pyt_vllm_llama-3.1-8b --keep-model-dir --live-output --timeout 28800

ROCm MAD launches a Docker container with the name
``container_ci-pyt_vllm_llama-3.1-8b``. The latency and throughput reports of the
model are collected in the following path: ``~/MAD/reports_float16/``.

Although the following models are preconfigured to collect latency and
throughput performance data, you can also change the benchmarking parameters.
Refer to the :ref:`Standalone benchmarking <vllm-benchmark-standalone>` section.

.. _vllm-benchmark-mad-models:
Available models
----------------

.. list-table::
   :header-rows: 1
   :widths: 2, 3

   * - Model name
     - Tag

   * - `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B>`_
     - ``pyt_vllm_llama-3.1-8b``

   * - `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_
     - ``pyt_vllm_llama-3.1-70b``

   * - `Llama 3.1 405B <https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct>`_
     - ``pyt_vllm_llama-3.1-405b``

   * - `Llama 3.2 11B Vision <https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct>`_
     - ``pyt_vllm_llama-3.2-11b-vision-instruct``

   * - `Llama 2 7B <https://huggingface.co/meta-llama/Llama-2-7b-chat-hf>`_
     - ``pyt_vllm_llama-2-7b``

   * - `Llama 2 70B <https://huggingface.co/meta-llama/Llama-2-70b-chat-hf>`_
     - ``pyt_vllm_llama-2-70b``

   * - `Mixtral MoE 8x7B <https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1>`_
     - ``pyt_vllm_mixtral-8x7b``

   * - `Mixtral MoE 8x22B <https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1>`_
     - ``pyt_vllm_mixtral-8x22b``

   * - `Mistral 7B <https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3>`_
     - ``pyt_vllm_mistral-7b``

   * - `Qwen2 7B <https://huggingface.co/Qwen/Qwen2-7B-Instruct>`_
     - ``pyt_vllm_qwen2-7b``

   * - `Qwen2 72B <https://huggingface.co/Qwen/Qwen2-72B-Instruct>`_
     - ``pyt_vllm_qwen2-72b``

   * - `JAIS 13B <https://huggingface.co/core42/jais-13b-chat>`_
     - ``pyt_vllm_jais-13b``

   * - `JAIS 30B <https://huggingface.co/core42/jais-30b-chat-v3>`_
     - ``pyt_vllm_jais-30b``

   * - `DBRX Instruct <https://huggingface.co/databricks/dbrx-instruct>`_
     - ``pyt_vllm_dbrx-instruct``

   * - `Gemma 2 27B <https://huggingface.co/google/gemma-2-27b>`_
     - ``pyt_vllm_gemma-2-27b``

   * - `C4AI Command R+ 08-2024 <https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024>`_
     - ``pyt_vllm_c4ai-command-r-plus-08-2024``

   * - `DeepSeek MoE 16B <https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat>`_
     - ``pyt_vllm_deepseek-moe-16b-chat``

   * - `Llama 3.1 70B FP8 <https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV>`_
     - ``pyt_vllm_llama-3.1-70b_fp8``

   * - `Llama 3.1 405B FP8 <https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV>`_
     - ``pyt_vllm_llama-3.1-405b_fp8``

   * - `Mixtral MoE 8x7B FP8 <https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV>`_
     - ``pyt_vllm_mixtral-8x7b_fp8``

   * - `Mixtral MoE 8x22B FP8 <https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV>`_
     - ``pyt_vllm_mixtral-8x22b_fp8``

   * - `Mistral 7B FP8 <https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV>`_
     - ``pyt_vllm_mistral-7b_fp8``

   * - `DBRX Instruct FP8 <https://huggingface.co/amd/dbrx-instruct-FP8-KV>`_
     - ``pyt_vllm_dbrx_fp8``

   * - `C4AI Command R+ 08-2024 FP8 <https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV>`_
     - ``pyt_vllm_command-r-plus_fp8``

.. _vllm-benchmark-standalone:
Standalone benchmarking
=======================

You can run the vLLM benchmark tool independently by starting the
:ref:`Docker container <vllm-benchmark-get-started>` as shown in the following
snippet.

.. code-block:: shell

   docker pull rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6
   docker run -it --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 16G --security-opt seccomp=unconfined --security-opt apparmor=unconfined --cap-add=SYS_PTRACE -v $(pwd):/workspace --env HUGGINGFACE_HUB_CACHE=/workspace --name vllm_v0.6.6 rocm/vllm:rocm6.3.1_mi300_ubuntu22.04_py3.12_vllm_0.6.6

In the Docker container, clone the ROCm MAD repository and navigate to the
benchmark scripts directory at ``~/MAD/scripts/vllm``.

.. code-block:: shell

   git clone https://github.com/ROCm/MAD
   cd MAD/scripts/vllm

Command
-------

To start the benchmark, use the following command with the appropriate options.
See :ref:`Options <vllm-benchmark-standalone-options>` for the list of
options and their descriptions.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s $test_option -m $model_repo -g $num_gpu -d $datatype

See the :ref:`examples <vllm-benchmark-run-benchmark>` for more information.

.. note::

   The input sequence length, output sequence length, and tensor parallel (TP) size are
   already configured. You don't need to specify them with this script.

.. note::

   If you encounter the following error, pass your access-authorized Hugging
   Face token to the gated models.

   .. code-block:: shell

      OSError: You are trying to access a gated repo.

      # pass your HF_TOKEN
      export HF_TOKEN=$your_personal_hf_token

.. _vllm-benchmark-standalone-options:
Options and available models
----------------------------

.. list-table::
   :header-rows: 1
   :align: center

   * - Name
     - Options
     - Description

   * - ``$test_option``
     - latency
     - Measure decoding token latency

   * -
     - throughput
     - Measure token generation throughput

   * -
     - all
     - Measure both throughput and latency

   * - ``$model_repo``
     - ``meta-llama/Llama-3.1-8B-Instruct``
     - `Llama 3.1 8B <https://huggingface.co/meta-llama/Llama-3.1-8B>`_

   * - (``float16``)
     - ``meta-llama/Llama-3.1-70B-Instruct``
     - `Llama 3.1 70B <https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct>`_

   * -
     - ``meta-llama/Llama-3.1-405B-Instruct``
     - `Llama 3.1 405B <https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct>`_

   * -
     - ``meta-llama/Llama-3.2-11B-Vision-Instruct``
     - `Llama 3.2 11B Vision <https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct>`_

   * -
     - ``meta-llama/Llama-2-7b-chat-hf``
     - `Llama 2 7B <https://huggingface.co/meta-llama/Llama-2-7b-chat-hf>`_

   * -
     - ``meta-llama/Llama-2-70b-chat-hf``
     - `Llama 2 70B <https://huggingface.co/meta-llama/Llama-2-70b-chat-hf>`_

   * -
     - ``mistralai/Mixtral-8x7B-Instruct-v0.1``
     - `Mixtral MoE 8x7B <https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1>`_

   * -
     - ``mistralai/Mixtral-8x22B-Instruct-v0.1``
     - `Mixtral MoE 8x22B <https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1>`_

   * -
     - ``mistralai/Mistral-7B-Instruct-v0.3``
     - `Mistral 7B <https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3>`_

   * -
     - ``Qwen/Qwen2-7B-Instruct``
     - `Qwen2 7B <https://huggingface.co/Qwen/Qwen2-7B-Instruct>`_

   * -
     - ``Qwen/Qwen2-72B-Instruct``
     - `Qwen2 72B <https://huggingface.co/Qwen/Qwen2-72B-Instruct>`_

   * -
     - ``core42/jais-13b-chat``
     - `JAIS 13B <https://huggingface.co/core42/jais-13b-chat>`_

   * -
     - ``core42/jais-30b-chat-v3``
     - `JAIS 30B <https://huggingface.co/core42/jais-30b-chat-v3>`_

   * -
     - ``databricks/dbrx-instruct``
     - `DBRX Instruct <https://huggingface.co/databricks/dbrx-instruct>`_

   * -
     - ``google/gemma-2-27b``
     - `Gemma 2 27B <https://huggingface.co/google/gemma-2-27b>`_

   * -
     - ``CohereForAI/c4ai-command-r-plus-08-2024``
     - `C4AI Command R+ 08-2024 <https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024>`_

   * -
     - ``deepseek-ai/deepseek-moe-16b-chat``
     - `DeepSeek MoE 16B <https://huggingface.co/deepseek-ai/deepseek-moe-16b-chat>`_

   * - ``$model_repo``
     - ``amd/Llama-3.1-70B-Instruct-FP8-KV``
     - `Llama 3.1 70B FP8 <https://huggingface.co/amd/Llama-3.1-70B-Instruct-FP8-KV>`_

   * - (``float8``)
     - ``amd/Llama-3.1-405B-Instruct-FP8-KV``
     - `Llama 3.1 405B FP8 <https://huggingface.co/amd/Llama-3.1-405B-Instruct-FP8-KV>`_

   * -
     - ``amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV``
     - `Mixtral MoE 8x7B FP8 <https://huggingface.co/amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV>`_

   * -
     - ``amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV``
     - `Mixtral MoE 8x22B FP8 <https://huggingface.co/amd/Mixtral-8x22B-Instruct-v0.1-FP8-KV>`_

   * -
     - ``amd/Mistral-7B-v0.1-FP8-KV``
     - `Mistral 7B FP8 <https://huggingface.co/amd/Mistral-7B-v0.1-FP8-KV>`_

   * -
     - ``amd/dbrx-instruct-FP8-KV``
     - `DBRX Instruct FP8 <https://huggingface.co/amd/dbrx-instruct-FP8-KV>`_

   * -
     - ``amd/c4ai-command-r-plus-FP8-KV``
     - `C4AI Command R+ 08-2024 FP8 <https://huggingface.co/amd/c4ai-command-r-plus-FP8-KV>`_

   * - ``$num_gpu``
     - 1 or 8
     - Number of GPUs

   * - ``$datatype``
     - ``float16`` or ``float8``
     - Data type

.. _vllm-benchmark-run-benchmark:
Running the benchmark on the MI300X accelerator
-----------------------------------------------

Here are some examples of running the benchmark with various options.
See :ref:`Options <vllm-benchmark-standalone-options>` for the list of
options and their descriptions.

Example 1: latency benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to benchmark the latency of the Llama 3.1 70B model on eight GPUs with the ``float16`` and ``float8`` data types.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s latency -m meta-llama/Llama-3.1-70B-Instruct -g 8 -d float16
   ./vllm_benchmark_report.sh -s latency -m amd/Llama-3.1-70B-Instruct-FP8-KV -g 8 -d float8

Find the latency reports at:

- ``./reports_float16/summary/Llama-3.1-70B-Instruct_latency_report.csv``

- ``./reports_float8/summary/Llama-3.1-70B-Instruct-FP8-KV_latency_report.csv``

Example 2: throughput benchmark
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to benchmark the throughput of the Llama 3.1 70B model on eight GPUs with the ``float16`` and ``float8`` data types.

.. code-block:: shell

   ./vllm_benchmark_report.sh -s throughput -m meta-llama/Llama-3.1-70B-Instruct -g 8 -d float16
   ./vllm_benchmark_report.sh -s throughput -m amd/Llama-3.1-70B-Instruct-FP8-KV -g 8 -d float8

Find the throughput reports at:

- ``./reports_float16/summary/Llama-3.1-70B-Instruct_throughput_report.csv``

- ``./reports_float8/summary/Llama-3.1-70B-Instruct-FP8-KV_throughput_report.csv``

.. raw:: html

   <style>
   mjx-container[jax="CHTML"][display="true"] {
       text-align: left;
       margin: 0;
   }
   </style>

.. note::

   Throughput is calculated as:

   - .. math:: throughput\_tot = requests \times (\mathsf{\text{input lengths}} + \mathsf{\text{output lengths}}) / elapsed\_time

   - .. math:: throughput\_gen = requests \times \mathsf{\text{output lengths}} / elapsed\_time
Further reading
===============

- For application performance optimization strategies for HPC and AI workloads,
  including inference with vLLM, see :doc:`../inference-optimization/workload`.

- To learn more about the options for latency and throughput benchmark scripts,
  see `<https://github.com/ROCm/vllm/tree/main/benchmarks>`_.

- To learn more about system settings and management practices to configure your system for
  MI300X accelerators, see :doc:`../../system-optimization/mi300x`.

- To learn how to run LLM models from Hugging Face or your own model, see
  :doc:`Running models from Hugging Face <hugging-face-models>`.

- To learn how to optimize inference on LLMs, see
  :doc:`Inference optimization <../inference-optimization/index>`.

- To learn how to fine-tune LLMs, see
  :doc:`Fine-tuning LLMs <../fine-tuning/index>`.

Previous versions
=================

This table lists previous versions of the ROCm vLLM Docker image for inference
performance validation. For detailed information about available models for
benchmarking, see the version-specific documentation.

.. list-table::
   :header-rows: 1
   :stub-columns: 1

   * - ROCm version
     - vLLM version
     - PyTorch version
     - Resources

   * - 6.2.1
     - 0.6.4
     - 2.5.0
     - * `Documentation <https://rocm.docs.amd.com/en/docs-6.3.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4/images/sha256-ccbb74cc9e7adecb8f7bdab9555f7ac6fc73adb580836c2a35ca96ff471890d8>`_

   * - 6.2.0
     - 0.4.3
     - 2.4.0
     - * `Documentation <https://rocm.docs.amd.com/en/docs-6.2.0/how-to/performance-validation/mi300x/vllm-benchmark.html>`_
       * `Docker Hub <https://hub.docker.com/layers/rocm/vllm/rocm6.2_mi300_ubuntu22.04_py3.9_vllm_7c5fd50/images/sha256-9e4dd4788a794c3d346d7d0ba452ae5e92d39b8dfac438b2af8efdc7f15d22c0>`_
docs/how-to/rocm-for-ai/training/index.rst (new file, 21 lines)
@@ -0,0 +1,21 @@
.. meta::
   :description: How to use ROCm for training models
   :keywords: ROCm, LLM, training, GPUs, training model, scaling model, usage, tutorial

=======================
Use ROCm for training
=======================

Training models is the process of teaching a computer program to recognize patterns in data. This involves providing the computer with large amounts of labeled data and allowing it to learn from that data, adjusting the model's parameters.

The process of training models is computationally intensive, requiring specialized hardware like GPUs to accelerate computations and reduce training time. Training models on AMD GPUs involves leveraging the parallel processing capabilities of these GPUs to significantly speed up the model training process in machine learning and deep learning tasks.

Training models on AMD GPUs with the ROCm™ software platform allows you to use the powerful parallel processing capabilities and efficient compute resource management, significantly improving training time and overall performance in machine learning applications.

The ROCm software platform makes it easier to train models on AMD GPUs while maintaining compatibility with existing code and tools. The platform also provides features like multi-GPU support, allowing for scaling and parallelization of model training across multiple GPUs to enhance performance.

In this guide, you'll learn about:

- :doc:`Training a model <train-a-model>`

- :doc:`Scale model training <scale-model-training>`
@@ -105,7 +105,7 @@ Fine-tuning your model
 ROCm supports multiple techniques for :ref:`optimizing fine-tuning <fine-tuning-llms-concept-optimizations>`, for
 example, LoRA, QLoRA, PEFT, and FSDP.

-Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.
+Learn more about challenges and solutions for model fine-tuning in :doc:`../fine-tuning/index`.

 The following developer blogs showcase examples of fine-tuning a model on an AMD accelerator or GPU.

@@ -132,4 +132,4 @@ The following developer blogs showcase examples of fine-tuning a model on an AMD
 * Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``

   * `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
-    single/multi-node GPUs <https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart/finetuning>`_
+    single/multi-node GPUs <https://github.com/meta-llama/llama-cookbook/tree/main/getting-started/finetuning>`_
@@ -164,7 +164,7 @@ Run on 8 GPUs (``-g 8``), scanning from 8 bytes to 10 GB:

    ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 8

-.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
+.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
    :width: 800

 Using one MPI process per GPU and ``-g 1`` for performance-oriented runs on both single-node and multi-node is
@@ -174,7 +174,7 @@ recommended. So, a run on 8 GPUs looks something like:

    mpirun -np 8 --bind-to numa ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 1

-.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
+.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
    :width: 800

 Running with one MPI process per GPU ensures a one-to-one mapping for CPUs and GPUs, which can be beneficial
@@ -195,7 +195,7 @@ Use the following script to run the RCCL test for four MI300X GPU nodes. Modify
       -x NCCL_DEBUG=version \
       $HOME/rccl-tests/build/all_reduce_perf -b 8 -e 8g -f 2 -g 1

-.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
+.. image:: ../../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
    :width: 800

 .. _mi300x-amd-megatron-lm-training:

@@ -264,7 +264,7 @@ end-of-document token, remove sentence splitting, and use the tokenizer type.
 In this case, the automatically generated output files are named ``my-gpt2_text_document.bin`` and
 ``my-gpt2_text_document.idx``.

-.. image:: ../../data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png
+.. image:: ../../../data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png
    :width: 800

 .. _amd-megatron-lm-environment-setup:

@@ -462,7 +462,7 @@ Benchmarking examples

 See the sample output:

-.. image:: ../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
+.. image:: ../../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
    :width: 800

 .. tab-item:: Multi node training

@@ -493,11 +493,11 @@ Benchmarking examples

 Master node:

-.. image:: ../../data/how-to/rocm-for-ai/2-node-training-master.png
+.. image:: ../../../data/how-to/rocm-for-ai/2-node-training-master.png
    :width: 800

 Worker node:

-.. image:: ../../data/how-to/rocm-for-ai/2-node-training-worker.png
+.. image:: ../../../data/how-to/rocm-for-ai/2-node-training-worker.png
    :width: 800

@@ -1,6 +1,6 @@
 .. meta::
-   :description: How to use ROCm for HPC
-   :keywords: ROCm, AI, high performance computing, HPC
+   :description: How to use ROCm for high-performance computing (HPC).
+   :keywords: ROCm, AI, high performance computing, HPC, science, scientific

 ******************
 Using ROCm for HPC
@@ -115,6 +115,12 @@ Ubuntu versions.
|
||||
for non-destructive testing or for ocean acoustics.
|
||||
|
||||
* - Molecular dynamics
|
||||
- `Amber <https://github.com/amd/InfinityHub-CI/tree/main/amber>`_
|
||||
- Amber is a suite of biomolecular simulation programs. It is a set of molecular mechanical force fields for
|
||||
simulating biomolecules. Amber is also a package of molecular simulation
|
||||
programs which includes source code and demos.
|
||||
|
||||
* -
|
||||
- `GROMACS with HIP (AMD implementation) <https://github.com/amd/InfinityHub-CI/tree/main/gromacs>`_
|
||||
- GROMACS is a versatile package to perform molecular dynamics, i.e.
|
||||
simulate the Newtonian equations of motion for systems with hundreds
|
||||
@@ -129,6 +135,13 @@ Ubuntu versions.
|
||||
Parallel Simulator.
|
||||
|
||||
* - Computational fluid dynamics
|
||||
- `Ansys Fluent <https://github.com/amd/InfinityHub-CI/tree/main/ansys-fluent>`_
|
||||
- Ansys Fluent is an advanced computational fluid dynamics (CFD) tool for
|
||||
simulating and analyzing fluid flow, heat transfer, and related phenomena in complex systems.
|
||||
It offers a range of powerful features for detailed and accurate modeling of various physical
|
||||
processes, including turbulence, chemical reactions, and multiphase flows.
|
||||
|
||||
* -
|
||||
- `NEKO <https://github.com/amd/InfinityHub-CI/tree/main/neko>`_
|
||||
- Neko is a portable framework for high-order spectral element flow simulations.
|
||||
Written in modern Fortran, Neko adopts an object-oriented approach, allowing
|
||||
@@ -141,6 +154,26 @@ Ubuntu versions.
|
||||
- nekRS is an open-source Navier Stokes solver based on the spectral element
|
||||
method targeting classical processors and accelerators like GPUs.
|
||||
|
||||
* -
|
||||
- `OpenFOAM <https://github.com/amd/InfinityHub-CI/tree/main/openfoam>`_
|
||||
- OpenFOAM is a free, open-source computational fluid dynamics (CFD)
|
||||
tool developed primarily by OpenCFD Ltd. It has a large user
|
||||
base across most areas of engineering and science, from both commercial and
|
||||
academic organizations. OpenFOAM has extensive features to solve
|
||||
anything from complex fluid flows involving chemical reactions, turbulence, and
|
||||
heat transfer, to acoustics, solid mechanics, and electromagnetics.
|
||||
|
||||
* -
|
||||
- `PeleC <https://github.com/amd/InfinityHub-CI/tree/main/pelec>`_
|
||||
- PeleC is an adaptive mesh refinement(AMR) solver for compressible reacting flows.
|
||||
|
||||
* -
|
||||
- `Simcenter Star-CCM+ <https://github.com/amd/InfinityHub-CI/tree/main/siemens-star-ccm>`_
|
||||
- Simcenter Star-CCM+ is a comprehensive computational fluid dynamics (CFD) and multiphysics
|
||||
simulation tool developed by Siemens Digital Industries Software. It is designed to
|
||||
help engineers and researchers analyze and optimize the performance of products and
|
||||
systems across various industries.
|
||||
|
||||
* - Computational chemistry
|
||||
- `QUDA <https://github.com/amd/InfinityHub-CI/tree/main/quda>`_
|
||||
- Library designed for efficient lattice QCD computations on
|
||||
@@ -170,12 +203,30 @@ Ubuntu versions.
|
||||
developing atmosphere, ocean, and other earth-system simulation components
|
||||
for use in climate, regional climate, and weather studies.
|
||||
|
||||
* - Energy, Oil, and Gas
- `DevitoPRO <https://github.com/amd/InfinityHub-CI/tree/main/devitopro>`_
- DevitoPRO is an advanced extension of the open-source Devito platform with added
features tailored for high-demand production workflows. It supports
high-performance computing (HPC) needs, especially in seismic imaging and inversion.
It is used to perform optimized finite difference (FD) computations
from high-level symbolic problem definitions. DevitoPRO performs automated
code generation and just-in-time (JIT) compilation based on symbolic equations
defined in SymPy to create and execute highly optimized finite difference stencil
kernels on multiple computer platforms (see the minimal sketch after this table excerpt).

* -
- `ECHELON <https://github.com/amd/InfinityHub-CI/tree/main/srt-echelon>`_
- ECHELON by Stone Ridge Technology is a reservoir simulation tool. With
fast processing, it retains precise accuracy and preserves legacy simulator results.
Faster reservoir simulation enables reservoir engineers to produce many realizations,
address larger models, and use advanced physics. It opens new workflows based on
ensemble methodologies for history matching and forecasting that yield
increased accuracy and more predictive results.

* - Benchmark
- `rocHPL <https://github.com/amd/InfinityHub-CI/tree/main/rochpl>`_
- HPL, or High-Performance Linpack, is a benchmark which solves a uniformly
random system of linear equations and reports floating-point execution rate.
This documentation supports the implementation of the HPL benchmark on
top of AMD's ROCm platform.
- HPL, or High-Performance Linpack, is a benchmark which solves a uniformly
random system of linear equations and reports floating-point execution rate
(a back-of-envelope scoring sketch follows this table excerpt).

* -
- `rocHPL-MxP <https://github.com/amd/InfinityHub-CI/tree/main/hpl-mxp>`_
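
The DevitoPRO entry above describes lowering symbolic SymPy equations to JIT-compiled
stencil kernels. A minimal sketch of that workflow using the open-source Devito API
(which DevitoPRO extends); this assumes ``devito`` is installed, and the exact option
names should be checked against the Devito documentation:

.. code-block:: python

   from devito import Grid, TimeFunction, Eq, Operator

   grid = Grid(shape=(64, 64))                           # 2D computational grid
   u = TimeFunction(name="u", grid=grid, space_order=2)  # field u(t, x, y)
   step = Eq(u.forward, u + 0.1 * u.laplace)             # symbolic update rule
   op = Operator(step)   # code generation + JIT compilation of a C stencil
   op.apply(time_M=10)   # execute 10 time steps

For the rocHPL entry, the reported execution rate follows from the conventional HPL
operation count, roughly (2/3)·N³ + 2·N² floating-point operations for problem size N
(the N² term is negligible at benchmark scale). A back-of-envelope helper, as a sketch:

.. code-block:: python

   def hpl_gflops(n: int, seconds: float) -> float:
       """Approximate HPL score from problem size and measured wall time."""
       flops = (2.0 / 3.0) * n**3 + 2.0 * n**2
       return flops / seconds / 1e9

   print(hpl_gflops(100_000, 1200.0))  # hypothetical N and run time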
@@ -216,6 +267,14 @@ Ubuntu versions.
range of hardware platforms via use of an in-built domain specific language derived
from the Mako templating engine.

* -
- `PETSc <https://github.com/amd/InfinityHub-CI/tree/main/petsc>`_
- Portable, Extensible Toolkit for Scientific Computation (PETSc) is a suite of data structures
and routines for the scalable (parallel) solution of scientific applications modeled by partial
differential equations. It supports MPI, GPUs through CUDA, HIP, and OpenCL,
as well as hybrid MPI-GPU parallelism. It also supports the NEC-SX Tsubasa Vector Engine.
PETSc also includes the Toolkit for Advanced Optimization (TAO) library
(see the sketch after this table excerpt).

* -
- `RAJA <https://github.com/amd/InfinityHub-CI/tree/main/raja>`_
- RAJA is a library of C++ software abstractions, primarily developed at Lawrence
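
As referenced in the PETSc entry above, a minimal sketch with petsc4py (assuming a
PETSc build with petsc4py enabled; the HIP-related run-time option is an assumption
to verify against your PETSc version): assemble a 1D Laplacian and solve A x = b
with a Krylov solver.

.. code-block:: python

   from petsc4py import PETSc

   n = 100
   A = PETSc.Mat().createAIJ([n, n])
   A.setUp()
   rstart, rend = A.getOwnershipRange()
   for i in range(rstart, rend):      # tridiagonal 1D Laplacian stencil
       A.setValue(i, i, 2.0)
       if i > 0:
           A.setValue(i, i - 1, -1.0)
       if i < n - 1:
           A.setValue(i, i + 1, -1.0)
   A.assemble()

   b = A.createVecLeft()
   b.set(1.0)
   x = A.createVecRight()

   ksp = PETSc.KSP().create()
   ksp.setOperators(A)
   ksp.setFromOptions()   # e.g. run with -vec_type hip to target a HIP back end
   ksp.solve(b, x)
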
@@ -1,5 +1,5 @@
.. meta::
:description: AMD hardware optimization for specific workloads
:description: Learn about AMD hardware optimization for HPC-specific and workstation workloads.
:keywords: high-performance computing, HPC, Instinct accelerators, Radeon,
tuning, tuning guide, AMD, ROCm

@@ -1,9 +1,9 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="MI100 high-performance computing and tuning guide">
<meta name="keywords" content="MI100, high-performance computing, HPC, BIOS
settings, NBIO, AMD, ROCm">
</head>
---
myst:
html_meta:
"description": "AMD Instinct MI100 system settings optimization guide."
"keywords": "Instinct, MI100, microarchitecture, AMD, ROCm"
---

# AMD Instinct MI100 system optimization

@@ -1,9 +1,9 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="MI200 high-performance computing and tuning guide">
<meta name="keywords" content="MI200, high-performance computing, HPC, BIOS
settings, NBIO, AMD, ROCm">
</head>
---
myst:
html_meta:
"description": "Learn about AMD Instinct MI200 system settings and performance tuning."
"keywords": "Instinct, MI200, microarchitecture, AMD, ROCm"
---

# AMD Instinct MI200 system optimization

@@ -1,5 +1,5 @@
.. meta::
:description: AMD Instinct MI300A system settings
:description: Learn about AMD Instinct MI300A system settings and performance tuning.
:keywords: AMD, Instinct, MI300A, HPC, tuning, BIOS settings, NBIO, ROCm,
environment variable, performance, accelerator, GPU, EPYC, GRUB,
operating system

@@ -1,5 +1,5 @@
.. meta::
:description: AMD Instinct MI300X system settings
:description: Learn about AMD Instinct MI300X system settings and performance tuning.
:keywords: AMD, Instinct, MI300X, HPC, tuning, BIOS settings, NBIO, ROCm,
environment variable, performance, accelerator, GPU, EPYC, GRUB,
operating system
@@ -35,7 +35,7 @@ functioning correctly before trying to improve its overall performance. In this
section, the settings discussed mostly ensure proper functionality of your
Instinct-based system. Some settings discussed are known to improve performance
for most applications running on an MI300X system. See
:doc:`/how-to/tuning-guides/mi300x/workload` for how to improve performance for
:doc:`../rocm-for-ai/inference-optimization/workload` for how to improve performance for
specific applications or workloads.

.. _mi300x-bios-settings:

@@ -1,9 +1,9 @@
<head>
<meta charset="UTF-8">
<meta name="description" content="RDNA2 workstation tuning guide">
<meta name="keywords" content="RDNA2, workstation, BIOS settings, installation, AMD,
ROCm">
</head>
---
myst:
html_meta:
"description": "Learn about system settings and performance tuning for RDNA2-based GPUs."
"keywords": "RDNA2, workstation, desktop, BIOS, installation, Radeon, pro, v620, w6000"
---

# AMD RDNA2 system optimization

@@ -12,7 +12,7 @@
This chapter reviews system settings that are required to configure the system
for ROCm virtualization on RDNA2-based AMD Radeon™ PRO GPUs. Installing ROCm on
bare metal follows the routine ROCm
{doc}`installation procedure<rocm-install-on-linux:install/native-install/index>`.
{doc}`installation procedure<rocm-install-on-linux:install/install-methods/package-manager-index>`.

To enable ROCm virtualization on V620, you must set up Single Root I/O
Virtualization (SR-IOV) in the BIOS via a setting found in the following
@@ -166,4 +166,4 @@ First, assign GPU virtual function (VF) to VM using the following steps.
Then start the VM.

Finally, install ROCm on the virtual machine (VM). For detailed instructions,
refer to the {doc}`Linux install guide<rocm-install-on-linux:install/native-install/index>`.
refer to the {doc}`Linux install guide<rocm-install-on-linux:install/install-methods/package-manager-index>`.

@@ -1,3 +1,7 @@
.. meta::
:description: How to configure MI300X accelerators to fully leverage their capabilities and achieve optimal performance.
:keywords: ROCm, AI, machine learning, MI300X, LLM, usage, tutorial, optimization, tuning

************************
AMD MI300X tuning guides
************************
@@ -8,8 +12,8 @@ accelerators. They include detailed instructions on system settings and
application tuning suggestions to help you fully leverage the capabilities of
these accelerators, thereby achieving optimal performance.

* :doc:`/how-to/performance-validation/mi300x/vllm-benchmark`
* :doc:`../../rocm-for-ai/inference/vllm-benchmark`

* :doc:`/how-to/tuning-guides/mi300x/system`
* :doc:`../../system-optimization/mi300x`

* :doc:`/how-to/tuning-guides/mi300x/workload`
* :doc:`../../rocm-for-ai/inference-optimization/workload`

@@ -39,12 +39,9 @@ ROCm documentation is organized into the following categories:

* [Use ROCm for AI](./how-to/rocm-for-ai/index.rst)
* [Use ROCm for HPC](./how-to/rocm-for-hpc/index.rst)
* [Fine-tune LLMs and inference optimization](./how-to/llm-fine-tuning-optimization/index.rst)
* [System optimization](./how-to/system-optimization/index.rst)
* [AMD Instinct MI300X performance validation and tuning](./how-to/tuning-guides/mi300x/index.rst)
* [GPU cluster networking](https://rocm.docs.amd.com/projects/gpu-cluster-networking/en/latest/index.html)
* [System debugging](./how-to/system-debugging.md)
* [Use MPI](./how-to/gpu-enabled-mpi.rst)
* [Use advanced compiler features](./conceptual/compiler-topics.md)
* [Set the number of CUs](./how-to/setting-cus)
* [Troubleshoot BAR access limitation](./how-to/Bar-Memory.rst)

@@ -36,40 +36,62 @@ subtrees:
title: Use ROCm for AI
subtrees:
- entries:
- file: how-to/rocm-for-ai/install.rst
title: Installation
- file: how-to/rocm-for-ai/train-a-model.rst
title: Train a model
- file: how-to/rocm-for-ai/scale-model-training.rst
title: Scale model training
- file: how-to/rocm-for-ai/hugging-face-models.rst
title: Run models from Hugging Face
- file: how-to/rocm-for-ai/deploy-your-model.rst
title: Deploy your model
- file: how-to/rocm-for-hpc/index.rst
title: Use ROCm for HPC
- file: how-to/llm-fine-tuning-optimization/index.rst
title: Fine-tune LLMs and inference optimization
subtrees:
- entries:
- file: how-to/llm-fine-tuning-optimization/overview.rst
title: Conceptual overview
- file: how-to/llm-fine-tuning-optimization/fine-tuning-and-inference.rst
- file: how-to/rocm-for-ai/training/index.rst
title: Training
subtrees:
- entries:
- file: how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference.rst
title: Use a single accelerator
- file: how-to/llm-fine-tuning-optimization/multi-gpu-fine-tuning-and-inference.rst
title: Use multiple accelerators
- file: how-to/llm-fine-tuning-optimization/model-quantization.rst
- file: how-to/llm-fine-tuning-optimization/model-acceleration-libraries.rst
- file: how-to/llm-fine-tuning-optimization/llm-inference-frameworks.rst
- file: how-to/llm-fine-tuning-optimization/optimizing-with-composable-kernel.md
title: Optimize with Composable Kernel
- file: how-to/llm-fine-tuning-optimization/optimizing-triton-kernel.rst
title: Optimize Triton kernels
- file: how-to/llm-fine-tuning-optimization/profiling-and-debugging.rst
title: Profile and debug
- file: how-to/rocm-for-ai/training/train-a-model.rst
title: Train a model
- file: how-to/rocm-for-ai/training/scale-model-training.rst
title: Scale model training

- file: how-to/rocm-for-ai/fine-tuning/index.rst
title: Fine-tuning LLMs
subtrees:
- entries:
- file: how-to/rocm-for-ai/fine-tuning/overview.rst
title: Conceptual overview
- file: how-to/rocm-for-ai/fine-tuning/fine-tuning-and-inference.rst
title: Fine-tuning
subtrees:
- entries:
- file: how-to/rocm-for-ai/fine-tuning/single-gpu-fine-tuning-and-inference.rst
title: Use a single accelerator
- file: how-to/rocm-for-ai/fine-tuning/multi-gpu-fine-tuning-and-inference.rst
title: Use multiple accelerators

- file: how-to/rocm-for-ai/inference/index.rst
title: Inference
subtrees:
- entries:
- file: how-to/rocm-for-ai/inference/install.rst
title: Installation
- file: how-to/rocm-for-ai/inference/hugging-face-models.rst
title: Run models from Hugging Face
- file: how-to/rocm-for-ai/inference/llm-inference-frameworks.rst
title: LLM inference frameworks
- file: how-to/rocm-for-ai/inference/vllm-benchmark.rst
title: Performance validation
- file: how-to/rocm-for-ai/inference/deploy-your-model.rst
title: Deploy your model

- file: how-to/rocm-for-ai/inference-optimization/index.rst
title: Inference optimization
subtrees:
- entries:
- file: how-to/rocm-for-ai/inference-optimization/model-quantization.rst
- file: how-to/rocm-for-ai/inference-optimization/model-acceleration-libraries.rst
- file: how-to/rocm-for-ai/inference-optimization/optimizing-with-composable-kernel.md
title: Optimize with Composable Kernel
- file: how-to/rocm-for-ai/inference-optimization/optimizing-triton-kernel.rst
title: Optimize Triton kernels
- file: how-to/rocm-for-ai/inference-optimization/profiling-and-debugging.rst
title: Profile and debug
- file: how-to/rocm-for-ai/inference-optimization/workload.rst
title: Workload tuning

- file: how-to/rocm-for-hpc/index.rst
title: Use ROCm for HPC
- file: how-to/system-optimization/index.rst
title: System optimization
subtrees:
@@ -86,18 +108,6 @@ subtrees:
title: AMD RDNA 2
- file: how-to/tuning-guides/mi300x/index.rst
title: AMD MI300X performance validation and tuning
subtrees:
- entries:
- file: how-to/performance-validation/mi300x/vllm-benchmark.rst
title: Performance validation
- file: how-to/tuning-guides/mi300x/system.rst
title: System tuning
- file: how-to/tuning-guides/mi300x/workload.rst
title: Workload tuning
- url: https://rocm.docs.amd.com/projects/gpu-cluster-networking/en/${branch}/index.html
title: GPU cluster networking
- file: how-to/gpu-enabled-mpi.rst
title: Use MPI
- file: how-to/system-debugging.md
- file: conceptual/compiler-topics.md
title: Use advanced compiler features

@@ -1,3 +1,3 @@
rocm-docs-core==1.11.0
rocm-docs-core==1.13.0
sphinx-reredirects
sphinx-sitemap

@@ -16,17 +16,17 @@ beautifulsoup4==4.12.3
# via pydata-sphinx-theme
breathe==4.35.0
# via rocm-docs-core
certifi==2024.8.30
certifi==2024.12.14
# via requests
cffi==1.17.1
# via
# cryptography
# pynacl
charset-normalizer==3.4.0
charset-normalizer==3.4.1
# via requests
click==8.1.7
click==8.1.8
# via sphinx-external-toc
cryptography==43.0.3
cryptography==44.0.0
# via pyjwt
deprecated==1.2.15
# via pygithub
@@ -36,17 +36,17 @@ docutils==0.21.2
# myst-parser
# pydata-sphinx-theme
# sphinx
fastjsonschema==2.20.0
fastjsonschema==2.21.1
# via rocm-docs-core
gitdb==4.0.11
gitdb==4.0.12
# via gitpython
gitpython==3.1.43
gitpython==3.1.44
# via rocm-docs-core
idna==3.10
# via requests
imagesize==1.4.1
# via sphinx
jinja2==3.1.4
jinja2==3.1.5
# via
# myst-parser
# sphinx
@@ -66,18 +66,18 @@ packaging==24.2
# via sphinx
pycparser==2.22
# via cffi
pydata-sphinx-theme==0.16.0
pydata-sphinx-theme==0.16.1
# via
# rocm-docs-core
# sphinx-book-theme
pygithub==2.5.0
# via rocm-docs-core
pygments==2.18.0
pygments==2.19.1
# via
# accessible-pygments
# pydata-sphinx-theme
# sphinx
pyjwt[crypto]==2.10.0
pyjwt[crypto]==2.10.1
# via pygithub
pynacl==1.5.0
# via pygithub
@@ -90,9 +90,9 @@ requests==2.32.3
# via
# pygithub
# sphinx
rocm-docs-core==1.11.0
rocm-docs-core==1.13.0
# via -r requirements.in
smmap==5.0.1
smmap==5.0.2
# via gitdb
snowballstemmer==2.2.0
# via sphinx
@@ -137,15 +137,15 @@ sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
tomli==2.1.0
tomli==2.2.1
# via sphinx
typing-extensions==4.12.2
# via
# pydata-sphinx-theme
# pygithub
urllib3==2.2.3
urllib3==2.3.0
# via
# pygithub
# requests
wrapt==1.17.0
wrapt==1.17.1
# via deprecated

@@ -10,7 +10,7 @@ ROCm is a software stack, composed primarily of open-source software, that
provides the tools for programming AMD Graphics Processing Units (GPUs), from
low-level kernels to high-level end-user applications.

.. image:: data/rocm-software-stack-6_3_1.jpg
.. image:: data/rocm-software-stack-6_3_2.jpg
:width: 800
:alt: AMD's ROCm software stack and enabling technologies.
:align: center

@@ -68,85 +68,6 @@ set_address_sanitizer_off() {
export LDFLAGS=""
}

build_miopen_ckProf() {
ENABLE_ADDRESS_SANITIZER=false
echo "Start Building Composable Kernel Profiler"
if [ "${ENABLE_ADDRESS_SANITIZER}" == "true" ]; then
set_asan_env_vars
set_address_sanitizer_on
else
unset_asan_env_vars
set_address_sanitizer_off
fi

cd $COMPONENT_SRC
cd "$BUILD_DIR"
rm -rf *

architectures='gfx10 gfx11 gfx90 gfx94'
if [ -n "$GPU_ARCHS" ]; then
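# Keep only the first five characters of each ;-separated gfx target (its family
# prefix) and de-duplicate, e.g. "gfx90a;gfx942" -> "gfx90 gfx94".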
architectures=$(echo ${GPU_ARCHS} | awk -F';' '{for(i=1;i<=NF;i++) a[substr($i,1,5)]} END{for(i in a) printf i" "}')
fi

for arch in ${architectures}
do
if [ "${ASAN_CMAKE_PARAMS}" == "true" ] ; then
cmake -DBUILD_DEV=OFF \
-DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}/lib/cmake;${ROCM_PATH%-*}/$ASAN_LIBDIR;${ROCM_PATH%-*}/llvm;${ROCM_PATH%-*}" \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE:-'RelWithDebInfo'} \
-DCMAKE_SHARED_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_LIB_RPATH" \
-DCMAKE_EXE_LINKER_FLAGS_INIT="-Wl,--enable-new-dtags,--rpath,$ROCM_ASAN_EXE_RPATH" \
-DCMAKE_VERBOSE_MAKEFILE=1 \
-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
-DCMAKE_INSTALL_PREFIX="${ROCM_PATH}" \
-DCMAKE_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
-DROCM_SYMLINK_LIBS=OFF \
-DCPACK_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
-DROCM_DISABLE_LDCONFIG=ON \
-DROCM_PATH="${ROCM_PATH}" \
-DCPACK_GENERATOR="${PKGTYPE^^}" \
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
${LAUNCHER_FLAGS} \
-DPROFILER_ONLY=ON \
-DENABLE_ASAN_PACKAGING=true \
-DGPU_ARCH="${arch}" \
"$COMPONENT_SRC"
else
cmake -DBUILD_DEV=OFF \
-DCMAKE_PREFIX_PATH="${ROCM_PATH%-*}" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_SHARED_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN' \
-DCMAKE_EXE_LINKER_FLAGS_INIT='-Wl,--enable-new-dtags,--rpath,$ORIGIN/../lib' \
-DCMAKE_VERBOSE_MAKEFILE=1 \
-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE \
-DCMAKE_INSTALL_PREFIX="${ROCM_PATH}" \
-DCMAKE_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
-DBUILD_FILE_REORG_BACKWARD_COMPATIBILITY=OFF \
-DROCM_SYMLINK_LIBS=OFF \
-DCPACK_PACKAGING_INSTALL_PREFIX="${ROCM_PATH}" \
-DROCM_DISABLE_LDCONFIG=ON \
-DROCM_PATH="${ROCM_PATH}" \
-DCPACK_GENERATOR="${PKGTYPE^^}" \
-DCMAKE_CXX_COMPILER="${ROCM_PATH}/llvm/bin/clang++" \
-DCMAKE_C_COMPILER="${ROCM_PATH}/llvm/bin/clang" \
${LAUNCHER_FLAGS} \
-DPROFILER_ONLY=ON \
-DGPU_ARCH="${arch}" \
"$COMPONENT_SRC"
fi

cmake --build . -- -j${PROC} package
cp ./*ckprofiler*.${PKGTYPE} $PACKAGE_DIR
rm -rf *
done
rm -rf _CPack_Packages/ && find -name '*.o' -delete

echo "Finished building Composable Kernel Profiler"
show_build_cache_stats
}

clean_miopen_ck() {
echo "Cleaning MIOpen-CK build directory: ${BUILD_DIR} ${PACKAGE_DIR}"
rm -rf "$BUILD_DIR" "$PACKAGE_DIR"

@@ -42,7 +42,6 @@ DEB_PATH="$(getDebPath $PROJ_NAME)"
RPM_PATH="$(getRpmPath $PROJ_NAME)"
INSTALL_PATH="${ROCM_INSTALL_PATH}/lib/llvm"
LLVM_ROOT_LCL="${LLVM_ROOT}"
ROCM_WHEEL_DIR="${BUILD_PATH}/_wheel"

TARGET="all"
MAKEOPTS="$DASH_JAY"
@@ -150,7 +149,6 @@ ENABLE_RUNTIMES="$ENABLE_RUNTIMES;libcxx;libcxxabi"
BOOTSTRAPPING_BUILD_LIBCXX=1

clean_lightning() {
rm -rf "$ROCM_WHEEL_DIR"
rm -rf "$BUILD_PATH"
rm -rf "$DEB_PATH"
rm -rf "$RPM_PATH"
@@ -332,15 +330,6 @@ build_lightning() {
echo "End Workaround for race condition"
cmake --build . -- $MAKEOPTS

case "$DISTRO_ID" in
(rhel*|centos*)
RHEL_BUILD=1
;;
(*)
RHEL_BUILD=0
;;
esac

if [ $SKIP_LIT_TESTS -eq 0 ]; then
if [ $RHEL_BUILD -eq 1 ]; then
cmake --build . -- $MAKEOPTS check-lld check-mlir
@@ -1158,9 +1147,4 @@ case $TARGET in
(*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
echo "Wheel Package build started !!!!"
create_wheel_package
fi

echo "Operation complete"

@@ -1,171 +0,0 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"

printUsage() {
echo
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -s, --static Build static lib (.a) instead of dynamic/shared (.so)"
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -w, --wheel Creates a Python wheel package of omniperf.
It needs to be used along with the -r option"
echo " -h, --help Prints this help"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo

return 0
}

API_NAME="omniperf"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
ROCM_WHEEL_DIR="${BUILD_DIR}/_wheel"
BUILD_TYPE="Debug"
MAKE_OPTS="$DASH_JAY -C $BUILD_DIR"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0;
MAKETARGET="deb"
PKGTYPE="deb"
WHEEL_PACKAGE=false

# Parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,static,address_sanitizer,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"

while true ;
do
case "$1" in
-h | --help)
printUsage ; exit 0;;
-c | --clean)
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
-r | --release)
BUILD_TYPE="Release" ; shift ;;
-a | --address_sanitizer)
set_asan_env_vars
set_address_sanitizer_on ; shift ;;
-s | --static)
SHARED_LIBS="OFF" ; shift ;;
-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
-p | --package)
MAKETARGET="$2" ; shift 2 ;;
-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
--) shift; break;; # end delimiter
*)
echo " This should never happen, but just in case: UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac

done

RET_CONFLICT=1
check_conflicting_options "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
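# The helper is expected to set RET_CONFLICT; values >= 30 signal conflicting -c/-o/-p options.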
if [ $RET_CONFLICT -ge 30 ]; then
print_vars "$API_NAME" "$TARGET" "$BUILD_TYPE" "$SHARED_LIBS" "$CLEAN_OR_OUT" "$PKGTYPE" "$MAKETARGET"
exit $RET_CONFLICT
fi

clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$ROCM_WHEEL_DIR"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}

build() {
echo "Building $PROJ_NAME"
if [ "$DISTRO_ID" = centos-7 ]; then
echo "Skipping make and package upload for Omniperf on the CentOS 7 distro, due to a Python dependency"
exit 0
fi

if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
pushd "$BUILD_DIR" || exit

echo "ROCm CMake Params: $(rocm_cmake_params)"
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"

print_lib_type $SHARED_LIBS
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DCHECK_PYTHON_DEPS=NO \
-DPYTHON_DEPS=${BUILD_DIR}/python-libs \
-DMOD_INSTALL_PATH=${BUILD_DIR}/modulefiles \
"$OMNIPERF_ROOT"
fi

make $MAKE_OPTS
make $MAKE_OPTS install
make $MAKE_OPTS package

copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}

create_wheel_package() {
echo "Creating Omniperf wheel package"

# Copy the setup.py generator to the build folder
mkdir -p "$ROCM_WHEEL_DIR"
cp -f "$SCRIPT_ROOT"/generate_setup_py.py "$ROCM_WHEEL_DIR"
cp -f "$SCRIPT_ROOT"/repackage_wheel.sh "$ROCM_WHEEL_DIR"
cd "$ROCM_WHEEL_DIR" || exit

# Currently only supports python3.6
./repackage_wheel.sh "$BUILD_DIR"/*.rpm python3.6

# Copy the created wheel to the RPM folder, which will be uploaded to Artifactory
copy_if WHL "WHL" "$PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
}

print_output_directory() {
case ${PKGTYPE} in
("deb")
echo "${PACKAGE_DEB}";;
("rpm")
echo "${PACKAGE_RPM}";;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
}

verifyEnvSetup

case "$TARGET" in
(clean) clean ;;
(build) build ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
echo "Wheel Package build started !!!!"
create_wheel_package
fi

echo "Operation complete"
@@ -1,191 +0,0 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"

printUsage() {
echo
echo "Usage: ${BASH_SOURCE##*/} [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -s, --static Build static lib (.a) instead of dynamic/shared (.so)"
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -a, --address_sanitizer Enable address sanitizer"
echo " -o, --outdir <pkg_type> Print path of output directory containing packages of
type referred to by pkg_type"
echo " -w, --wheel Creates a Python wheel package of omnitrace.
It needs to be used along with the -r option"
echo " -h, --help Prints this help"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo

return 0
}

API_NAME="omnitrace"
PROJ_NAME="$API_NAME"
LIB_NAME="lib${API_NAME}"
TARGET="build"
MAKETARGET="deb"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_LIB="$(getLibPath)"
BUILD_DIR="$(getBuildPath $API_NAME)"
PACKAGE_DEB="$(getPackageRoot)/deb/$API_NAME"
PACKAGE_RPM="$(getPackageRoot)/rpm/$API_NAME"
BUILD_TYPE="Debug"
MAKE_OPTS="-j 8"
SHARED_LIBS="ON"
CLEAN_OR_OUT=0
MAKETARGET="deb"
PKGTYPE="deb"
ASAN=0

# Parse the arguments
VALID_STR=$(getopt -o hcraso:p:w --long help,clean,release,address_sanitizer,static,outdir:,package:,wheel -- "$@")
eval set -- "$VALID_STR"

while true; do
case "$1" in
-h | --help)
printUsage
exit 0
;;
-c | --clean)
TARGET="clean"
((CLEAN_OR_OUT |= 1))
shift
;;
-r | --release)
BUILD_TYPE="RelWithDebInfo"
shift
;;
-a | --address_sanitizer)
ack_and_ignore_asan
ASAN=1
shift
;;
-s | --static)
SHARED_LIBS="OFF"
shift
;;
-o | --outdir)
TARGET="outdir"
PKGTYPE=$2
((CLEAN_OR_OUT |= 2))
shift 2
;;
-p | --package)
MAKETARGET="$2"
shift 2
;;
-w | --wheel)
echo "omnitrace: wheel build option accepted and ignored"
shift
;;
--)
shift
break
;;
*)
echo " This should never happen, but just in case: UNEXPECTED ERROR Parm : [$1] " >&2
exit 20
;;
esac

done

RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
print_vars $API_NAME $TARGET $BUILD_TYPE $SHARED_LIBS $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
exit $RET_CONFLICT
fi

clean() {
echo "Cleaning $PROJ_NAME"
rm -rf "$BUILD_DIR"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME:?}"
rm -rf "$PACKAGE_LIB/${LIB_NAME:?}"*
}

build_omnitrace() {
echo "Building $PROJ_NAME"
if [ "$DISTRO_ID" = "mariner-2.0" ] || [ "$DISTRO_ID" = "ubuntu-24.04" ] || [ "$DISTRO_ID" = "azurelinux-3.0" ]; then
echo "Skipping make and package upload for Omnitrace on the \"${DISTRO_ID}\" distro"
exit 0
fi

if [ $ASAN == 1 ]; then
echo "Skipping make and package upload for Omnitrace on an ASAN build"
exit 0
fi
if [ ! -d "$BUILD_DIR" ]; then
mkdir -p "$BUILD_DIR"
echo "Created build directory: $BUILD_DIR"
fi

echo "Build directory: $BUILD_DIR"
pushd "$BUILD_DIR" || exit
print_lib_type $SHARED_LIBS

echo "ROCm CMake Params: $(rocm_cmake_params)"
echo "ROCm Common CMake Params: $(rocm_common_cmake_params)"

if [ $ASAN == 1 ]; then
echo "Address Sanitizer path"

else
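# Brace expansion turns -DOMNITRACE_BUILD_{LIBUNWIND,DYNINST}=ON into a separate -D flag for each name.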
cmake \
$(rocm_cmake_params) \
$(rocm_common_cmake_params) \
-DOMNITRACE_BUILD_{LIBUNWIND,DYNINST}=ON \
-DDYNINST_BUILD_{TBB,BOOST,ELFUTILS,LIBIBERTY}=ON \
"$OMNITRACE_ROOT"
fi

popd || exit

echo "Make Options: $MAKE_OPTS"
cmake --build "$BUILD_DIR" --target all -- $MAKE_OPTS
cmake --build "$BUILD_DIR" --target install -- $MAKE_OPTS
cmake --build "$BUILD_DIR" --target package -- $MAKE_OPTS

copy_if DEB "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_DEB" "$BUILD_DIR/${API_NAME}"*.deb
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$PACKAGE_RPM" "$BUILD_DIR/${API_NAME}"*.rpm
}

print_output_directory() {
case ${PKGTYPE} in
"deb")
echo "${PACKAGE_DEB}"
;;
"rpm")
echo "${PACKAGE_RPM}"
;;
*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2
exit 1
;;
esac
exit
}

verifyEnvSetup

case "$TARGET" in
clean) clean ;;
build) build_omnitrace ;;
outdir) print_output_directory ;;
*) die "Invalid target $TARGET" ;;
esac

echo "Operation complete"
@@ -1,141 +0,0 @@
#!/bin/bash

source "$(dirname "${BASH_SOURCE}")/compute_utils.sh"
PROJ_NAME=OpenCL-ICD-Loader
TARGET="build"
MAKEOPTS="$DASH_JAY"
BUILD_TYPE="Debug"
PACKAGE_ROOT="$(getPackageRoot)"
PACKAGE_DEB="$PACKAGE_ROOT/deb/${PROJ_NAME,,}"
PACKAGE_RPM="$PACKAGE_ROOT/rpm/${PROJ_NAME,,}"
CLEAN_OR_OUT=0;
PKGTYPE="deb"
MAKETARGET="deb"
API_NAME="rocm-opencl-icd-loader"

printUsage() {
echo
echo "Usage: $(basename "${BASH_SOURCE}") [options ...]"
echo
echo "Options:"
echo " -c, --clean Clean output and delete all intermediate work"
echo " -p, --package <type> Specify packaging format"
echo " -r, --release Make a release build instead of a debug build"
echo " -h, --help Prints this help"
echo " -o, --outdir Print path of output directory containing packages"
echo " -s, --static Static builds are not supported; this option is accepted and ignored"
echo
echo "Possible values for <type>:"
echo " deb -> Debian format (default)"
echo " rpm -> RPM format"
echo
return 0
}

RET_CONFLICT=1
check_conflicting_options $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
if [ $RET_CONFLICT -ge 30 ]; then
print_vars $TARGET $BUILD_TYPE $CLEAN_OR_OUT $PKGTYPE $MAKETARGET
exit $RET_CONFLICT
fi

clean_opencl_icd_loader() {
echo "Cleaning $PROJ_NAME"
rm -rf "$PACKAGE_DEB"
rm -rf "$PACKAGE_RPM"
rm -rf "$PACKAGE_ROOT/${PROJ_NAME,,}"
}

copy_pkg_files_to_rocm() {
local comp_folder=$1
local comp_pkg_name=$2

cd "${OUT_DIR}/${PKGTYPE}/${comp_folder}" || exit 2
if [ "${PKGTYPE}" = 'deb' ]; then
dpkg-deb -x ${comp_pkg_name}_*.deb pkg/
else
mkdir pkg && pushd pkg/ || exit 2
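# Debian "-dev" names correspond to RPM "-devel" packages, so append "el" to make the glob match.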
if [[ "${comp_pkg_name}" != *-dev* ]]; then
rpm2cpio ../${comp_pkg_name}-*.rpm | cpio -idmv
else
rpm2cpio ../${comp_pkg_name}el-*.rpm | cpio -idmv
fi
popd || exit 2
fi
ls ./pkg -alt
cp -r ./pkg/*/rocm*/* "${ROCM_PATH}" || exit 2
rm -rf pkg/
}

build_opencl_icd_loader() {
echo "Downloading $PROJ_NAME package"
if [ "$DISTRO_NAME" = ubuntu ]; then
mkdir -p "$PACKAGE_DEB"
local rocm_ver=${ROCM_VERSION}
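# repo.radeon.com apparently publishes ".0" patch releases without the patch digit (e.g. 6.2.0 -> 6.2), so trim it.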
if [ ${ROCM_VERSION##*.} = 0 ]; then
rocm_ver=${ROCM_VERSION%.*}
fi
local url="https://repo.radeon.com/rocm/apt/${rocm_ver}/pool/main/r/${API_NAME}/"
local package
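# Scrape the repository index and take the first package built for this Ubuntu release.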
package=$(curl -s "$url" | grep -Po 'href="\K[^"]*' | grep "${DISTRO_RELEASE}" | head -n 1)

if [ -z "$package" ]; then
echo "No package found for Ubuntu version $DISTRO_RELEASE"
exit 1
fi

wget -t3 -P "$PACKAGE_DEB" "${url}${package}"
copy_pkg_files_to_rocm ${PROJ_NAME,,} ${API_NAME}
else
echo "$DISTRO_ID is not supported..."
exit 2
fi

echo "Installing $PROJ_NAME package"
}

print_output_directory() {
case ${PKGTYPE} in
("deb")
echo ${PACKAGE_DEB};;
("rpm")
echo ${PACKAGE_RPM};;
(*)
echo "Invalid package type \"${PKGTYPE}\" provided for -o" >&2; exit 1;;
esac
exit
}

VALID_STR=`getopt -o hcraswlo:p: --long help,clean,release,outdir:,package: -- "$@"`
eval set -- "$VALID_STR"
while true ;
do
case "$1" in
(-c | --clean )
TARGET="clean" ; ((CLEAN_OR_OUT|=1)) ; shift ;;
(-r | --release )
BUILD_TYPE="RelWithDebInfo" ; shift ;;
(-h | --help )
printUsage ; exit 0 ;;
(-a | --address_sanitizer)
ack_and_ignore_asan ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
MAKETARGET="$2" ; shift 2;;
(-s | --static)
echo "-s parameter accepted but ignored" ; shift ;;
--) shift; break;;
(*)
echo " This should never happen, but just in case: UNEXPECTED ERROR Parm : [$1] ">&2 ; exit 20;;
esac
done

case $TARGET in
(clean) clean_opencl_icd_loader ;;
(build) build_opencl_icd_loader ;;
(outdir) print_output_directory ;;
(*) die "Invalid target $TARGET" ;;
esac

echo "Operation complete"
@@ -32,7 +32,6 @@ ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
ROCM_CMAKE_BUILD_DIR="$(getBuildPath rocm-cmake)"
ROCM_CMAKE_PACKAGE_DEB="$(getPackageRoot)/deb/rocm-cmake"
ROCM_CMAKE_PACKAGE_RPM="$(getPackageRoot)/rpm/rocm-cmake"
ROCM_WHEEL_DIR="${ROCM_CMAKE_BUILD_DIR}/_wheel"
ROCM_CMAKE_BUILD_TYPE="debug"
BUILD_TYPE="Debug"
SHARED_LIBS="ON"
@@ -56,8 +55,6 @@ do
ack_and_ignore_asan ; shift ;;
(-s | --static)
SHARED_LIBS="OFF" ; shift ;;
(-w | --wheel)
WHEEL_PACKAGE=true ; shift ;;
(-o | --outdir)
TARGET="outdir"; PKGTYPE=$2 ; OUT_DIR_SPECIFIED=1 ; ((CLEAN_OR_OUT|=2)) ; shift 2 ;;
(-p | --package)
@@ -78,7 +75,6 @@ fi

clean_rocm_cmake() {
rm -rf "$ROCM_WHEEL_DIR"
rm -rf $ROCM_CMAKE_BUILD_DIR
rm -rf $ROCM_CMAKE_PACKAGE_DEB
rm -rf $ROCM_CMAKE_PACKAGE_RPM
@@ -106,19 +102,6 @@ build_rocm_cmake() {
copy_if RPM "${CPACKGEN:-"DEB;RPM"}" "$ROCM_CMAKE_PACKAGE_RPM" $ROCM_CMAKE_BUILD_DIR/rocm-cmake*.rpm
}

create_wheel_package() {
echo "Creating rocm-cmake wheel package"
# Copy the setup.py generator to the build folder
mkdir -p $ROCM_WHEEL_DIR
cp -f $SCRIPT_ROOT/generate_setup_py.py $ROCM_WHEEL_DIR
cp -f $SCRIPT_ROOT/repackage_wheel.sh $ROCM_WHEEL_DIR
cd $ROCM_WHEEL_DIR
# Currently only supports python3.6
./repackage_wheel.sh $ROCM_CMAKE_BUILD_DIR/rocm-cmake*.rpm python3.6
# Copy the created wheel to the RPM folder, which will be uploaded to Artifactory
copy_if WHL "WHL" "$ROCM_CMAKE_PACKAGE_RPM" "$ROCM_WHEEL_DIR"/dist/*.whl
}

print_output_directory() {
case ${PKGTYPE} in
("deb")
@@ -138,9 +121,4 @@ case $TARGET in
(*) die "Invalid target $TARGET" ;;
esac

if [[ $WHEEL_PACKAGE == true ]]; then
echo "Wheel Package build started !!!!"
create_wheel_package
fi

echo "Operation complete"

@@ -7,7 +7,6 @@ bison
bridge-utils
build-essential
bzip2
ccache
check
chrpath
cifs-utils
@@ -121,11 +120,9 @@ python3-yaml
python3.8-dev
re2c
redis-tools
# Eventually we should be able to remove rpm for debian builds.
rpm
rsync
ssh
# This makes life more pleasant inside the container
strace
sudo
systemtap-sdt-dev

@@ -1,285 +0,0 @@
#! /usr/bin/bash

set -x

apt-get -y update
DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install --no-install-recommends -y $(sed 's/#.*//' /tmp/packages)
apt-get clean
rm -rf /var/cache/apt/ /var/lib/apt/lists/* /etc/apt/apt.conf.d/01proxy

# Install git 2.17.1: with 2.25 we saw issues where adding git submodules failed if the parent git directory is owned by a different user
curl -o git.tar.gz https://cdn.kernel.org/pub/software/scm/git/git-2.17.1.tar.gz
tar -zxf git.tar.gz
cd git-*
make prefix=/usr/local all
make prefix=/usr/local install
git --version

# Install the argparse and CppHeaderParser Python modules for roctracer and rocprofiler
# Install rocm-docs-core for the docs-as-code project. Only needed on one OS
# CppHeader needs setuptools. setuptools needs wheel.
# Looks like I need them as separate commands
# Sigh, install both the python2 and python3 versions
pip3 install --no-cache-dir setuptools wheel tox
pip3 install --no-cache-dir CppHeaderParser argparse requests lxml barectf recommonmark jinja2==3.0.0 websockets matplotlib numpy scipy minimal msgpack pytest sphinx joblib PyYAML rocm-docs-core cmake==3.25.2 pandas myst-parser

# Allow passwordless sudo for all users
echo 'ALL ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/everyone

# Install OCaml packages to build LLVM's OCaml bindings, used in the lightning compiler test pipeline
wget -nv https://sourceforge.net/projects/opam.mirror/files/2.1.4/opam-2.1.4-x86_64-linux -O /usr/local/bin/opam
chmod +x /usr/local/bin/opam
opam init --yes --disable-sandboxing
opam install ctypes --yes

# Install and modify git-repo (#!/usr/bin/env python -> #!/usr/bin/env python3)
curl https://storage.googleapis.com/git-repo-downloads/repo > /usr/bin/repo
chmod a+x /usr/bin/repo

# Build ccache from source
cd /tmp
git clone https://github.com/ccache/ccache -b v4.7.5
cd ccache
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
make install
cd /tmp
rm -rf ccache

# Install sharp from MLNX_OFED_LINUX as a dependency for rccl-rdma-sharp-plugins
cd /var/tmp
mkdir mlnx
wget -O mlnx/tar.tgz https://content.mellanox.com/ofed/MLNX_OFED-24.01-0.3.3.1/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.tgz
tar -xz -C mlnx -f mlnx/tar.tgz
apt-key add mlnx/*/RPM-GPG-KEY-Mellanox
echo "deb [arch=amd64] file:$(echo $PWD/mlnx/*/DEBS) ./" > /etc/apt/sources.list.d/sharp.list
apt update
apt install -y sharp
apt clean
rm -rf /var/cache/apt/ /var/lib/apt/lists/* mlnx /etc/apt/sources.list.d/sharp.list

apt update
apt -y install libunwind-dev
apt -y install libgoogle-glog-dev

# Install Python 3.8 from source
curl -LO https://www.python.org/ftp/python/3.8.13/Python-3.8.13.tar.xz
tar -xvf Python-3.8.13.tar.xz
pwd
ls /var/tmp/
ls Python-3.8.13
mv Python-3.8.13 /opt/
apt install build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libsqlite3-dev libreadline-dev libffi-dev curl libbz2-dev pkg-config make -y
cd /opt/Python-3.8.13/
./configure --enable-optimizations --enable-shared
make
make -j 6
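# altinstall installs this build as python3.8 without replacing the distro's python3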
make altinstall
ldconfig /opt/Python-3.8.13
python3.8 --version

# roctracer and rocprofiler need this python3.8
python3.8 -m pip install setuptools wheel
python3.8 -m pip install CppHeaderParser argparse requests lxml PyYAML joblib

# Install an older version of the hwloc-devel package for rocrtst
curl -LO https://download.open-mpi.org/release/hwloc/v1.11/hwloc-1.11.13.tar.bz2
tar -xvf hwloc-1.11.13.tar.bz2
cd hwloc-1.11.13
./configure
make
make install
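# Expose the legacy libhwloc soname on the default library path (this hwloc 1.11 build is kept for rocrtst)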
cp /usr/local/lib/libhwloc.so.5 /usr/lib
hwloc-info --version

# Install gtest
mkdir -p /tmp/gtest
cd /tmp/gtest
wget https://github.com/google/googletest/archive/refs/tags/v1.14.0.zip -O googletest.zip
unzip googletest.zip
cd googletest-1.14.0/
mkdir build
cd build
cmake ..
make -j$(nproc)
make install
rm -rf /tmp/gtest

## Install gRPC from source
## RDC prerequisites
GRPC_ARCHIVE=grpc-1.61.0.tar.gz
mkdir /tmp/grpc
mkdir /usr/grpc
cd /tmp
git clone --recurse-submodules -b v1.61.0 https://github.com/grpc/grpc
cd grpc
mkdir -p build
cd build
cmake -DgRPC_INSTALL=ON -DBUILD_SHARED_LIBS=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX=/usr/grpc -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=14 -DCMAKE_SHARED_LINKER_FLAGS_INIT=-Wl,--enable-new-dtags,--build-id=sha1,--rpath,'$ORIGIN' ..
make -j $(nproc) install
rm -rf /tmp/grpc

## rocBLAS prerequisites
## Download prebuilt AMD multithreaded blis (2.0)
## Reference: https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/install.sh#L403
mkdir -p /tmp/blis
cd /tmp/blis
wget -O - https://github.com/amd/blis/releases/download/2.0/aocl-blis-mt-ubuntu-2.0.tar.gz | tar xfz -
mv amd-blis-mt /usr/blis
cd /
rm -rf /tmp/blis

## rocBLAS prerequisites (SWDEV-404612)
## Download aocl-linux-gcc-4.2.0_1_amd64.deb
mkdir -p /tmp/aocl
cd /tmp/aocl
wget -nv https://download.amd.com/developer/eula/aocl/aocl-4-2/aocl-linux-gcc-4.2.0_1_amd64.deb
apt install ./aocl-linux-gcc-4.2.0_1_amd64.deb
rm -rf /tmp/aocl

## hipBLAS prerequisites
## LAPACK (v3.9.1)
## Reference: https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/install.sh#L174
lapack_version=3.9.1
lapack_srcdir=lapack-$lapack_version
lapack_blddir=lapack-$lapack_version-bld
mkdir -p /tmp/lapack
cd /tmp/lapack
rm -rf "$lapack_srcdir" "$lapack_blddir"
wget -O - https://github.com/Reference-LAPACK/lapack/archive/refs/tags/v3.9.1.tar.gz | tar xzf -
cmake -H$lapack_srcdir -B$lapack_blddir -DCMAKE_BUILD_TYPE=Release -DCMAKE_Fortran_FLAGS=-fno-optimize-sibling-calls -DBUILD_TESTING=OFF -DCBLAS=ON -DLAPACKE=OFF
make -j$(nproc) -C "$lapack_blddir"
make -C "$lapack_blddir" install
cd $lapack_blddir
cp -r ./include/* /usr/local/include/
cp -r ./lib/* /usr/local/lib
cd /
rm -rf /tmp/lapack

## rocSOLVER prerequisites
## fmt (v7.1.3)
## Reference: https://github.com/ROCmSoftwarePlatform/rocSOLVER/blob/develop/install.sh#L152
fmt_version=7.1.3
fmt_srcdir=fmt-$fmt_version
fmt_blddir=fmt-$fmt_version-bld
mkdir -p /tmp/fmt
cd /tmp/fmt
rm -rf "$fmt_srcdir" "$fmt_blddir"
wget -O - https://github.com/fmtlib/fmt/archive/refs/tags/7.1.3.tar.gz | tar xzf -
cmake -H$fmt_srcdir -B$fmt_blddir -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_CXX_EXTENSIONS=OFF -DCMAKE_CXX_STANDARD_REQUIRED=ON -DFMT_DOC=OFF -DFMT_TEST=OFF
make -j$(nproc) -C "$fmt_blddir"
make -C "$fmt_blddir" install

# Build and install libjpeg-turbo
mkdir -p /tmp/libjpeg-turbo
cd /tmp/libjpeg-turbo
wget -nv https://github.com/rrawther/libjpeg-turbo/archive/refs/heads/2.0.6.2.zip -O libjpeg-turbo-2.0.6.2.zip
unzip libjpeg-turbo-2.0.6.2.zip
cd libjpeg-turbo-2.0.6.2
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RELEASE -DENABLE_STATIC=FALSE -DCMAKE_INSTALL_DEFAULT_LIBDIR=lib ..
make -j$(nproc) install
rm -rf /tmp/libjpeg-turbo

# Build a released ninja from source
mkdir -p /tmp/ninja
cd /tmp/ninja
wget -nv https://codeload.github.com/Kitware/ninja/zip/refs/tags/v1.11.1.g95dee.kitware.jobserver-1 -O ninja.zip
unzip ninja.zip
cd ninja-1.11.1.g95dee.kitware.jobserver-1
./configure.py --bootstrap
cp ninja /usr/local/bin/
rm -rf /tmp/ninja

# Install FFmpeg and dependencies
# Build NASM
mkdir -p /tmp/nasm-2.15.05
cd /tmp
wget -qO- "https://distfiles.macports.org/nasm/nasm-2.15.05.tar.bz2" | tar -xvj
cd nasm-2.15.05
./autogen.sh
./configure --prefix="/usr/local"
make -j$(nproc) install
rm -rf /tmp/nasm-2.15.05

# Build YASM
mkdir -p /tmp/yasm-1.3.0
cd /tmp
wget -qO- "http://www.tortall.net/projects/yasm/releases/yasm-1.3.0.tar.gz" | tar -xvz
cd yasm-1.3.0
./configure --prefix="/usr/local"
make -j$(nproc) install
rm -rf /tmp/yasm-1.3.0

# Build x264
mkdir -p /tmp/x264-snapshot-20191217-2245-stable
cd /tmp
wget -qO- "https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-20191217-2245-stable.tar.bz2" | tar -xvj
cd /tmp/x264-snapshot-20191217-2245-stable
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" ./configure --prefix="/usr/local" --enable-shared
make -j$(nproc) install
rm -rf /tmp/x264-snapshot-20191217-2245-stable

# Build x265
mkdir -p /tmp/x265_2.7
cd /tmp
wget -qO- "https://get.videolan.org/x265/x265_2.7.tar.gz" | tar -xvz
cd /tmp/x265_2.7/build/linux
cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="/usr/local" -DENABLE_SHARED:bool=on ../../source
make -j$(nproc) install
rm -rf /tmp/x265_2.7

# Build fdk-aac
mkdir -p /tmp/fdk-aac-2.0.2
cd /tmp
wget -qO- "https://sourceforge.net/projects/opencore-amr/files/fdk-aac/fdk-aac-2.0.2.tar.gz" | tar -xvz
cd /tmp/fdk-aac-2.0.2
autoreconf -fiv
./configure --prefix="/usr/local" --enable-shared --disable-static
make -j$(nproc) install
rm -rf /tmp/fdk-aac-2.0.2

# Build FFmpeg
cd /tmp
git clone -b release/4.4 https://git.ffmpeg.org/ffmpeg.git ffmpeg
cd ffmpeg
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" ./configure --prefix="/usr/local" --extra-cflags="-I/usr/local/include" --extra-ldflags="-L/usr/local/lib" --extra-libs=-lpthread --extra-libs=-lm --enable-shared --disable-static --enable-libx264 --enable-libx265 --enable-libfdk-aac --enable-gpl --enable-nonfree
make -j$(nproc) install
rm -rf /tmp/ffmpeg

cp /tmp/local-pin-600 /etc/apt/preferences.d

command -v lbzip2
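# Prefer the parallel lbzip2 as the "compressor" helper, falling back to plain bzip2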
ln -sf $(command -v lbzip2) /usr/local/bin/compressor || ln -sf $(command -v bzip2) /usr/local/bin/compressor

# Install Google Benchmark
mkdir -p /tmp/Gbenchmark
cd /tmp/Gbenchmark
wget -qO- https://github.com/google/benchmark/archive/refs/tags/v1.6.1.tar.gz | tar xz
cmake -Sbenchmark-1.6.1 -Bbuild -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DBENCHMARK_ENABLE_TESTING=OFF -DCMAKE_CXX_STANDARD=14
make -j -C build
cd /tmp/Gbenchmark/build
make install

# Build boost-1.85.0 from source for RPP
# Installing in a non-standard location since the test packages of hipFFT and rocFFT pick up the version of
# the installed Boost library and declare a package dependency on that specific version of Boost.
# For example, if this was installed in the standard location it would declare a dependency on libboost-dev(el)1.85.0
# which is not available as a package in any distro.
# Once this is fixed, we can remove the Boost package from the requirements list and install this
# in the standard location
mkdir -p /tmp/boost-1.85.0
cd /tmp/boost-1.85.0
wget -nv https://sourceforge.net/projects/boost/files/boost/1.85.0/boost_1_85_0.tar.bz2 -O ./boost_1_85_0.tar.bz2
tar -xf boost_1_85_0.tar.bz2 --use-compress-program="/usr/local/bin/compressor"
cd boost_1_85_0
./bootstrap.sh --prefix=${RPP_DEPS_LOCATION} --with-python=python3
./b2 stage -j$(nproc) threading=multi link=shared cxxflags="-std=c++11"
./b2 install threading=multi link=shared --with-system --with-filesystem
./b2 stage -j$(nproc) threading=multi link=static cxxflags="-std=c++11 -fpic" cflags="-fpic"
./b2 install threading=multi link=static --with-system --with-filesystem
rm -rf /tmp/boost-1.85.0
@@ -7,7 +7,6 @@ bison
bridge-utils
build-essential
bzip2
ccache
check
chrpath
cifs-utils
