From b541be7bcb6541b4a00633972ed5d0a546ad4e85 Mon Sep 17 00:00:00 2001
From: Peter Park
Date: Tue, 15 Oct 2024 10:47:56 -0400
Subject: [PATCH] Update bitsandbytes branch in docs (#3898)

---
 docs/how-to/llm-fine-tuning-optimization/model-quantization.rst | 2 +-
 .../single-gpu-fine-tuning-and-inference.rst                    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/how-to/llm-fine-tuning-optimization/model-quantization.rst b/docs/how-to/llm-fine-tuning-optimization/model-quantization.rst
index b37fbab7e..479f37b49 100644
--- a/docs/how-to/llm-fine-tuning-optimization/model-quantization.rst
+++ b/docs/how-to/llm-fine-tuning-optimization/model-quantization.rst
@@ -181,7 +181,7 @@ Installing bitsandbytes
    # Clone the github repo
    git clone --recurse https://github.com/ROCm/bitsandbytes.git
    cd bitsandbytes
-   git checkout rocm_enabled
+   git checkout rocm_enabled_multi_backend
 
    # Install dependencies
    pip install -r requirements-dev.txt
diff --git a/docs/how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference.rst b/docs/how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference.rst
index f96dd3bae..6b1946ea3 100644
--- a/docs/how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference.rst
+++ b/docs/how-to/llm-fine-tuning-optimization/single-gpu-fine-tuning-and-inference.rst
@@ -91,7 +91,7 @@ Setting up the base implementation environment
    # Use -DBNB_ROCM_ARCH to target a specific GPU architecture.
    git clone --recurse https://github.com/ROCm/bitsandbytes.git
    cd bitsandbytes
-   git checkout rocm_enabled
+   git checkout rocm_enabled_multi_backend
    pip install -r requirements-dev.txt
    cmake -DBNB_ROCM_ARCH="gfx942" -DCOMPUTE_BACKEND=hip -S .
    python setup.py install
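
For reference, a minimal sketch of the install sequence as it reads after this patch, assembled from the commands visible in the two hunks above. The explicit build step between the cmake configure and the install is an assumption; it falls outside the changed context shown in this patch.

   # Clone the ROCm fork of bitsandbytes and switch to the updated branch
   git clone --recurse https://github.com/ROCm/bitsandbytes.git
   cd bitsandbytes
   git checkout rocm_enabled_multi_backend   # branch name updated by this patch

   # Install build/dev dependencies
   pip install -r requirements-dev.txt

   # Configure the HIP backend; -DBNB_ROCM_ARCH targets a specific GPU architecture
   cmake -DBNB_ROCM_ARCH="gfx942" -DCOMPUTE_BACKEND=hip -S .
   make                       # assumed build step, not shown in the hunk context
   python setup.py install    # install the built package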