bugfix(pyproject) Convert from dependency groups to extras and update docs to use UV's built-in torch support

This commit is contained in:
Heathen711
2025-07-17 03:58:26 +00:00
parent 4b5c481b7a
commit c84f8465b8
7 changed files with 833 additions and 333 deletions

View File

@@ -22,6 +22,10 @@
## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
# GPU_DRIVER=cuda #| rocm
## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
# RENDER_GROUP_ID=
## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
# CONTAINER_UID=1000

View File

@@ -74,7 +74,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
--mount=type=bind,source=invokeai/version,target=invokeai/version \
ulimit -n 30000 && \
uv sync --group $GPU_DRIVER --frozen
uv sync --extra $GPU_DRIVER --frozen
# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek

View File

@@ -48,11 +48,8 @@ services:
invokeai-rocm:
<<: *invokeai
environment:
# if set, CONTAINER_INVOKEAI_ROOT will override the Invoke runtime directory location *inside* the container
- INVOKEAI_ROOT=${CONTAINER_INVOKEAI_ROOT:-/invokeai}
- HF_HOME
- AMD_VISIBLE_DEVICES=all
- RENDER_GROUP_ID=993
- RENDER_GROUP_ID=${RENDER_GROUP_ID}
runtime: amd
profiles:
- rocm

View File

@@ -14,6 +14,8 @@ set -e -o pipefail
# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.
printenv
USER_ID=${CONTAINER_UID:-1000}
USER=ubuntu
# if the user does not exist, create it. It is expected to be present on ubuntu >=24.x
@@ -24,9 +26,13 @@ usermod -u ${USER_ID} ${USER} 1>/dev/null
## ROCM specific configuration
# render group within the container must match the host render group
# otherwise the container will not be able to access the host GPU.
groupmod -g ${RENDER_GROUP_ID:-993} render
usermod -a -G render ${USER}
usermod -a -G video ${USER}
if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
# ensure the render group exists
groupmod -g ${RENDER_GROUP_ID} render
usermod -a -G render ${USER}
usermod -a -G video ${USER}
fi
### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat.

View File

@@ -69,34 +69,34 @@ The following commands vary depending on the version of Invoke being installed a
- If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
- If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is achieved by using [UV's built-in torch support](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection).
=== "Invoke v5.12 and later"
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
- **In all other cases, do not use an index.**
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
- **In all other cases, do not use a torch backend.**
=== "Invoke v5.10.0 to v5.11.0"
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
- **In all other cases, do not use a torch backend.**
=== "Invoke v5.0.0 to v5.9.1"
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.1`.
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
- **In all other cases, do not use a torch backend.**
=== "Invoke v4"
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm5.2`.
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
- **In all other cases, do not use a torch backend.**
8. Install the `invokeai` package. Substitute the package specifier and version.
@@ -105,10 +105,10 @@ The following commands vary depending on the version of Invoke being installed a
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
```
If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
```sh
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<BACKEND> --force-reinstall
```
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:

View File

@@ -76,11 +76,6 @@ dependencies = [
"semver~=3.0.1",
]
[dependency-groups]
cpu = ["torch==2.7.1+cpu", "torchvision==0.22.1+cpu"]
cuda = ["torch==2.7.1+cu128", "torchvision==0.22.1+cu128"]
rocm = ["torch==2.7.1+rocm6.3", "torchvision==0.22.1+rocm6.3"]
[project.optional-dependencies]
"xformers" = [
# Core generation dependencies, pinned for reproducible builds.
@@ -88,16 +83,13 @@ rocm = ["torch==2.7.1+rocm6.3", "torchvision==0.22.1+rocm6.3"]
# torch 2.4+cu carries its own triton dependency
]
# These enable the usage of installing the package with specific support.
# uv pip install .[rocm] --python 3.12 --python-preference only-managed --force-reinstall --index-strategy unsafe-best-match
# Problem is that these break `uv lock --index-strategy unsafe-best-match`
# This does work though, as the pyproject.toml has the indexes defined.
# uv pip install . torch==2.7.1+rocm6.3 --force-reinstall --index-strategy unsafe-best-match
# Maybe we update the docs to show these instead of the --index way?
# cpu = ["torch==2.7.1+cpu"]
# cuda = ["torch==2.7.1+cu128"]
# rocm = ["torch==2.7.1+rocm6.3"]
"cpu" = ["torch==2.7.1+cpu", "torchvision==0.22.1+cpu"]
"cuda" = ["torch==2.7.1+cu128", "torchvision==0.22.1+cu128"]
"rocm" = [
"torch==2.7.1+rocm6.3",
"torchvision==0.22.1+rocm6.3",
"pytorch-triton-rocm",
]
"onnx" = ["onnxruntime"]
"onnx-cuda" = ["onnxruntime-gpu"]
@@ -129,25 +121,38 @@ rocm = ["torch==2.7.1+rocm6.3", "torchvision==0.22.1+rocm6.3"]
# Prevent opencv-python from ever being chosen during dependency resolution.
# This prevents conflicts with opencv-contrib-python, which Invoke requires.
override-dependencies = ["opencv-python; sys_platform=='never'"]
conflicts = [[{ group = "cpu" }, { group = "cuda" }, { group = "rocm" }]]
conflicts = [[{ extra = "cpu" }, { extra = "cuda" }, { extra = "rocm" }]]
index-strategy = "unsafe-best-match"
[tool.uv.sources]
torch = [
{ index = "torch-cpu", extra = "cpu" },
{ index = "torch-cuda", extra = "cuda" },
{ index = "torch-rocm", extra = "rocm" },
]
torchvision = [
{ index = "torch-cpu", extra = "cpu" },
{ index = "torch-cuda", extra = "cuda" },
{ index = "torch-rocm", extra = "rocm" },
]
pytorch-triton-rocm = [
{ index = "torch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
]
# This will cause: `uv lock --index-strategy unsafe-best-match` to be needed for future locks
# If you are updating these, make sure to update the docker/Dockerfile as well.
[[tool.uv.index]]
name = "torch-cpu"
url = "https://download.pytorch.org/whl/cpu"
group = "cpu"
explicit = true
[[tool.uv.index]]
name = "torch-cuda"
url = "https://download.pytorch.org/whl/cu128"
group = "cuda"
explicit = true
[[tool.uv.index]]
name = "torch-rocm"
url = "https://download.pytorch.org/whl/rocm6.3"
group = "rocm"
explicit = true
[project.scripts]
"invokeai-web" = "invokeai.app.run_app:run_app"

1062
uv.lock generated

File diff suppressed because it is too large Load Diff