From c0c4d7ca6989908fed3984cabea02cc3587b3a19 Mon Sep 17 00:00:00 2001
From: Matthias Wild <40327258+mauwii@users.noreply.github.com>
Date: Fri, 16 Dec 2022 13:53:37 +0100
Subject: [PATCH] update (docker-)build scripts, `.dockerignore` and add patchmatch (#1970)

* update build scripts and dockerignore

updates to the build and run scripts:
- read repository name
- include flavor in container name
- read arch via the arch command
- use latest tag instead of arch
- don't bind-mount `$HOME/.huggingface`
- make sure HUGGINGFACE_TOKEN is set

updates to .dockerignore:
- include environments-and-requirements
- exclude binary_installer
- exclude docker-build
- exclude docs

* disable push and pr triggers of cloud image

also disable pushing. This was decided since:
- it is not multi-arch usable
- the default image is already cloud approved

* integrate patchmatch in container

* pin versions of recently introduced dependencies

* remove now unnecessary part from build.sh

move huggingface token to run script, so it can download missing models

* move GPU_FLAGS to run script since not needed at build time

* update env.sh

- read REPOSITORY_NAME from env if available
- add comment to explain the intention of this file
- remove unnecessary exports

* get rid of repository_name_lc

* capitalize variables

* update INSTALL_DOCKER with new variables

* add comments pointing to the docs

Co-authored-by: Lincoln Stein
---
 .dockerignore                           |  9 +----
 .github/workflows/build-cloud-img.yml   | 20 +++++------
 docker-build/Dockerfile                 | 27 ++++++++++-----
 docker-build/build.sh                   | 46 +++++++++----------------
 docker-build/env.sh                     | 20 ++++-------
 docker-build/run.sh                     | 28 ++++++++++-----
 docs/installation/040_INSTALL_DOCKER.md | 21 +++++------
 7 files changed, 83 insertions(+), 88 deletions(-)

diff --git a/.dockerignore b/.dockerignore
index 255335040f..5df924ddee 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,16 +1,13 @@
 *
 !backend
+!environments-and-requirements
 !frontend
-!binary_installer
 !ldm
 !main.py
 !scripts
 !server
 !static
 !setup.py
-!docker-build
-!docs
-docker-build/Dockerfile

 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
@@ -19,8 +16,4 @@ docker-build/Dockerfile
 !configs
 configs/models.yaml

-# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
-!environment*
-environment.yml
-
 **/__pycache__
diff --git a/.github/workflows/build-cloud-img.yml b/.github/workflows/build-cloud-img.yml
index 9ef41a26c3..f27cbea80a 100644
--- a/.github/workflows/build-cloud-img.yml
+++ b/.github/workflows/build-cloud-img.yml
@@ -1,15 +1,15 @@
 name: Build and push cloud image
 on:
   workflow_dispatch:
-  push:
-    branches:
-      - main
-    tags:
-      - v*
-  # we will NOT push the image on pull requests, only test buildability.
-  pull_request:
-    branches:
-      - main
+  # push:
+  #   branches:
+  #     - main
+  #   tags:
+  #     - v*
+  # # we will NOT push the image on pull requests, only test buildability.
+ # pull_request: + # branches: + # - main permissions: contents: read @@ -82,6 +82,6 @@ jobs: file: docker-build/Dockerfile.cloud platforms: Linux/${{ matrix.arch }} # do not push the image on PRs - push: ${{ github.event_name != 'pull_request' }} + push: false tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} diff --git a/docker-build/Dockerfile b/docker-build/Dockerfile index d85d65dd57..353a02b50c 100644 --- a/docker-build/Dockerfile +++ b/docker-build/Dockerfile @@ -14,9 +14,10 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# set workdir, PATH and copy sources -WORKDIR /usr/src/app -ENV PATH /usr/src/app/.venv/bin:$PATH +# set WORKDIR, PATH and copy sources +ARG WORKDIR=/usr/src/app +WORKDIR ${WORKDIR} +ENV PATH ${WORKDIR}/.venv/bin:$PATH ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./ @@ -38,18 +39,28 @@ FROM python:3.10-slim AS runtime RUN apt-get update \ && apt-get install -y \ --no-install-recommends \ + build-essential=12.9 \ libgl1-mesa-glx=20.3.* \ libglib2.0-0=2.66.* \ + libopencv-dev=4.5.* \ + python3-opencv=4.5.* \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -WORKDIR /usr/src/app -COPY --from=builder /usr/src/app . - -# set Environment, Entrypoint and default CMD +# setup environment +ARG WORKDIR=/usr/src/app +WORKDIR ${WORKDIR} +COPY --from=builder ${WORKDIR} . +ENV PATH=${WORKDIR}/.venv/bin:$PATH ENV INVOKEAI_ROOT /data ENV INVOKE_MODEL_RECONFIGURE --yes -ENV PATH=/usr/src/app/.venv/bin:$PATH +# Initialize patchmatch +RUN ln -sf \ + /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \ + /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \ + && python3 -c "from patchmatch import patch_match" + +# set Entrypoint and default CMD ENTRYPOINT [ "python3", "scripts/invoke.py" ] CMD [ "--web", "--host=0.0.0.0" ] diff --git a/docker-build/build.sh b/docker-build/build.sh index 6f0fbc174f..14e010d9c3 100755 --- a/docker-build/build.sh +++ b/docker-build/build.sh @@ -1,49 +1,35 @@ #!/usr/bin/env bash set -e -# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!! -# configure values by using env when executing build.sh f.e. 
`env ARCH=aarch64 ./build.sh` +# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup source ./docker-build/env.sh \ || echo "please execute docker-build/build.sh from repository root" \ || exit 1 -pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt} -dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile} +PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt} +DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile} # print the settings echo -e "You are using these values:\n" -echo -e "Dockerfile:\t ${dockerfile}" -echo -e "requirements:\t ${pip_requirements}" -echo -e "volumename:\t ${volumename}" -echo -e "arch:\t\t ${arch}" -echo -e "platform:\t ${platform}" -echo -e "invokeai_tag:\t ${invokeai_tag}\n" +echo -e "Dockerfile:\t ${DOCKERFILE}" +echo -e "Requirements:\t ${PIP_REQUIREMENTS}" +echo -e "Volumename:\t ${VOLUMENAME}" +echo -e "arch:\t\t ${ARCH}" +echo -e "Platform:\t ${PLATFORM}" +echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n" -if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then - echo "Volume already exists" - echo +if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then + echo -e "Volume already exists\n" else echo -n "createing docker volume " - docker volume create "${volumename}" + docker volume create "${VOLUMENAME}" fi # Build Container docker build \ - --platform="${platform}" \ - --tag="${invokeai_tag}" \ - --build-arg="PIP_REQUIREMENTS=${pip_requirements}" \ - --file="${dockerfile}" \ + --platform="${PLATFORM}" \ + --tag="${INVOKEAI_TAG}" \ + --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \ + --file="${DOCKERFILE}" \ . - -docker run \ - --rm \ - --platform="$platform" \ - --name="$project_name" \ - --hostname="$project_name" \ - --mount="source=$volumename,target=/data" \ - --mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \ - --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \ - --entrypoint="python3" \ - "${invokeai_tag}" \ - scripts/configure_invokeai.py --yes diff --git a/docker-build/env.sh b/docker-build/env.sh index 76d4127ec1..a9021b484d 100644 --- a/docker-build/env.sh +++ b/docker-build/env.sh @@ -1,15 +1,9 @@ #!/usr/bin/env bash -project_name=${PROJECT_NAME:-invokeai} -volumename=${VOLUMENAME:-${project_name}_data} -arch=${ARCH:-x86_64} -platform=${PLATFORM:-Linux/${arch}} -invokeai_tag=${INVOKEAI_TAG:-${project_name}:${arch}} -gpus=${GPU_FLAGS:+--gpus=${GPU_FLAGS}} - -export project_name -export volumename -export arch -export platform -export invokeai_tag -export gpus +# Variables shared by build.sh and run.sh +REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")} +VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data} +ARCH=${ARCH:-$(arch)} +PLATFORM=${PLATFORM:-Linux/${ARCH}} +CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda} +INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-latest} diff --git a/docker-build/run.sh b/docker-build/run.sh index d2f232d6fa..b7089fccd2 100755 --- a/docker-build/run.sh +++ b/docker-build/run.sh @@ -1,21 +1,31 @@ #!/usr/bin/env bash set -e -source ./docker-build/env.sh || echo "please run from repository root" || exit 1 +# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container +# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!! 
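+#
+# Usage sketch (assumptions: run from the repository root, model licenses accepted
+# on huggingface.co, <your-token> replaced with a real token; GPU_FLAGS is optional):
+#   HUGGINGFACE_TOKEN=<your-token> GPU_FLAGS=all ./docker-build/run.sh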
+ +source ./docker-build/env.sh \ + || echo "please run from repository root" \ + || exit 1 + +# check if HUGGINGFACE_TOKEN is available +# You must have accepted the terms of use for required models +HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN} echo -e "You are using these values:\n" -echo -e "volumename:\t ${volumename}" -echo -e "invokeai_tag:\t ${invokeai_tag}\n" +echo -e "Volumename:\t ${VOLUMENAME}" +echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n" docker run \ --interactive \ --tty \ --rm \ - --platform="$platform" \ - --name="$project_name" \ - --hostname="$project_name" \ - --mount="source=$volumename,target=/data" \ + --platform="$PLATFORM" \ + --name="${REPOSITORY_NAME,,}" \ + --hostname="${REPOSITORY_NAME,,}" \ + --mount="source=$VOLUMENAME,target=/data" \ + --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \ --publish=9090:9090 \ --cap-add=sys_nice \ - $gpus \ - "$invokeai_tag" ${1:+$@} + ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \ + "$INVOKEAI_TAG" ${1:+$@} diff --git a/docs/installation/040_INSTALL_DOCKER.md b/docs/installation/040_INSTALL_DOCKER.md index 9b9ccaadf0..c7c2d6adae 100644 --- a/docs/installation/040_INSTALL_DOCKER.md +++ b/docs/installation/040_INSTALL_DOCKER.md @@ -78,15 +78,16 @@ Some Suggestions of variables you may want to change besides the Token:

-| Environment-Variable | Default value | Description |
-| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
-| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
-| `PROJECT_NAME` | `invokeai` | affects the project folder, tag- and volume name |
-| `VOLUMENAME` | `${PROJECT_NAME}_data` | Name of the Docker Volume where model files will be stored |
-| `ARCH` | `x86_64` | can be changed to f.e. aarch64 if you are using a ARM based CPU |
-| `INVOKEAI_TAG` | `${PROJECT_NAME}:${ARCH}` | the Container Repository / Tag which will be used |
-| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
-| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
+| Environment-Variable | Default value                   | Description                                                                                     |
+| -------------------- | ------------------------------- | ----------------------------------------------------------------------------------------------- |
+| `HUGGINGFACE_TOKEN`  | No default, but **required**!   | This is the only **required** variable; without it you can't download the huggingface models    |
+| `REPOSITORY_NAME`    | The basename of the repo folder | This name will be used as the container repository/image name                                    |
+| `VOLUMENAME`         | `${REPOSITORY_NAME,,}_data`     | Name of the Docker Volume where model files will be stored                                       |
+| `ARCH`               | arch of the build machine       | can be changed if you want to build the image for another arch                                   |
+| `INVOKEAI_TAG`       | `latest`                        | the Container Repository / Tag which will be used                                                |
+| `PIP_REQUIREMENTS`   | `requirements-lin-cuda.txt`     | the requirements file to use (from `environments-and-requirements`)                              |
+| `CONTAINER_FLAVOR`   | `cuda`                          | the flavor of the image, which can be changed if you build with e.g. the AMD requirements file   |
+| `INVOKE_DOCKERFILE`  | `docker-build/Dockerfile`       | the Dockerfile which should be built, handy for development                                      |
@@ -129,7 +130,7 @@ also do so. ## Running the container on your GPU -If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra +If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra environment variable to enable GPU usage and have the process run much faster: ```bash
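# A minimal sketch, assuming the scripts from this patch and that HUGGINGFACE_TOKEN is
# already exported as described above: run.sh forwards GPU_FLAGS to `docker run` as
# `--gpus=<value>`, so `all` exposes every NVIDIA GPU to the container.
GPU_FLAGS=all ./docker-build/run.sh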