Compare commits

...

16 Commits

Author  SHA1  Message  Date

Lincoln Stein  ded521b019  pass regression tests when translator package not installed  2023-07-31 12:35:39 -04:00
Lincoln Stein  a3980cc756  changed declaration of dummy translate class for pytest  2023-07-31 09:04:26 -04:00
Lincoln Stein  6f15a67592  Merge branch 'main' into feat/translate  2023-07-31 08:40:17 -04:00
Lincoln Stein  a597b4bfaf  add dummy ts object to pass pytests  2023-07-31 08:28:23 -04:00
Lincoln Stein  6ac4338f00  blackified  2023-07-31 08:07:36 -04:00
Alexandre Macabies  eb642653cb  Add Nix Flake for development, which uses Python virtualenv.  2023-07-31 19:14:30 +10:00
Lincoln Stein  f4ead5e07f  fix keyerror bug that was causing merge script to crash  2023-07-30 19:25:44 -04:00
Lincoln Stein  6d24ca7f52  3.0.1post3 (#4082)  2023-07-30 18:03:35 -04:00
    This is a relatively stable release that corrects the urgent windows
    install and model manager problems in 3.0.1. It still has two known
    bugs:

    1. Many inpainting models are not loading correctly.
    2. The merge script is failing to start.
Lincoln Stein  2164da8592  blackify  2023-07-30 16:25:06 -04:00
Lincoln Stein  4121c261a0  fix missing models when INVOKEAI_ROOT="."  2023-07-30 13:37:18 -04:00
Lincoln Stein  99823d5039  more fixes to update and install  2023-07-30 11:57:06 -04:00
Lincoln Stein  0abceb0e7b  Merge branch 'main' of github.com:invoke-ai/InvokeAI  2023-07-30 11:08:27 -04:00
ymgenesis  73e25d8dbe  Update communityNodes.md  2023-07-30 10:59:56 -04:00
    - Remove FaceMask and add link FaceTools repository, which includes FaceMask, FaceOff, and FacePlace
    - Move Ideal Size up from under the template entry
Lincoln Stein  ba817b5648  added popup for translation service  2023-07-30 09:14:26 -04:00
Lincoln Stein  0c31eaee61  blackified  2023-07-29 21:14:18 -04:00
Lincoln Stein  e73c12cac2  add non-English language translator node  2023-07-29 20:21:57 -04:00
10 changed files with 189 additions and 24 deletions

@@ -14,20 +14,25 @@ The nodes linked below have been developed and contributed by members of the Inv
## List of Nodes
-### Face Mask
+### FaceTools
-**Description:** This node autodetects a face in the image using MediaPipe and masks it by making it transparent. Via outpainting you can swap faces with other faces, or invert the mask and swap things around the face with other things. Additionally, you can supply X and Y offset values to scale/change the shape of the mask for finer control. The node also outputs an all-white mask in the same dimensions as the input image. This is needed by the inpaint node (and unified canvas) for outpainting.
+**Description:** FaceTools is a collection of nodes created to manipulate faces as you would in Unified Canvas. It includes FaceMask, FaceOff, and FacePlace. FaceMask autodetects a face in the image using MediaPipe and creates a mask from it. FaceOff similarly detects a face, then takes the face off of the image by adding a square bounding box around it and cropping/scaling it. FacePlace puts the bounded face image from FaceOff back onto the original image. Using these nodes with other inpainting node(s), you can put new faces on existing things, put new things around existing faces, and work closer with a face as a bounded image. Additionally, you can supply X and Y offset values to scale/change the shape of the mask for finer control on FaceMask and FaceOff. See GitHub repository below for usage examples.
-**Node Link:** https://github.com/ymgenesis/InvokeAI/blob/facemaskmediapipe/invokeai/app/invocations/facemask.py
+**Node Link:** https://github.com/ymgenesis/FaceTools/
-**Example Node Graph:** https://www.mediafire.com/file/gohn5sb1bfp8use/21-July_2023-FaceMask.json/file
-**FaceMask Output Examples**
+**Output Examples**
-![5cc8abce-53b0-487a-b891-3bf94dcc8960](https://github.com/invoke-ai/InvokeAI/assets/25252829/43f36d24-1429-4ab1-bd06-a4bedfe0955e)
-![b920b710-1882-49a0-8d02-82dff2cca907](https://github.com/invoke-ai/InvokeAI/assets/25252829/7660c1ed-bf7d-4d0a-947f-1fc1679557ba)
-![71a91805-fda5-481c-b380-264665703133](https://github.com/invoke-ai/InvokeAI/assets/25252829/f8f6a2ee-2b68-4482-87da-b90221d5c3e2)
+![2e3168cb-af6a-475d-bfac-c7b7fd67b4c2](https://github.com/ymgenesis/InvokeAI/assets/25252829/a5ad7d44-2ada-4b3c-a56e-a21f8244a1ac)
+![2_annotated](https://github.com/ymgenesis/InvokeAI/assets/25252829/53416c8a-a23b-4d76-bb6d-3cfd776e0096)
+![2fe2150c-fd08-4e26-8c36-f0610bf441bb](https://github.com/ymgenesis/InvokeAI/assets/25252829/b0f7ecfe-f093-4147-a904-b9f131b41dc9)
+![831b6b98-4f0f-4360-93c8-69a9c1338cbe](https://github.com/ymgenesis/InvokeAI/assets/25252829/fc7b0622-e361-4155-8a76-082894d084f0)
<hr>
+### Ideal Size
+**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
+**Node Link:** https://github.com/JPPhoto/ideal-size-node
--------------------------------
### Super Cool Node Template
@@ -42,11 +47,5 @@ The nodes linked below have been developed and contributed by members of the Inv
![Invoke AI](https://invoke-ai.github.io/InvokeAI/assets/invoke_ai_banner.png)
-### Ideal Size
-**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
-**Node Link:** https://github.com/JPPhoto/ideal-size-node
## Help
If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
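
As background for the FaceMask behaviour described in the diff above (detect a face with MediaPipe, then turn it into a mask), here is a minimal standalone sketch of that idea. It is not the FaceTools or FaceMask code; the function name, padding parameter, and file names are illustrative only, and it assumes mediapipe, opencv-python, and numpy are installed.

# Minimal sketch (not FaceTools itself): detect a face with MediaPipe and paint
# its bounding box into a white-on-black mask. All names here are illustrative.
import cv2
import numpy as np
import mediapipe as mp


def rough_face_mask(image_bgr: np.ndarray, pad: int = 0) -> np.ndarray:
    """Return a white-on-black mask covering the first detected face."""
    h, w = image_bgr.shape[:2]
    mask = np.zeros((h, w), dtype=np.uint8)
    with mp.solutions.face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5) as detector:
        results = detector.process(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
    if not results.detections:
        return mask  # no face found; the mask stays empty
    box = results.detections[0].location_data.relative_bounding_box
    x0 = max(int(box.xmin * w) - pad, 0)
    y0 = max(int(box.ymin * h) - pad, 0)
    x1 = min(int((box.xmin + box.width) * w) + pad, w)
    y1 = min(int((box.ymin + box.height) * h) + pad, h)
    mask[y0:y1, x0:x1] = 255
    return mask


if __name__ == "__main__":
    img = cv2.imread("face.jpg")  # hypothetical input image
    cv2.imwrite("face_mask.png", rough_face_mask(img, pad=16))

The real nodes add the X/Y offsets, inpaint-ready outputs, and the FaceOff/FacePlace crop-and-paste steps described above; see the FaceTools repository for the actual implementation.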

flake.lock generated Normal file

@@ -0,0 +1,25 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1690630721,
        "narHash": "sha256-Y04onHyBQT4Erfr2fc82dbJTfXGYrf4V0ysLUYnPOP8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "d2b52322f35597c62abf56de91b0236746b2a03d",
        "type": "github"
      },
      "original": {
        "id": "nixpkgs",
        "type": "indirect"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

flake.nix Normal file

@@ -0,0 +1,81 @@
# Important note: this flake does not attempt to create a fully isolated, 'pure'
# Python environment for InvokeAI. Instead, it depends on local invocations of
# virtualenv/pip to install the required (binary) packages, most importantly the
# prebuilt binary pytorch packages with CUDA support.
# ML Python packages with CUDA support, like pytorch, are notoriously expensive
# to compile so it's purposefuly not what this flake does.

{
  description = "An (impure) flake to develop on InvokeAI.";

  outputs = { self, nixpkgs }:
    let
      system = "x86_64-linux";
      pkgs = import nixpkgs {
        inherit system;
        config.allowUnfree = true;
      };

      python = pkgs.python310;

      mkShell = { dir, install }:
        let
          setupScript = pkgs.writeScript "setup-invokai" ''
            # This must be sourced using 'source', not executed.
            ${python}/bin/python -m venv ${dir}
            ${dir}/bin/python -m pip install ${install}
            # ${dir}/bin/python -c 'import torch; assert(torch.cuda.is_available())'
            source ${dir}/bin/activate
          '';
        in
        pkgs.mkShell rec {
          buildInputs = with pkgs; [
            # Backend: graphics, CUDA.
            cudaPackages.cudnn
            cudaPackages.cuda_nvrtc
            cudatoolkit
            freeglut
            glib
            gperf
            procps
            libGL
            libGLU
            linuxPackages.nvidia_x11
            python
            stdenv.cc
            stdenv.cc.cc.lib
            xorg.libX11
            xorg.libXext
            xorg.libXi
            xorg.libXmu
            xorg.libXrandr
            xorg.libXv
            zlib

            # Pre-commit hooks.
            black

            # Frontend.
            yarn
            nodejs
          ];
          LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
          CUDA_PATH = pkgs.cudatoolkit;
          EXTRA_LDFLAGS = "-L${pkgs.linuxPackages.nvidia_x11}/lib";
          shellHook = ''
            if [[ -f "${dir}/bin/activate" ]]; then
              source "${dir}/bin/activate"
              echo "Using Python: $(which python)"
            else
              echo "Use 'source ${setupScript}' to set up the environment."
            fi
          '';
        };
    in
    {
      devShells.${system} = rec {
        develop = mkShell { dir = "venv"; install = "-e '.[xformers]' --extra-index-url https://download.pytorch.org/whl/cu118"; };
        default = develop;
      };
    };
}
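
The setup script above leaves its CUDA sanity check commented out. A sketch of that check, assuming the flake's pip step installed a CUDA-enabled torch wheel into the venv, would be run with the venv's interpreter (for example venv/bin/python):

# Sketch of the CUDA check hinted at in the commented-out line of setup-invokai.
# Run it with the venv's python after the pip install step has completed.
import torch

if __name__ == "__main__":
    assert torch.cuda.is_available(), "CUDA unavailable; check nvidia_x11/cudatoolkit in LD_LIBRARY_PATH"
    print(f"torch {torch.__version__} sees GPU: {torch.cuda.get_device_name(0)}")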

@@ -41,7 +41,7 @@ IF /I "%choice%" == "1" (
    python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
) ELSE IF /I "%choice%" == "7" (
    echo Running invokeai-configure...
-    python .venv\Scripts\invokeai-configure.exe --yes --default_only
+    python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight
) ELSE IF /I "%choice%" == "8" (
    echo Developer Console
    echo Python command is:

@@ -82,7 +82,7 @@ do_choice() {
        7)
            clear
            printf "Re-run the configure script to fix a broken install or to complete a major upgrade\n"
-            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
+            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only --skip-sd-weights
            ;;
        8)
            clear

@@ -0,0 +1,52 @@
# Copyright (c) 2023 Lincoln D. Stein

from typing import Literal, Union, List

from pydantic import Field

from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    InvocationContext,
    InvocationConfig,
)

# from .params import StringOutput

translate_available = False
try:
    import translators as ts

    translate_available = True
    TRANSLATORS = tuple(ts.translators_pool)
except:
    TRANSLATORS = ("google", "bing")

DEFAULT_PROMPT = "" if translate_available else "To use this node, please 'pip install --upgrade translators'"


class TranslateOutput(BaseInvocationOutput):
    """Translated string output"""

    type: Literal["translated_string_output"] = "translated_string_output"
    prompt: str = Field(default=None, description="The translated prompt string")


class TranslateInvocation(BaseInvocation):
    """Use the translators package to translate 330 languages into English prompts"""

    # fmt: off
    type: Literal["translate"] = "translate"

    # Inputs
    text: str = Field(default=DEFAULT_PROMPT, description="Prompt in any language")
    translator: Literal[TRANSLATORS] = Field(default="google", description="The translator service to use")
    # fmt: on

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {"title": "Translate", "tags": ["prompt", "translate", "translator"]},
        }

    def invoke(self, context: InvocationContext) -> TranslateOutput:
        translation: str = ts.translate_text(self.text, translator=self.translator)
        return TranslateOutput(prompt=translation)
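
Several commits in this compare ("add dummy ts object to pass pytests", "changed declaration of dummy translate class for pytest", "pass regression tests when translator package not installed") refer to a stand-in for the translators module so the node can still be imported, and tests collected, when the package is missing. That stand-in is not visible in the hunk above, so the following is only a sketch of the pattern, with the class name and echo-the-input behaviour assumed for illustration:

# Hypothetical sketch of the "dummy ts object" fallback mentioned in the commit log.
# The real code may differ; this only shows how ts.translate_text(...) can stay
# importable and callable when the optional `translators` package is absent.
try:
    import translators as ts
    translate_available = True
except ImportError:
    translate_available = False

    class _DummyTranslators:
        """Stand-in so module import and pytest collection succeed without the package."""

        translators_pool = ("google", "bing")

        @staticmethod
        def translate_text(text: str, translator: str = "google") -> str:
            # No-op: hand the prompt back untranslated.
            return text

    ts = _DummyTranslators()

TRANSLATORS = tuple(ts.translators_pool)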

@@ -274,7 +274,7 @@ class InvokeAISettings(BaseSettings):
    @classmethod
    def _excluded(self) -> List[str]:
        # internal fields that shouldn't be exposed as command line options
-        return ["type", "initconf"]
+        return ["type", "initconf", "cached_root"]

    @classmethod
    def _excluded_from_yaml(self) -> List[str]:
@@ -290,6 +290,7 @@ class InvokeAISettings(BaseSettings):
            "restore",
            "root",
            "nsfw_checker",
+            "cached_root",
        ]

    class Config:
@@ -423,6 +424,7 @@ class InvokeAIAppConfig(InvokeAISettings):
    log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")
    version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
+    cached_root : Path = Field(default=None, description="internal use only", category="DEPRECATED")
    # fmt: on

    def parse_args(self, argv: List[str] = None, conf: DictConfig = None, clobber=False):
@@ -470,10 +472,15 @@ class InvokeAIAppConfig(InvokeAISettings):
        """
        Path to the runtime root directory
        """
-        if self.root:
-            return Path(self.root).expanduser().absolute()
+        # we cache value of root to protect against it being '.' and the cwd changing
+        if self.cached_root:
+            root = self.cached_root
+        elif self.root:
+            root = Path(self.root).expanduser().absolute()
        else:
-            return self.find_root()
+            root = self.find_root()
+        self.cached_root = root
+        return self.cached_root

    @property
    def root_dir(self) -> Path:
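
Commit 4121c261a0 ("fix missing models when INVOKEAI_ROOT=\".\"") is the motivation for the caching above: a relative root such as "." is resolved against the current working directory, and the resolved path changes if the process later calls chdir(). A small standalone sketch of that failure mode, with paths chosen only for illustration:

# Why the resolved root is cached: Path(".").absolute() depends on the current
# working directory, so resolving it lazily after a chdir() points elsewhere.
import os
import tempfile
from pathlib import Path

root_setting = "."  # e.g. INVOKEAI_ROOT="."
resolved_early = Path(root_setting).expanduser().absolute()

os.chdir(tempfile.gettempdir())  # something later changes the cwd
resolved_late = Path(root_setting).expanduser().absolute()

print(resolved_early)  # the directory the app was started in
print(resolved_late)   # now the temp dir, so models under the old root look "missing"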

@@ -650,7 +650,8 @@ def process_and_execute(
):
    # need to reinitialize config in subprocess
    config = InvokeAIAppConfig.get_config()
-    config.parse_args(['--root',opt.root])
+    args = ["--root", opt.root] if opt.root else []
+    config.parse_args(args)

    # set up so that stderr is sent to conn_out
    if conn_out:

@@ -320,7 +320,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
    def get_model_names(self, base_model: BaseModelType = None) -> List[str]:
        model_names = [
-            info["name"]
+            info["model_name"]
            for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model)
            if info["model_format"] == "diffusers"
        ]
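
Commit f4ead5e07f fixes the merge-script crash by reading the key that list_models() actually returns ("model_name" rather than "name"). A small illustration of the failure, using a simplified made-up record rather than the real model-manager schema:

# Illustration of the KeyError fixed above. The record is a hypothetical,
# simplified stand-in for a list_models() entry, not the real schema.
record = {"model_name": "stable-diffusion-1.5", "model_format": "diffusers"}

try:
    print(record["name"])        # old code: the key does not exist, so this raises KeyError
except KeyError:
    print(record["model_name"])  # fixed code reads the key that is actually present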

@@ -1 +1 @@
__version__ = "3.0.1post2"
__version__ = "3.0.1post3"