From 87789c1de8462433eb2b2368d0599026fffbf7c7 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 7 Mar 2023 23:52:53 -0500
Subject: [PATCH 01/24] add InvokeAIGenerator and InvokeAIGeneratorFactory classes

---
 invokeai/backend/generator/__init__.py |  11 +-
 invokeai/backend/generator/base.py     | 208 +++++++++++++++++++++++--
 2 files changed, 207 insertions(+), 12 deletions(-)

diff --git a/invokeai/backend/generator/__init__.py b/invokeai/backend/generator/__init__.py
index b01e93ad81..d617622ed8 100644
--- a/invokeai/backend/generator/__init__.py
+++ b/invokeai/backend/generator/__init__.py
@@ -1,5 +1,14 @@
 """
 Initialization file for the invokeai.generator package
 """
-from .base import Generator
+from .base import (
+    InvokeAIGeneratorFactory,
+    InvokeAIGenerator,
+    InvokeAIGeneratorBasicParams,
+    InvokeAIGeneratorOutput,
+    Txt2Img,
+    Img2Img,
+    Inpaint,
+    Generator,
+)
 from .inpaint import infill_methods
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index a834e9dba3..497a56b360 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -4,9 +4,14 @@ including img2img, txt2img, and inpaint
 """
 from __future__ import annotations

+import copy
+import importlib
+import dataclasses
+import diffusers
 import os
 import random
 import traceback
+from abc import ABCMeta, abstractmethod
 from contextlib import nullcontext
 from pathlib import Path

@@ -17,13 +22,204 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
+from typing import List, Type, Callable
+from dataclasses import dataclass, field
+from diffusers.schedulers import SchedulerMixin as Scheduler

 import invokeai.assets.web as web_assets
 from ..util.util import rand_perlin_2d
+from ..prompting.conditioning import get_uc_and_c_and_ec
+from ..model_management.model_manager import ModelManager
+from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline

 downsampling = 8
 CAUTION_IMG = "caution.png"

+class InvokeAIGeneratorFactory(object):
+    def __init__(self,
+                 model_manager: ModelManager,
+                 params: InvokeAIGeneratorBasicParams
+                 ):
+        self.model_manager = model_manager
+        self.params = params
+
+    def make_generator(self, generatorclass: Type[InvokeAIGenerator], **keyword_args)->InvokeAIGenerator:
+        return generatorclass(self.model_manager,
+                              self.params,
+                              **keyword_args
+                              )
+@dataclass
+class InvokeAIGeneratorBasicParams:
+    seed: int=None
+    width: int=512
+    height: int=512
+    cfg_scale: int=7.5
+    steps: int=20
+    ddim_eta: float=0.0
+    model: str='stable-diffusion-1.5'
+    scheduler: int='ddim'
+    precision: str='float16'
+    perlin: float=0.0
+    threshold: int=0.0
+    h_symmetry_time_pct: float=None
+    v_symmetry_time_pct: float=None
+    variation_amount: float = 0.0
+    with_variations: list = field(default_factory=list)
+
+@dataclass
+class InvokeAIGeneratorOutput:
+    image: Image
+    seed: int
+    model_name: str
+    model_hash: str
+    params: InvokeAIGeneratorBasicParams
+
+# we are interposing a wrapper around the original Generator classes so that
+# old code that calls Generate will continue to work.
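+# A minimal usage sketch of the factory API above (editorial illustration,
+# not part of this commit; assumes `manager` is a configured ModelManager):
+#
+#   params = InvokeAIGeneratorBasicParams(steps=30, width=640, height=640)
+#   factory = InvokeAIGeneratorFactory(manager, params)
+#   txt2img = factory.make_generator(Txt2Img)
+#   output = next(txt2img.generate(prompt='banana sushi'))
+#   print(output.image, output.seed)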
+class InvokeAIGenerator(metaclass=ABCMeta):
+    scheduler_map = dict(
+        ddim=diffusers.DDIMScheduler,
+        dpmpp_2=diffusers.DPMSolverMultistepScheduler,
+        k_dpm_2=diffusers.KDPM2DiscreteScheduler,
+        k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
+        k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
+        k_euler=diffusers.EulerDiscreteScheduler,
+        k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
+        k_heun=diffusers.HeunDiscreteScheduler,
+        k_lms=diffusers.LMSDiscreteScheduler,
+        plms=diffusers.PNDMScheduler,
+    )
+
+    def __init__(self,
+                 model_manager: ModelManager,
+                 params: InvokeAIGeneratorBasicParams
+                 ):
+        self.model_manager=model_manager
+        self.params=params
+
+    def generate(self,
+                 prompt: str='',
+                 callback: callable=None,
+                 step_callback: callable=None,
+                 **keyword_args,
+                 )->List[InvokeAIGeneratorOutput]:
+
+        model_name = self.params.model or self.model_manager.current_model
+        model_info: dict = self.model_manager.get_model(model_name)
+        model:StableDiffusionGeneratorPipeline = model_info['model']
+        model_hash = model_info['hash']
+        scheduler: Scheduler = self.get_scheduler(
+            model=model,
+            scheduler_name=self.params.scheduler
+        )
+        uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model)
+
+        def _wrap_results(image: Image, seed: int, **kwargs):
+            nonlocal results
+            results.append(output)
+
+        generator = self.load_generator(model, self._generator_name())
+        if self.params.variation_amount > 0:
+            generator.set_variation(self.params.seed,
+                                    self.params.variation_amount,
+                                    self.params.with_variations)
+
+        while True:
+            results = generator.generate(prompt,
+                                          conditioning=(uc, c, extra_conditioning_info),
+                                          sampler=scheduler,
+                                          **dataclasses.asdict(self.params),
+                                          **keyword_args
+                                          )
+            output = InvokeAIGeneratorOutput(
+                image=results[0][0],
+                seed=results[0][1],
+                model_name = model_name,
+                model_hash = model_hash,
+                params=copy.copy(self.params)
+            )
+            if callback:
+                callback(output)
+            yield output
+
+    def load_generator(self, model: StableDiffusionGeneratorPipeline, class_name: str):
+        module_name = f'invokeai.backend.generator.{class_name.lower()}'
+        module = importlib.import_module(module_name)
+        constructor = getattr(module, class_name)
+        return constructor(model, self.params.precision)
+
+    def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
+        scheduler_class = self.scheduler_map.get(scheduler_name,'ddim')
+        scheduler = scheduler_class.from_config(model.scheduler.config)
+        # hack copied over from generate.py
+        if not hasattr(scheduler, 'uses_inpainting_model'):
+            scheduler.uses_inpainting_model = lambda: False
+        return scheduler
+
+    @abstractmethod
+    def _generator_name(self)->str:
+        '''
+        In derived classes will return the name of the generator to use.
+ ''' + pass + +# ------------------------------------ +class Txt2Img(InvokeAIGenerator): + def _generator_name(self)->str: + return 'Txt2Img' + +# ------------------------------------ +class Img2Img(InvokeAIGenerator): + def generate(self, + init_image: Image | torch.FloatTensor, + strength: float=0.75, + **keyword_args + )->List[InvokeAIGeneratorOutput]: + return super().generate(init_image=init_image, + strength=strength, + **keyword_args + ) + + def _generator_name(self)->str: + return 'Img2Img' + +# ------------------------------------ +# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff +class Inpaint(Img2Img): + def generate(self, + mask_image: Image | torch.FloatTensor, + # Seam settings - when 0, doesn't fill seam + seam_size: int = 0, + seam_blur: int = 0, + seam_strength: float = 0.7, + seam_steps: int = 10, + tile_size: int = 32, + inpaint_replace=False, + infill_method=None, + inpaint_width=None, + inpaint_height=None, + inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF), + **keyword_args + )->List[InvokeAIGeneratorOutput]: + return super().generate( + mask_image=mask_image, + seam_size=seam_size, + seam_blur=seam_blur, + seam_strength=seam_strength, + seam_steps=seam_steps, + tile_size=tile_size, + inpaint_replace=inpaint_replace, + infill_method=infill_method, + inpaint_width=inpaint_width, + inpaint_height=inpaint_height, + inpaint_fill=inpaint_fill, + **keyword_args + ) + + def _generator_name(self)->str: + return 'Inpaint' + class Generator: downsampling_factor: int @@ -64,10 +260,10 @@ class Generator: def generate( self, prompt, - init_image, width, height, sampler, + init_image=None, iterations=1, seed=None, image_callback=None, @@ -293,16 +489,6 @@ class Generator: else: return (seed, None) - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self, width, height): - """ - Returns a tensor filled with random numbers, either form a normal distribution - (txt2img) or from the latent image (img2img, inpaint) - """ - raise NotImplementedError( - "get_noise() must be implemented in a descendent class" - ) - def get_perlin_noise(self, width, height): fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device # limit noise to only the diffusion image channels, not the mask channels From 5d37fa6e365ef98551bddcf4db30a1cf1e7ac93f Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 9 Mar 2023 00:18:29 -0500 Subject: [PATCH 02/24] node-based txt2img working without generate --- invokeai/app/api/dependencies.py | 6 +- invokeai/app/cli_app.py | 10 +- invokeai/app/invocations/generate.py | 79 +++++---- invokeai/app/services/generate_initializer.py | 135 +++------------ invokeai/app/services/invocation_services.py | 20 +-- invokeai/backend/__init__.py | 6 + invokeai/backend/generator/base.py | 156 ++++++++---------- invokeai/backend/safety_checker.py | 89 ++++++++++ 8 files changed, 247 insertions(+), 254 deletions(-) create mode 100644 invokeai/backend/safety_checker.py diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 271a2e3be3..58e6c81492 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -4,7 +4,7 @@ import os from argparse import Namespace from ...backend import Globals -from ..services.generate_initializer import get_generate +from ..services.generate_initializer import get_generator_factory from ..services.graph import GraphExecutionState from ..services.image_storage import DiskImageStorage from 
From 5d37fa6e365ef98551bddcf4db30a1cf1e7ac93f Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 00:18:29 -0500
Subject: [PATCH 02/24] node-based txt2img working without generate

---
 invokeai/app/api/dependencies.py              |   6 +-
 invokeai/app/cli_app.py                       |  10 +-
 invokeai/app/invocations/generate.py          |  79 +++++----
 invokeai/app/services/generate_initializer.py | 135 +++------------
 invokeai/app/services/invocation_services.py  |  20 +--
 invokeai/backend/__init__.py                  |   6 +
 invokeai/backend/generator/base.py            | 156 ++++++++----------
 invokeai/backend/safety_checker.py            |  89 ++++++++++
 8 files changed, 247 insertions(+), 254 deletions(-)
 create mode 100644 invokeai/backend/safety_checker.py

diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py
index 271a2e3be3..58e6c81492 100644
--- a/invokeai/app/api/dependencies.py
+++ b/invokeai/app/api/dependencies.py
@@ -4,7 +4,7 @@ import os
 from argparse import Namespace

 from ...backend import Globals
-from ..services.generate_initializer import get_generate
+from ..services.generate_initializer import get_generator_factory
 from ..services.graph import GraphExecutionState
 from ..services.image_storage import DiskImageStorage
 from ..services.invocation_queue import MemoryInvocationQueue
@@ -47,7 +47,7 @@ class ApiDependencies:
         # TODO: Use a logger
         print(f">> Internet connectivity is {Globals.internet_available}")

-        generate = get_generate(args, config)
+        generator_factory = get_generator_factory(args, config)

         events = FastAPIEventService(event_handler_id)

@@ -61,7 +61,7 @@ class ApiDependencies:
         db_location = os.path.join(output_folder, "invokeai.db")

         services = InvocationServices(
-            generate=generate,
+            generator_factory=generator_factory,
             events=events,
             images=images,
             queue=MemoryInvocationQueue(),
diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py
index 721760b222..d0190903ff 100644
--- a/invokeai/app/cli_app.py
+++ b/invokeai/app/cli_app.py
@@ -17,7 +17,7 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_gra
 from .invocations import *
 from .invocations.baseinvocation import BaseInvocation
 from .services.events import EventServiceBase
-from .services.generate_initializer import get_generate
+from .services.generate_initializer import get_generator_factory
 from .services.graph import EdgeConnection, GraphExecutionState
 from .services.image_storage import DiskImageStorage
 from .services.invocation_queue import MemoryInvocationQueue
@@ -106,11 +106,7 @@ def invoke_cli():
     args = Args()
     config = args.parse_args()

-    generate = get_generate(args, config)
-
-    # NOTE: load model on first use, uncomment to load at startup
-    # TODO: Make this a config option?
-    # generate.load_model()
+    generator_factory = get_generator_factory(args, config)

     events = EventServiceBase()

@@ -122,7 +118,7 @@ def invoke_cli():
     db_location = os.path.join(output_folder, "invokeai.db")

     services = InvocationServices(
-        generate=generate,
+        generator_factory=generator_factory,
         events=events,
         images=DiskImageStorage(output_folder),
         queue=MemoryInvocationQueue(),
diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index 15c5f17438..cf2ef8aa45 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -12,9 +12,10 @@ from ..services.image_storage import ImageType
 from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput
+from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator

 SAMPLER_NAME_VALUES = Literal[
-    "ddim", "plms", "k_lms", "k_dpm_2", "k_dpm_2_a", "k_euler", "k_euler_a", "k_heun"
+    tuple(InvokeAIGenerator.schedulers())
 ]

@@ -57,19 +58,24 @@ class TextToImageInvocation(BaseInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
+        factory = context.services.generator_factory
+        if self.model:
+            factory.model_name = self.model
+        else:
+            self.model = factory.model_name

-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
+        txt2img = factory.make_generator(Txt2Img)

-        results = context.services.generate.prompt2image(
+        outputs = txt2img.generate(
             prompt=self.prompt,
             step_callback=step_callback,
             **self.dict(
                 exclude={"prompt"}
             ),  # Shorthand for passing all of the parameters above manually
         )
+        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
+        # each time it is called. We only need the first one.
+        generate_output = next(outputs)

         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
@@ -78,7 +84,7 @@ class TextToImageInvocation(BaseInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, results[0][0])
+        context.services.images.save(image_type, image_name, generate_output.image)
         return ImageOutput(
             image=ImageField(image_type=image_type, image_name=image_name)
         )
@@ -115,23 +121,24 @@ class ImageToImageInvocation(TextToImageInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
+        factory = context.services.generator_factory
+        self.model = self.model or factory.model_name
+        factory.model_name = self.model
+        img2img = factory.make_generator(Img2Img)

-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
-            prompt=self.prompt,
-            init_img=image,
-            init_mask=mask,
-            step_callback=step_callback,
-            **self.dict(
-                exclude={"prompt", "image", "mask"}
-            ),  # Shorthand for passing all of the parameters above manually
+        generator_output = next(
+            img2img.generate(
+                prompt=self.prompt,
+                init_img=image,
+                init_mask=mask,
+                step_callback=step_callback,
+                **self.dict(
+                    exclude={"prompt", "image", "mask"}
+                ),  # Shorthand for passing all of the parameters above manually
+            )
         )

-        result_image = results[0][0]
+        result_image = generator_output.image

         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
@@ -145,7 +152,6 @@ class ImageToImageInvocation(TextToImageInvocation):
             image=ImageField(image_type=image_type, image_name=image_name)
         )

-
 class InpaintInvocation(ImageToImageInvocation):
     """Generates an image using inpaint."""

@@ -180,23 +186,24 @@ class InpaintInvocation(ImageToImageInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
+        factory = context.services.generator_factory
+        self.model = self.model or factory.model_name
+        factory.model_name = self.model
+        inpaint = factory.make_generator(Inpaint)

-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
-            prompt=self.prompt,
-            init_img=image,
-            init_mask=mask,
-            step_callback=step_callback,
-            **self.dict(
-                exclude={"prompt", "image", "mask"}
-            ),  # Shorthand for passing all of the parameters above manually
+        generator_output = next(
+            inpaint.generate(
+                prompt=self.prompt,
+                init_img=image,
+                init_mask=mask,
+                step_callback=step_callback,
+                **self.dict(
+                    exclude={"prompt", "image", "mask"}
+                ),  # Shorthand for passing all of the parameters above manually
+            )
        )

-        result_image = results[0][0]
+        result_image = generator_output.image

         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
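The SAMPLER_NAME_VALUES change at the top of this file builds the Literal type from InvokeAIGenerator.schedulers() at import time rather than from a hard-coded list. A standalone sketch of that technique (scheduler names shortened for illustration):

```python
from typing import Literal, get_args

# Stand-in for InvokeAIGenerator.schedulers(); any runtime tuple of strings works.
SCHEDULERS = ("ddim", "k_lms", "k_euler")

# Subscripting Literal with a tuple unpacks it at runtime; static checkers
# flag this construct, hence the type: ignore in checked code.
SamplerName = Literal[SCHEDULERS]  # type: ignore[valid-type]

print(get_args(SamplerName))  # ('ddim', 'k_lms', 'k_euler')
```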
diff --git a/invokeai/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py
index 9801909742..4ac5a5d706 100644
--- a/invokeai/app/services/generate_initializer.py
+++ b/invokeai/app/services/generate_initializer.py
@@ -1,16 +1,17 @@
 import os
 import sys
+import torch
 import traceback
 from argparse import Namespace
+from omegaconf import OmegaConf

 import invokeai.version
-from invokeai.backend import Generate, ModelManager
-
+from ...backend import ModelManager, InvokeAIGeneratorBasicParams, InvokeAIGeneratorFactory
+from ...backend.util import choose_precision, choose_torch_device
 from ...backend import Globals

 # TODO: most of this code should be split into individual services as the Generate.py code is deprecated
-def get_generate(args, config) -> Generate:
+def get_generator_factory(args, config) -> InvokeAIGeneratorFactory:
     if not args.conf:
         config_file = os.path.join(Globals.root, "configs", "models.yaml")
         if not os.path.exists(config_file):
@@ -63,49 +64,43 @@ def get_generator_factory(args, config) -> InvokeAIGeneratorFactory:
         print(f"{e}. Aborting.")
         sys.exit(-1)

-    # creating a Generate object:
+    # creating an InvokeAIGeneratorFactory object:
     try:
-        gen = Generate(
-            conf=args.conf,
-            model=args.model,
-            sampler_name=args.sampler_name,
-            embedding_path=embedding_path,
-            full_precision=args.full_precision,
-            precision=args.precision,
-            gfpgan=gfpgan,
-            codeformer=codeformer,
-            esrgan=esrgan,
-            free_gpu_mem=args.free_gpu_mem,
-            safety_checker=args.safety_checker,
+        device = torch.device(choose_torch_device())
+        precision = 'float16' if args.precision=='float16' \
+            else 'float32' if args.precision=='float32' \
+            else choose_precision(device)
+
+        model_manager = ModelManager(
+            OmegaConf.load(args.conf),
+            precision=precision,
+            device_type=device,
             max_loaded_models=args.max_loaded_models,
         )
+        # TO DO: initialize and pass safety checker!!!
+        params = InvokeAIGeneratorBasicParams(
+            precision=precision,
+        )
+        factory = InvokeAIGeneratorFactory(model_manager, params)
     except (FileNotFoundError, TypeError, AssertionError) as e:
-        report_model_error(opt, e)
+        report_model_error(args, e)
     except (IOError, KeyError) as e:
         print(f"{e}. Aborting.")
         sys.exit(-1)

     if args.seamless:
+        #TODO: do something here ?
         print(">> changed to seamless tiling mode")

-    # preload the model
-    try:
-        gen.load_model()
-    except KeyError:
-        pass
-    except Exception as e:
-        report_model_error(args, e)
-
     # try to autoconvert new models
     # autoimport new .ckpt files
     if path := args.autoconvert:
-        gen.model_manager.autoconvert_weights(
+        model_manager.autoconvert_weights(
             conf_path=args.conf,
             weights_directory=path,
         )

-    return gen
-
+    return factory

 def load_face_restoration(opt):
     try:
@@ -171,85 +166,3 @@ def report_model_error(opt: Namespace, e: Exception):
     # sys.argv = previous_args
     # main()  # would rather do a os.exec(), but doesn't exist?
     # sys.exit(0)
-
-
-# Temporary initializer for Generate until we migrate off of it
-def old_get_generate(args, config) -> Generate:
-    # TODO: Remove the need for globals
-    from invokeai.backend.globals import Globals
-
-    # alert - setting globals here
-    Globals.root = os.path.expanduser(
-        args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".")
-    )
-    Globals.try_patchmatch = args.patchmatch
-
-    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
-
-    # these two lines prevent a horrible warning message from appearing
-    # when the frozen CLIP tokenizer is imported
-    import transformers
-
-    transformers.logging.set_verbosity_error()
-
-    # Loading Face Restoration and ESRGAN Modules
-    gfpgan, codeformer, esrgan = None, None, None
-    try:
-        if config.restore or config.esrgan:
-            from ldm.invoke.restoration import Restoration
-
-            restoration = Restoration()
-            if config.restore:
-                gfpgan, codeformer = restoration.load_face_restore_models(
-                    config.gfpgan_model_path
-                )
-            else:
-                print(">> Face restoration disabled")
-            if config.esrgan:
-                esrgan = restoration.load_esrgan(config.esrgan_bg_tile)
-            else:
-                print(">> Upscaling disabled")
-        else:
-            print(">> Face restoration and upscaling disabled")
-    except (ModuleNotFoundError, ImportError):
-        print(traceback.format_exc(), file=sys.stderr)
-        print(">> You may need to install the ESRGAN and/or GFPGAN modules")
-
-    # normalize the config directory relative to root
-    if not os.path.isabs(config.conf):
-        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))
-
-    if config.embeddings:
-        if not os.path.isabs(config.embedding_path):
-            embedding_path = os.path.normpath(
-                os.path.join(Globals.root, config.embedding_path)
-            )
-    else:
-        embedding_path = None
-
-    # TODO: lazy-initialize this by wrapping it
-    try:
-        generate = Generate(
-            conf=config.conf,
-            model=config.model,
-            sampler_name=config.sampler_name,
-            embedding_path=embedding_path,
-            full_precision=config.full_precision,
-            precision=config.precision,
-            gfpgan=gfpgan,
-            codeformer=codeformer,
-            esrgan=esrgan,
-            free_gpu_mem=config.free_gpu_mem,
-            safety_checker=config.safety_checker,
-            max_loaded_models=config.max_loaded_models,
-        )
-    except (FileNotFoundError, TypeError, AssertionError):
-        # emergency_model_reconfigure() # TODO?
-        sys.exit(-1)
-    except (IOError, KeyError) as e:
-        print(f"{e}. Aborting.")
-        sys.exit(-1)
-
-    generate.free_gpu_mem = config.free_gpu_mem
-
-    return generate
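The chained conditional that picks a precision in get_generator_factory() reads tersely; an equivalent, more explicit sketch (the behavior of choose_precision() is assumed from context: half precision on capable CUDA devices, float32 elsewhere):

```python
import torch

def pick_precision(requested: str, device: torch.device) -> str:
    # Honor an explicit request; otherwise fall back to a device-appropriate default.
    if requested in ("float16", "float32"):
        return requested
    return "float16" if device.type == "cuda" else "float32"

print(pick_precision("auto", torch.device("cpu")))  # float32
```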
diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py
index 42cbd6c271..0177d79107 100644
--- a/invokeai/app/services/invocation_services.py
+++ b/invokeai/app/services/invocation_services.py
@@ -1,5 +1,5 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from invokeai.backend import Generate
+from invokeai.backend import InvokeAIGeneratorFactory

 from .events import EventServiceBase
 from .image_storage import ImageStorageBase
@@ -10,7 +10,7 @@ from .item_storage import ItemStorageABC
 class InvocationServices:
     """Services that can be used by invocations"""

-    generate: Generate  # TODO: wrap Generate, or split it up from model?
+    generator_factory: InvokeAIGeneratorFactory
     events: EventServiceBase
     images: ImageStorageBase
     queue: InvocationQueueABC
@@ -20,15 +20,15 @@ class InvocationServices:
     processor: "InvocationProcessorABC"

     def __init__(
-        self,
-        generate: Generate,
-        events: EventServiceBase,
-        images: ImageStorageBase,
-        queue: InvocationQueueABC,
-        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
-        processor: "InvocationProcessorABC",
+            self,
+            generator_factory: InvokeAIGeneratorFactory,
+            events: EventServiceBase,
+            images: ImageStorageBase,
+            queue: InvocationQueueABC,
+            graph_execution_manager: ItemStorageABC["GraphExecutionState"],
+            processor: "InvocationProcessorABC",
     ):
-        self.generate = generate
+        self.generator_factory = generator_factory
         self.events = events
         self.images = images
         self.queue = queue
diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py
index 06089369c2..75fd0b5cb4 100644
--- a/invokeai/backend/__init__.py
+++ b/invokeai/backend/__init__.py
@@ -2,6 +2,12 @@
 Initialization file for invokeai.backend
 """
 from .generate import Generate
+from .generator import (
+    InvokeAIGeneratorBasicParams,
+    InvokeAIGeneratorFactory,
+    InvokeAIGenerator,
+    InvokeAIGeneratorOutput
+)
 from .model_management import ModelManager
 from .args import Args
 from .globals import Globals
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 497a56b360..d6c70b4d80 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -4,7 +4,6 @@ including img2img, txt2img, and inpaint
 """
 from __future__ import annotations

-import copy
 import importlib
 import dataclasses
 import diffusers
@@ -13,7 +12,6 @@ import random
 import traceback
 from abc import ABCMeta, abstractmethod
 from contextlib import nullcontext
-from pathlib import Path

 import cv2
 import numpy as np
@@ -22,19 +20,59 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
-from typing import List, Type, Callable
+from typing import List, Type
 from dataclasses import dataclass, field
 from diffusers.schedulers import SchedulerMixin as Scheduler

-import invokeai.assets.web as web_assets
 from ..util.util import rand_perlin_2d
-
+from ..safety_checker import SafetyChecker
 from ..prompting.conditioning import get_uc_and_c_and_ec
 from ..model_management.model_manager import ModelManager
 from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline

 downsampling = 8
-CAUTION_IMG = "caution.png"
+
+@dataclass
+class InvokeAIGeneratorBasicParams:
+    seed: int=None
+    width: int=512
+    height: int=512
+    cfg_scale: int=7.5
+    steps: int=20
+    ddim_eta: float=0.0
+    model_name: str='stable-diffusion-1.5'
+    scheduler: int='ddim'
+    precision: str='float16'
+    perlin: float=0.0
+    threshold: int=0.0
+    h_symmetry_time_pct: float=None
+    v_symmetry_time_pct: float=None
+    variation_amount: float = 0.0
+    with_variations: list=field(default_factory=list)
+    safety_checker: SafetyChecker=None
+
+@dataclass
+class InvokeAIGeneratorOutput:
+    '''
+    InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation
+    operation, including the image, its seed, the model name used to generate the image
+    and the model hash, as well as all the generate() parameters that went into
+    generating the image (in .params, also available as attributes)
+    '''
+    image: Image
+    seed: int
+    model_name: str
+    model_hash: str
+    params: dict
+
+    def __getattribute__(self,name):
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            params = object.__getattribute__(self, 'params')
+            if name in params:
+                return params[name]
+            raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")

 class InvokeAIGeneratorFactory(object):
     def __init__(self,
@@ -49,31 +87,15 @@ class InvokeAIGeneratorFactory(object):
                               self.params,
                               **keyword_args
                               )
-@dataclass
-class InvokeAIGeneratorBasicParams:
-    seed: int=None
-    width: int=512
-    height: int=512
-    cfg_scale: int=7.5
-    steps: int=20
-    ddim_eta: float=0.0
-    model: str='stable-diffusion-1.5'
-    scheduler: int='ddim'
-    precision: str='float16'
-    perlin: float=0.0
-    threshold: int=0.0
-    h_symmetry_time_pct: float=None
-    v_symmetry_time_pct: float=None
-    variation_amount: float = 0.0
-    with_variations: list = field(default_factory=list)

-@dataclass
-class InvokeAIGeneratorOutput:
-    image: Image
-    seed: int
-    model_name: str
-    model_hash: str
-    params: InvokeAIGeneratorBasicParams
+    # getter and setter shortcuts for commonly used parameters
+    @property
+    def model_name(self)->str:
+        return self.params.model_name
+
+    @model_name.setter
+    def model_name(self, model_name: str):
+        self.params.model_name=model_name

 # we are interposing a wrapper around the original Generator classes so that
 # old code that calls Generate will continue to work.
@@ -93,7 +115,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):

     def __init__(self,
                  model_manager: ModelManager,
-                 params: InvokeAIGeneratorBasicParams
+                 params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
                  ):
         self.model_manager=model_manager
         self.params=params
@@ -105,7 +127,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
                  **keyword_args,
                  )->List[InvokeAIGeneratorOutput]:

-        model_name = self.params.model or self.model_manager.current_model
+        model_name = self.params.model_name or self.model_manager.current_model
         model_info: dict = self.model_manager.get_model(model_name)
         model:StableDiffusionGeneratorPipeline = model_info['model']
         model_hash = model_info['hash']
@@ -124,24 +146,33 @@ class InvokeAIGenerator(metaclass=ABCMeta):
             generator.set_variation(self.params.seed,
                                     self.params.variation_amount,
                                     self.params.with_variations)
+
+        generator_args = dataclasses.asdict(self.params)
+        generator_args.update(keyword_args)

         while True:
             results = generator.generate(prompt,
                                          conditioning=(uc, c, extra_conditioning_info),
                                          sampler=scheduler,
-                                         **dataclasses.asdict(self.params),
-                                         **keyword_args
+                                         **generator_args,
                                          )
             output = InvokeAIGeneratorOutput(
                 image=results[0][0],
                 seed=results[0][1],
                 model_name = model_name,
                 model_hash = model_hash,
-                params=copy.copy(self.params)
+                params=generator_args,
             )
             if callback:
                 callback(output)
             yield output
+
+    @classmethod
+    def schedulers(self)->List[str]:
+        '''
+        Return list of all the schedulers that we currently handle.
+ ''' + return list(self.scheduler_map.keys()) def load_generator(self, model: StableDiffusionGeneratorPipeline, class_name: str): module_name = f'invokeai.backend.generator.{class_name.lower()}' @@ -219,8 +250,7 @@ class Inpaint(Img2Img): def _generator_name(self)->str: return 'Inpaint' - - + class Generator: downsampling_factor: int latent_channels: int @@ -240,7 +270,6 @@ class Generator: self.with_variations = [] self.use_mps_noise = False self.free_gpu_mem = None - self.caution_img = None # this is going to be overridden in img2img.py, txt2img.py and inpaint.py def get_make_image(self, prompt, **kwargs): @@ -272,7 +301,7 @@ class Generator: perlin=0.0, h_symmetry_time_pct=None, v_symmetry_time_pct=None, - safety_checker: dict = None, + safety_checker: SafetyChecker=None, free_gpu_mem: bool = False, **kwargs, ): @@ -325,7 +354,7 @@ class Generator: image = make_image(x_T) if self.safety_checker is not None: - image = self.safety_check(image) + image = self.safety_checker.check(image) results.append([image, seed]) @@ -548,53 +577,6 @@ class Generator: return v2 - def safety_check(self, image: Image.Image): - """ - If the CompViz safety checker flags an NSFW image, we - blur it out. - """ - import diffusers - - checker = self.safety_checker["checker"] - extractor = self.safety_checker["extractor"] - features = extractor([image], return_tensors="pt") - features.to(self.model.device) - - # unfortunately checker requires the numpy version, so we have to convert back - x_image = np.array(image).astype(np.float32) / 255.0 - x_image = x_image[None].transpose(0, 3, 1, 2) - - diffusers.logging.set_verbosity_error() - checked_image, has_nsfw_concept = checker( - images=x_image, clip_input=features.pixel_values - ) - if has_nsfw_concept[0]: - print( - "** An image with potential non-safe content has been detected. A blurred image will be returned. **" - ) - return self.blur(image) - else: - return image - - def blur(self, input): - blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) - try: - caution = self.get_caution_img() - if caution: - blurry.paste(caution, (0, 0), caution) - except FileNotFoundError: - pass - return blurry - - def get_caution_img(self): - path = None - if self.caution_img: - return self.caution_img - path = Path(web_assets.__path__[0]) / CAUTION_IMG - caution = Image.open(path) - self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) - return self.caution_img - # this is a handy routine for debugging use. Given a generated sample, # convert it into a PNG image and store it at the indicated path def save_sample(self, sample, filepath): diff --git a/invokeai/backend/safety_checker.py b/invokeai/backend/safety_checker.py new file mode 100644 index 0000000000..86cf31cc13 --- /dev/null +++ b/invokeai/backend/safety_checker.py @@ -0,0 +1,89 @@ +''' +SafetyChecker class - checks images against the StabilityAI NSFW filter +and blurs images that contain potential NSFW content. 
+''' +import diffusers +import numpy as np +import torch +import traceback +from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) +from pathlib import Path +from PIL import Image, ImageFilter +from transformers import AutoFeatureExtractor + +import invokeai.assets.web as web_assets +from .globals import global_cache_dir + +class SafetyChecker(object): + CAUTION_IMG = "caution.png" + + def __init__(self, device: torch.device): + self.device = device + try: + print(">> Initializing NSFW checker") + safety_model_id = "CompVis/stable-diffusion-safety-checker" + safety_model_path = global_cache_dir("hub") + self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( + safety_model_id, + local_files_only=True, + cache_dir=safety_model_path, + ) + self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained( + safety_model_id, + local_files_only=True, + cache_dir=safety_model_path, + ) + self.safety_checker.to(device) + self.safety_feature_extractor.to(device) + except Exception: + print( + "** An error was encountered while installing the safety checker:" + ) + print(traceback.format_exc()) + else: + print(">> NSFW checker is disabled") + + def check(self, image: Image.Image): + """ + Check provided image against the StabilityAI safety checker and return + + """ + + features = self.safety_feature_extractor([image], return_tensors="pt") + # unfortunately checker requires the numpy version, so we have to convert back + x_image = np.array(image).astype(np.float32) / 255.0 + x_image = x_image[None].transpose(0, 3, 1, 2) + + diffusers.logging.set_verbosity_error() + checked_image, has_nsfw_concept = self.safety_checker( + images=x_image, clip_input=features.pixel_values + ) + if has_nsfw_concept[0]: + print( + "** An image with potential non-safe content has been detected. A blurred image will be returned. 
+            return self.blur(image)
+        else:
+            return image
+
+    def blur(self, input):
+        blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
+        try:
+            caution = self.get_caution_img()
+            if caution:
+                blurry.paste(caution, (0, 0), caution)
+        except FileNotFoundError:
+            pass
+        return blurry
+
+    def get_caution_img(self):
+        path = None
+        if self.caution_img:
+            return self.caution_img
+        path = Path(web_assets.__path__[0]) / self.CAUTION_IMG
+        caution = Image.open(path)
+        self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
+        return self.caution_img
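A hedged sketch of wiring up the new SafetyChecker added in PATCH 02 (constructor and check() signatures from the diff; the file paths are illustrative):

```python
import torch
from PIL import Image
from invokeai.backend.safety_checker import SafetyChecker

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checker = SafetyChecker(device)

image = Image.open("result.png")
# check() returns the image unchanged, or a blurred copy if it was flagged.
safe_image = checker.check(image)
safe_image.save("result_checked.png")
```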
From b679a6ba377885595c3741a5a98275c0c912459c Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 01:09:54 -0500
Subject: [PATCH 03/24] model manager defaults to consistent values of device and precision

---
 invokeai/backend/__init__.py                  |  5 ++-
 invokeai/backend/generator/base.py            | 35 +++++++++++++++----
 .../backend/model_management/model_manager.py | 24 +++++++------
 3 files changed, 47 insertions(+), 17 deletions(-)

diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py
index 75fd0b5cb4..f08050f576 100644
--- a/invokeai/backend/__init__.py
+++ b/invokeai/backend/__init__.py
@@ -6,7 +6,10 @@ from .generator import (
     InvokeAIGeneratorBasicParams,
     InvokeAIGeneratorFactory,
     InvokeAIGenerator,
-    InvokeAIGeneratorOutput
+    InvokeAIGeneratorOutput,
+    Txt2Img,
+    Img2Img,
+    Inpaint
 )
 from .model_management import ModelManager
 from .args import Args
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index d6c70b4d80..fcfc1eba37 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -5,6 +5,7 @@ including img2img, txt2img, and inpaint
 from __future__ import annotations

 import importlib
+import itertools
 import dataclasses
 import diffusers
 import os
@@ -20,7 +21,7 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
-from typing import List, Type
+from typing import List, Type, Iterator
 from dataclasses import dataclass, field
 from diffusers.schedulers import SchedulerMixin as Scheduler

@@ -77,7 +78,7 @@ class InvokeAIGeneratorFactory(object):
     def __init__(self,
                  model_manager: ModelManager,
-                 params: InvokeAIGeneratorBasicParams
+                 params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
                  ):
         self.model_manager = model_manager
         self.params = params
@@ -115,7 +116,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):

     def __init__(self,
                  model_manager: ModelManager,
-                 params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
+                 params: InvokeAIGeneratorBasicParams,
                  ):
         self.model_manager=model_manager
         self.params=params
@@ -124,9 +125,30 @@ class InvokeAIGenerator(metaclass=ABCMeta):
                  prompt: str='',
                  callback: callable=None,
                  step_callback: callable=None,
+                 iterations: int=1,
                  **keyword_args,
-                 )->List[InvokeAIGeneratorOutput]:
+                 )->Iterator[InvokeAIGeneratorOutput]:
+        '''
+        Return an iterator across the indicated number of generations.
+        Each time the iterator is called it will return an InvokeAIGeneratorOutput
+        object. Use like this:
+
+            outputs = txt2img.generate(prompt='banana sushi', iterations=5)
+            for result in outputs:
+                print(result.image, result.seed)
+
+        In the typical case of wanting to get just a single image, iterations
+        defaults to 1 and do:
+
+            output = next(txt2img.generate(prompt='banana sushi')
+
+        Pass None to get an infinite iterator.
+
+            outputs = txt2img.generate(prompt='banana sushi', iterations=None)
+            for o in outputs:
+                print(o.image, o.seed)
+
+        '''
         model_name = self.params.model_name or self.model_manager.current_model
         model_info: dict = self.model_manager.get_model(model_name)
         model:StableDiffusionGeneratorPipeline = model_info['model']
         model_hash = model_info['hash']
@@ -149,8 +171,9 @@ class InvokeAIGenerator(metaclass=ABCMeta):
         generator_args = dataclasses.asdict(self.params)
         generator_args.update(keyword_args)
-
-        while True:
+
+        iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1)
+        for i in iteration_count:
             results = generator.generate(prompt,
                                          conditioning=(uc, c, extra_conditioning_info),
                                          sampler=scheduler,
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index b362500ff7..4224f848d1 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -34,8 +34,7 @@ from picklescan.scanner import scan_file_path
 from invokeai.backend.globals import Globals, global_cache_dir

 from ..stable_diffusion import StableDiffusionGeneratorPipeline
-from ..util import CPU_DEVICE, ask_user, download_with_resume
-
+from ..util import CUDA_DEVICE, ask_user, download_with_resume

 class SDLegacyType(Enum):
     V1 = 1
@@ -51,23 +50,28 @@ VAE_TO_REPO_ID = {  # hack, see note in convert_and_import()
 }

 class ModelManager(object):
+    '''
+    Model manager handles loading, caching, importing, deleting, converting, and editing models.
+    '''
     def __init__(
         self,
-        config: OmegaConf,
-        device_type: torch.device = CPU_DEVICE,
+        config: OmegaConf|Path,
+        device_type: torch.device = CUDA_DEVICE,
         precision: str = "float16",
         max_loaded_models=DEFAULT_MAX_MODELS,
         sequential_offload=False,
     ):
         """
-        Initialize with the path to the models.yaml config file,
-        the torch device type, and precision. The optional
-        min_avail_mem argument specifies how much unused system
-        (CPU) memory to preserve. The cache of models in RAM will
-        grow until this value is approached. Default is 2G.
+        Initialize with the path to the models.yaml config file or
+        an initialized OmegaConf dictionary. Optional parameters
+        are the torch device type, precision, max_loaded_models,
+        and sequential_offload boolean. Note that the default device
+        type and precision are set up for a CUDA system running at half precision.
         """
         # prevent nasty-looking CLIP log message
         transformers.logging.set_verbosity_error()
+        if not isinstance(config, DictConfig):
+            config = OmegaConf.load(config)
         self.config = config
         self.precision = precision
         self.device = torch.device(device_type)
@@ -557,7 +561,7 @@ class ModelManager(object):
         """
         model_name = model_name or Path(repo_or_path).stem
         model_description = (
-            model_description or f"Imported diffusers model {model_name}"
+            description or f"Imported diffusers model {model_name}"
         )
         new_config = dict(
             description=model_description,

From c11e823ff36baee8bb4802725f0c861156afe5b6 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 16:30:06 -0500
Subject: [PATCH 04/24] remove unused _wrap_results

---
 invokeai/backend/generator/base.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 4b7c2323bd..484c3a4121 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -158,11 +158,6 @@ class InvokeAIGenerator(metaclass=ABCMeta):
             scheduler_name=self.params.scheduler
         )
         uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model)
-
-        def _wrap_results(image: Image, seed: int, **kwargs):
-            nonlocal results
-            results.append(output)
-
         generator = self.load_generator(model, self._generator_name())
         if self.params.variation_amount > 0:
             generator.set_variation(self.params.seed,
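PATCH 03's iteration logic switches between a bounded range and an unbounded counter; the idiom in isolation:

```python
import itertools

def iteration_counter(iterations):
    # None (or 0) means "iterate forever"; otherwise run exactly `iterations` times.
    return range(iterations) if iterations else itertools.count(start=0, step=1)

print(list(iteration_counter(3)))      # [0, 1, 2]
infinite = iteration_counter(None)
print(next(infinite), next(infinite))  # 0 1
```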
""" # prevent nasty-looking CLIP log message transformers.logging.set_verbosity_error() + if not isinstance(config, DictConfig): + config = OmegaConf.load(config) self.config = config self.precision = precision self.device = torch.device(device_type) @@ -557,7 +561,7 @@ class ModelManager(object): """ model_name = model_name or Path(repo_or_path).stem model_description = ( - model_description or f"Imported diffusers model {model_name}" + description or f"Imported diffusers model {model_name}" ) new_config = dict( description=model_description, From c11e823ff36baee8bb4802725f0c861156afe5b6 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 9 Mar 2023 16:30:06 -0500 Subject: [PATCH 04/24] remove unused _wrap_results --- invokeai/backend/generator/base.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index 4b7c2323bd..484c3a4121 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -158,11 +158,6 @@ class InvokeAIGenerator(metaclass=ABCMeta): scheduler_name=self.params.scheduler ) uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model) - - def _wrap_results(image: Image, seed: int, **kwargs): - nonlocal results - results.append(output) - generator = self.load_generator(model, self._generator_name()) if self.params.variation_amount > 0: generator.set_variation(self.params.seed, From 95954188b256db02b78a12a978d0af87f15d0831 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 10 Mar 2023 19:33:04 -0500 Subject: [PATCH 05/24] remove factory pattern Factory pattern is now removed. Typical usage of the InvokeAIGenerator is now: ``` from invokeai.backend.generator import ( InvokeAIGeneratorBasicParams, Txt2Img, Img2Img, Inpaint, ) params = InvokeAIGeneratorBasicParams( model_name = 'stable-diffusion-1.5', steps = 30, scheduler = 'k_lms', cfg_scale = 8.0, height = 640, width = 640 ) print ('=== TXT2IMG TEST ===') txt2img = Txt2Img(manager, params) outputs = txt2img.generate(prompt='banana sushi', iterations=2) for i in outputs: print(f'image={output.image}, seed={output.seed}, model={output.params.model_name}, hash={output.model_hash}, steps={output.params.steps}') ``` The `params` argument is optional, so if you wish to accept default parameters and selectively override them, just do this: ``` outputs = Txt2Img(manager).generate(prompt='banana sushi', steps=50, scheduler='k_heun', model_name='stable-diffusion-2.1' ) ``` --- invokeai/app/api/dependencies.py | 4 +- invokeai/app/cli_app.py | 6 +- invokeai/app/invocations/generate.py | 28 ++---- invokeai/app/services/generate_initializer.py | 13 +-- invokeai/app/services/invocation_services.py | 8 +- invokeai/backend/__init__.py | 1 - invokeai/backend/generator/__init__.py | 1 - invokeai/backend/generator/base.py | 85 ++++++------------- .../backend/model_management/model_manager.py | 2 +- 9 files changed, 44 insertions(+), 104 deletions(-) diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 58e6c81492..70d7d1d1a7 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -4,7 +4,7 @@ import os from argparse import Namespace from ...backend import Globals -from ..services.generate_initializer import get_generator_factory +from ..services.generate_initializer import get_model_manager from ..services.graph import GraphExecutionState from ..services.image_storage import DiskImageStorage from ..services.invocation_queue import MemoryInvocationQueue @@ 
@@ -47,7 +47,7 @@ class ApiDependencies:
         # TODO: Use a logger
         print(f">> Internet connectivity is {Globals.internet_available}")

-        generator_factory = get_generator_factory(args, config)
+        model_manager = get_model_manager(args, config)

         events = FastAPIEventService(event_handler_id)
diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py
index bf347f4061..25e6e5e85c 100644
--- a/invokeai/app/cli_app.py
+++ b/invokeai/app/cli_app.py
@@ -17,7 +17,7 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_gra
 from .invocations import *
 from .invocations.baseinvocation import BaseInvocation
 from .services.events import EventServiceBase
-from .services.generate_initializer import get_generator_factory
+from .services.generate_initializer import get_model_manager
 from .services.graph import EdgeConnection, GraphExecutionState
 from .services.image_storage import DiskImageStorage
 from .services.invocation_queue import MemoryInvocationQueue
@@ -129,7 +129,7 @@ def invoke_cli():
     args = Args()
     config = args.parse_args()

-    generator_factory = get_generator_factory(args, config)
+    model_manager = get_model_manager(args, config)

     events = EventServiceBase()

@@ -141,7 +141,7 @@ def invoke_cli():
     db_location = os.path.join(output_folder, "invokeai.db")

     services = InvocationServices(
-        generator_factory=generator_factory,
+        model_manager=model_manager,
         events=events,
         images=DiskImageStorage(output_folder),
         queue=MemoryInvocationQueue(),
diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index cf2ef8aa45..879b5c5951 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -18,7 +18,6 @@ SAMPLER_NAME_VALUES = Literal[
     tuple(InvokeAIGenerator.schedulers())
 ]

-
 # Text to image
 class TextToImageInvocation(BaseInvocation):
     """Generates an image using text2img."""
@@ -58,15 +57,8 @@ class TextToImageInvocation(BaseInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        factory = context.services.generator_factory
-        if self.model:
-            factory.model_name = self.model
-        else:
-            self.model = factory.model_name
-
-        txt2img = factory.make_generator(Txt2Img)
-
-        outputs = txt2img.generate(
+        manager = context.services.model_manager
+        outputs = Txt2Img(manager).generate(
             prompt=self.prompt,
             step_callback=step_callback,
             **self.dict(
@@ -121,13 +113,9 @@ class ImageToImageInvocation(TextToImageInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        factory = context.services.generator_factory
-        self.model = self.model or factory.model_name
-        factory.model_name = self.model
-        img2img = factory.make_generator(Img2Img)
-
+        manager = context.services.model_manager
         generator_output = next(
-            img2img.generate(
+            Img2Img(manager).generate(
                 prompt=self.prompt,
                 init_img=image,
                 init_mask=mask,
@@ -186,13 +174,9 @@ class InpaintInvocation(ImageToImageInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        factory = context.services.generator_factory
-        self.model = self.model or factory.model_name
-        factory.model_name = self.model
-        inpaint = factory.make_generator(Inpaint)
-
+        manager = context.services.model_manager
         generator_output = next(
-            inpaint.generate(
+            Inpaint(manager).generate(
                 prompt=self.prompt,
                 init_img=image,
                 init_mask=mask,
diff --git a/invokeai/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py
index 4ac5a5d706..144e50088e 100644
--- a/invokeai/app/services/generate_initializer.py
+++ b/invokeai/app/services/generate_initializer.py
@@ -6,12 +6,12 @@ from argparse import Namespace
 from omegaconf import OmegaConf

 import invokeai.version
-from ...backend import ModelManager, InvokeAIGeneratorBasicParams, InvokeAIGeneratorFactory
+from ...backend import ModelManager
 from ...backend.util import choose_precision, choose_torch_device
 from ...backend import Globals

 # TODO: most of this code should be split into individual services as the Generate.py code is deprecated
-def get_generator_factory(args, config) -> InvokeAIGeneratorFactory:
+def get_model_manager(args, config) -> ModelManager:
     if not args.conf:
         config_file = os.path.join(Globals.root, "configs", "models.yaml")
         if not os.path.exists(config_file):
@@ -64,7 +64,7 @@ def get_model_manager(args, config) -> ModelManager:
         print(f"{e}. Aborting.")
         sys.exit(-1)

-    # creating an InvokeAIGeneratorFactory object:
+    # creating the model manager
     try:
         device = torch.device(choose_torch_device())
         precision = 'float16' if args.precision=='float16' \
@@ -77,11 +77,6 @@ def get_model_manager(args, config) -> ModelManager:
             device_type=device,
             max_loaded_models=args.max_loaded_models,
         )
-        # TO DO: initialize and pass safety checker!!!
-        params = InvokeAIGeneratorBasicParams(
-            precision=precision,
-        )
-        factory = InvokeAIGeneratorFactory(model_manager, params)
     except (FileNotFoundError, TypeError, AssertionError) as e:
         report_model_error(args, e)
     except (IOError, KeyError) as e:
         print(f"{e}. Aborting.")
         sys.exit(-1)
@@ -100,7 +95,7 @@ def get_model_manager(args, config) -> ModelManager:
             weights_directory=path,
         )

-    return factory
+    return model_manager

 def load_face_restoration(opt):
     try:
diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py
index 0177d79107..c51299b688 100644
--- a/invokeai/app/services/invocation_services.py
+++ b/invokeai/app/services/invocation_services.py
@@ -1,5 +1,5 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from invokeai.backend import InvokeAIGeneratorFactory
+from invokeai.backend import ModelManager

 from .events import EventServiceBase
 from .image_storage import ImageStorageBase
@@ -10,7 +10,7 @@ from .item_storage import ItemStorageABC
 class InvocationServices:
     """Services that can be used by invocations"""

-    generator_factory: InvokeAIGeneratorFactory
+    model_manager: ModelManager
     events: EventServiceBase
     images: ImageStorageBase
     queue: InvocationQueueABC
@@ -21,14 +21,14 @@ class InvocationServices:

     def __init__(
             self,
-            generator_factory: InvokeAIGeneratorFactory,
+            model_manager: ModelManager,
             events: EventServiceBase,
             images: ImageStorageBase,
             queue: InvocationQueueABC,
             graph_execution_manager: ItemStorageABC["GraphExecutionState"],
             processor: "InvocationProcessorABC",
     ):
-        self.generator_factory = generator_factory
+        self.model_manager = model_manager
         self.events = events
         self.images = images
         self.queue = queue
diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py
index f08050f576..a816486631 100644
--- a/invokeai/backend/__init__.py
+++ b/invokeai/backend/__init__.py
@@ -4,7 +4,6 @@ Initialization file for invokeai.backend
 from .generate import Generate
 from .generator import (
     InvokeAIGeneratorBasicParams,
-    InvokeAIGeneratorFactory,
     InvokeAIGenerator,
     InvokeAIGeneratorOutput,
     Txt2Img,
diff --git a/invokeai/backend/generator/__init__.py b/invokeai/backend/generator/__init__.py
index d617622ed8..9d6263453a 100644
--- a/invokeai/backend/generator/__init__.py
+++ b/invokeai/backend/generator/__init__.py
@@ -2,7 +2,6 @@
 Initialization file for the invokeai.generator package
 """
 from .base import (
-    InvokeAIGeneratorFactory,
     InvokeAIGenerator,
     InvokeAIGeneratorBasicParams,
     InvokeAIGeneratorOutput,
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 484c3a4121..db1afa0f88 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -11,7 +11,8 @@ import diffusers
 import os
 import random
 import traceback
-from abc import ABCMeta, abstractmethod
+from abc import ABCMeta
+from argparse import Namespace
 from contextlib import nullcontext

 import cv2
@@ -21,7 +22,7 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
-from typing import List, Type, Iterator
+from typing import List, Iterator
 from dataclasses import dataclass, field
 from diffusers.schedulers import SchedulerMixin as Scheduler

@@ -35,13 +36,13 @@ downsampling = 8

 @dataclass
 class InvokeAIGeneratorBasicParams:
+    model_name: str='stable-diffusion-1.5'
     seed: int=None
     width: int=512
     height: int=512
     cfg_scale: int=7.5
     steps: int=20
     ddim_eta: float=0.0
-    model_name: str='stable-diffusion-1.5'
     scheduler: int='ddim'
     precision: str='float16'
     perlin: float=0.0
@@ -62,41 +63,8 @@ class InvokeAIGeneratorOutput:
     '''
     image: Image
     seed: int
-    model_name: str
     model_hash: str
-    params: dict
-
-    def __getattribute__(self,name):
-        try:
-            return object.__getattribute__(self, name)
-        except AttributeError:
-            params = object.__getattribute__(self, 'params')
-            if name in params:
-                return params[name]
-            raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")
-
-class InvokeAIGeneratorFactory(object):
-    def __init__(self,
-                 model_manager: ModelManager,
-                 params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
-                 ):
-        self.model_manager = model_manager
-        self.params = params
-
-    def make_generator(self, generatorclass: Type[InvokeAIGenerator], **keyword_args)->InvokeAIGenerator:
-        return generatorclass(self.model_manager,
-                              self.params,
-                              **keyword_args
-                              )
-
-    # getter and setter shortcuts for commonly used parameters
-    @property
-    def model_name(self)->str:
-        return self.params.model_name
-
-    @model_name.setter
-    def model_name(self, model_name: str):
-        self.params.model_name=model_name
+    params: Namespace

 # we are interposing a wrapper around the original Generator classes so that
 # old code that calls Generate will continue to work.
@@ -116,7 +84,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):

     def __init__(self,
                  model_manager: ModelManager,
-                 params: InvokeAIGeneratorBasicParams,
+                 params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
                  ):
         self.model_manager=model_manager
         self.params=params
@@ -149,23 +117,24 @@ class InvokeAIGenerator(metaclass=ABCMeta):
             print(o.image, o.seed)

         '''
-        model_name = self.params.model_name or self.model_manager.current_model
+        generator_args = dataclasses.asdict(self.params)
+        generator_args.update(keyword_args)
+
+        model_name = generator_args.get('model_name') or self.model_manager.current_model
         model_info: dict = self.model_manager.get_model(model_name)
         model:StableDiffusionGeneratorPipeline = model_info['model']
         model_hash = model_info['hash']
         scheduler: Scheduler = self.get_scheduler(
             model=model,
-            scheduler_name=self.params.scheduler
+            scheduler_name=generator_args.get('scheduler')
         )
         uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model)
         generator = self.load_generator(model, self._generator_name())
         if self.params.variation_amount > 0:
-            generator.set_variation(self.params.seed,
-                                    self.params.variation_amount,
-                                    self.params.with_variations)
-
-        generator_args = dataclasses.asdict(self.params)
-        generator_args.update(keyword_args)
+            generator.set_variation(generator_args.get('seed'),
+                                    generator_args.get('variation_amount'),
+                                    generator_args.get('with_variations')
+                                    )

         iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1)
         for i in iteration_count:
@@ -177,9 +146,8 @@ class InvokeAIGenerator(metaclass=ABCMeta):
             output = InvokeAIGeneratorOutput(
                 image=results[0][0],
                 seed=results[0][1],
-                model_name = model_name,
                 model_hash = model_hash,
-                params=generator_args,
+                params=Namespace(**generator_args),
             )
             if callback:
                 callback(output)
@@ -205,18 +173,19 @@ class InvokeAIGenerator(metaclass=ABCMeta):
         if not hasattr(scheduler, 'uses_inpainting_model'):
             scheduler.uses_inpainting_model = lambda: False
         return scheduler
-
-    @abstractmethod
-    def _generator_name(self)->str:
+
+    @classmethod
+    def _generator_name(cls):
         '''
-        In derived classes will return the name of the generator to use.
+        In derived classes return the name of the generator to apply.
+        If you don't override will return the name of the derived
+        class, which nicely parallels the generator class names.
         '''
-        pass
+        return cls.__name__

 # ------------------------------------
 class Txt2Img(InvokeAIGenerator):
-    def _generator_name(self)->str:
-        return 'Txt2Img'
+    pass

 # ------------------------------------
 class Img2Img(InvokeAIGenerator):
@@ -198,9 +199,6 @@ class Img2Img(InvokeAIGenerator):
                                 strength=strength,
                                 **keyword_args
                                 )

-    def _generator_name(self)->str:
-        return 'Img2Img'
-
 # ------------------------------------
 # Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
 class Inpaint(Img2Img):
@@ -231,9 +232,6 @@ class Inpaint(Img2Img):
             **keyword_args
         )

-    def _generator_name(self)->str:
-        return 'Inpaint'
-
 class Generator:
     downsampling_factor: int
     latent_channels: int
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 4224f848d1..32ac4b180c 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -34,7 +34,7 @@ from picklescan.scanner import scan_file_path
 from invokeai.backend.globals import Globals, global_cache_dir

 from ..stable_diffusion import StableDiffusionGeneratorPipeline
-from ..util import CUDA_DEVICE, ask_user, download_with_resume
+from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume

 class SDLegacyType(Enum):
     V1 = 1
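PATCH 05's classmethod default leans on `cls.__name__`, so each subclass reports its own name without an override; the pattern in isolation:

```python
class Base:
    @classmethod
    def _generator_name(cls) -> str:
        # Subclasses inherit this and automatically report their own class name.
        return cls.__name__

class Txt2Img(Base):
    pass

print(Txt2Img._generator_name())  # 'Txt2Img'
```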
''' - return cls.__name__ + return Generator # ------------------------------------ class Txt2Img(InvokeAIGenerator): - pass + @classmethod + def _generator_class(cls): + from .txt2img import Txt2Img + return Txt2Img # ------------------------------------ class Img2Img(InvokeAIGenerator): @@ -198,6 +199,10 @@ class Img2Img(InvokeAIGenerator): strength=strength, **keyword_args ) + @classmethod + def _generator_class(cls): + from .img2img import Img2Img + return Img2Img # ------------------------------------ # Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff @@ -231,6 +236,29 @@ class Inpaint(Img2Img): inpaint_fill=inpaint_fill, **keyword_args ) + @classmethod + def _generator_class(cls): + from .inpaint import Inpaint + return Inpaint + +# ------------------------------------ +class Embiggen(Txt2Img): + def generate( + self, + embiggen: list=None, + embiggen_tiles: list = None, + strength: float=0.75, + **kwargs)->List[InvokeAIGeneratorOutput]: + return super().generate(embiggen=embiggen, + embiggen_tiles=embiggen_tiles, + strength=strength, + **kwargs) + + @classmethod + def _generator_class(cls): + from .embiggen import Embiggen + return Embiggen + class Generator: downsampling_factor: int From 675dd12b6c66f116249e129a58a042fc489f4c20 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 08:05:56 -0500 Subject: [PATCH 07/24] add attention map images to output object --- invokeai/backend/generator/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index e2ff81beb7..6f71eee2c7 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -64,6 +64,7 @@ class InvokeAIGeneratorOutput: image: Image seed: int model_hash: str + attention_maps_images: List[Image] params: Namespace # we are interposing a wrapper around the original Generator classes so that @@ -147,6 +148,7 @@ class InvokeAIGenerator(metaclass=ABCMeta): output = InvokeAIGeneratorOutput( image=results[0][0], seed=results[0][1], + attention_maps_images=results[0][2], model_hash = model_hash, params=Namespace(**generator_args), ) @@ -366,7 +368,7 @@ class Generator: if self.safety_checker is not None: image = self.safety_checker.check(image) - results.append([image, seed]) + results.append([image, seed, attention_maps_images]) if image_callback is not None: attention_maps_image = ( From 250b0ab182407c7200fbae7a2c9a4056bc39b8c2 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 08:33:23 -0500 Subject: [PATCH 08/24] add seamless tiling support --- invokeai/backend/generator/base.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index 6f71eee2c7..fd62c659ea 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -4,7 +4,6 @@ including img2img, txt2img, and inpaint """ from __future__ import annotations -import importlib import itertools import dataclasses import diffusers @@ -22,10 +21,11 @@ from PIL import Image, ImageChops, ImageFilter from accelerate.utils import set_seed from diffusers import DiffusionPipeline from tqdm import trange -from typing import List, Iterator +from typing import List, Iterator, Type from dataclasses import dataclass, field from diffusers.schedulers import SchedulerMixin as Scheduler +from ..image_util import configure_model_padding from ..util.util import rand_perlin_2d 
from ..safety_checker import SafetyChecker from ..prompting.conditioning import get_uc_and_c_and_ec @@ -47,6 +47,8 @@ class InvokeAIGeneratorBasicParams: precision: str='float16' perlin: float=0.0 threshold: int=0.0 + seamless: bool=False + seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y']) h_symmetry_time_pct: float=None v_symmetry_time_pct: float=None variation_amount: float = 0.0 @@ -138,6 +140,18 @@ class InvokeAIGenerator(metaclass=ABCMeta): generator_args.get('with_variations') ) + if isinstance(model, DiffusionPipeline): + for component in [model.unet, model.vae]: + configure_model_padding(component, + generator_args.get('seamless',False), + generator_args.get('seamless_axes') + ) + else: + configure_model_padding(model, + generator_args.get('seamless',False), + generator_args.get('seamless_axes') + ) + iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1) for i in iteration_count: results = generator.generate(prompt, From d612f11c1102e069dd055fd8c7a5924432de31dd Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 09:06:46 -0500 Subject: [PATCH 09/24] initialize InvokeAIGenerator object with model, not manager --- invokeai/app/invocations/generate.py | 13 +++++++------ invokeai/backend/generator/base.py | 12 +++++------- invokeai/backend/model_management/model_manager.py | 7 ++++++- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py index 879b5c5951..c1a0028293 100644 --- a/invokeai/app/invocations/generate.py +++ b/invokeai/app/invocations/generate.py @@ -57,8 +57,9 @@ class TextToImageInvocation(BaseInvocation): # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? - manager = context.services.model_manager - outputs = Txt2Img(manager).generate( + # (right now uses whatever current model is set in model manager) + model= context.services.model_manager.get_model() + outputs = Txt2Img(model).generate( prompt=self.prompt, step_callback=step_callback, **self.dict( @@ -113,9 +114,9 @@ class ImageToImageInvocation(TextToImageInvocation): # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? - manager = context.services.model_manager + model = context.services.model_manager.get_model() generator_output = next( - Img2Img(manager).generate( + Img2Img(model).generate( prompt=self.prompt, init_img=image, init_mask=mask, @@ -174,9 +175,9 @@ class InpaintInvocation(ImageToImageInvocation): # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? 
-        manager = context.services.model_manager
+        model = context.services.model_manager.get_model()
         generator_output = next(
-            Inpaint(manager).generate(
+            Inpaint(model).generate(
                 prompt=self.prompt,
                 init_img=image,
                 init_mask=mask,
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index fd62c659ea..4ec0f9d54f 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -29,14 +29,12 @@ from ..image_util import configure_model_padding
 from ..util.util import rand_perlin_2d
 from ..safety_checker import SafetyChecker
 from ..prompting.conditioning import get_uc_and_c_and_ec
-from ..model_management.model_manager import ModelManager
 from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline

 downsampling = 8

 @dataclass
 class InvokeAIGeneratorBasicParams:
-    model_name: str='stable-diffusion-1.5'
     seed: int=None
     width: int=512
     height: int=512
@@ -86,10 +84,10 @@ class InvokeAIGenerator(metaclass=ABCMeta):
     )

     def __init__(self,
-                 model_manager: ModelManager,
+                 model_info: dict,
                  params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
                  ):
-        self.model_manager=model_manager
+        self.model_info=model_info
         self.params=params

     def generate(self,
@@ -123,8 +121,8 @@ class InvokeAIGenerator(metaclass=ABCMeta):
         generator_args = dataclasses.asdict(self.params)
         generator_args.update(keyword_args)

-        model_name = generator_args.get('model_name') or self.model_manager.current_model
-        model_info: dict = self.model_manager.get_model(model_name)
+        model_info = self.model_info
+        model_name = model_info['model_name']
         model:StableDiffusionGeneratorPipeline = model_info['model']
         model_hash = model_info['hash']
         scheduler: Scheduler = self.get_scheduler(
@@ -164,7 +162,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
             seed=results[0][1],
             attention_maps_images=results[0][2],
             model_hash = model_hash,
-            params=Namespace(**generator_args),
+            params=Namespace(model_name=model_name,**generator_args),
         )
         if callback:
             callback(output)
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index c62f4198e5..c17abc44e3 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -88,12 +88,15 @@ class ModelManager(object):
         """
         return model_name in self.config

-    def get_model(self, model_name: str):
+    def get_model(self, model_name: str=None):
         """
         Given a model identified in models.yaml, return
         the model object. If in RAM will load into GPU VRAM.
         If on disk, will load from there.
         """
+        if not model_name:
+            return self.get_model(self.current_model) if self.current_model else self.get_model(self.default_model())
+
         if not self.valid_model(model_name):
             print(
                 f'** "{model_name}" is not a known model name. Please check your models.yaml file'
             )
@@ -116,6 +119,7 @@ class ModelManager(object):
         else:  # we're about to load a new model, so potentially offload the least recently used one
             requested_model, width, height, hash = self._load_model(model_name)
             self.models[model_name] = {
+                "model_name": model_name,
                 "model": requested_model,
                 "width": width,
                 "height": height,
@@ -125,6 +129,7 @@ class ModelManager(object):
         self.current_model = model_name
         self._push_newest_model(model_name)
         return {
+            "model_name": model_name,
             "model": requested_model,
             "width": width,
             "height": height,
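Taken together, patches 05 through 09 change the generator calling convention: callers fetch a loaded model dict from the ModelManager (get_model() with no argument now falls back to the current model, or to default_model()) and hand that dict straight to a generator class. Below is a minimal usage sketch of the resulting API; the config path, prompt, and parameter values are illustrative assumptions, not taken from the patches:

    # Sketch only: mirrors the API shown in the diffs above; paths and prompts are invented.
    from omegaconf import OmegaConf
    from invokeai.backend import ModelManager
    from invokeai.backend.generator import Txt2Img, InvokeAIGeneratorBasicParams

    manager = ModelManager(OmegaConf.load('configs/models.yaml'), precision='float16')
    model_info = manager.get_model()   # no name: current model, else default_model()

    params = InvokeAIGeneratorBasicParams(steps=30, scheduler='k_euler_a')
    txt2img = Txt2Img(model_info, params)

    # generate() yields one InvokeAIGeneratorOutput per image
    for output in txt2img.generate(prompt='a sunlit forest clearing', iterations=1):
        print(output.params.model_name, output.seed)
        output.image.save(f'{output.seed}.png')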
From 027b316fd23393e83b563507c1935205a4148ab1 Mon Sep 17 00:00:00 2001
From: gallegonovato
Date: Sat, 11 Mar 2023 16:56:03 +0100
Subject: [PATCH 10/24] translationBot(ui): update translation (Spanish)

Currently translated at 100.0% (500 of 500 strings)

translationBot(ui): update translation (Spanish)

Currently translated at 100.0% (482 of 482 strings)

translationBot(ui): update translation (Spanish)

Currently translated at 100.0% (480 of 480 strings)

Co-authored-by: gallegonovato
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/es/
Translation: InvokeAI/Web UI
---
 invokeai/frontend/web/public/locales/es.json | 40 ++++++++++++++++++--
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json
index a52d9c10ec..ad3fdaf3ed 100644
--- a/invokeai/frontend/web/public/locales/es.json
+++ b/invokeai/frontend/web/public/locales/es.json
@@ -63,7 +63,14 @@
     "back": "Atrás",
     "statusConvertingModel": "Convertir el modelo",
     "statusModelConverted": "Modelo adaptado",
-    "statusMergingModels": "Fusionar modelos"
+    "statusMergingModels": "Fusionar modelos",
+    "oceanTheme": "Océano",
+    "langPortuguese": "Portugués",
+    "langKorean": "Coreano",
+    "langHebrew": "Hebreo",
+    "pinOptionsPanel": "Pin del panel de opciones",
+    "loading": "Cargando",
+    "loadingInvokeAI": "Cargando Invoke AI"
   },
   "gallery": {
     "generations": "Generaciones",
@@ -385,14 +392,19 @@
     "modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
     "modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. 
La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.", "ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados", - "modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.", + "modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.", "inverseSigmoid": "Sigmoideo inverso", "weightedSum": "Modelo de suma ponderada", "sigmoid": "Función sigmoide", "allModels": "Todos los modelos", "repo_id": "Identificador del repositorio", "pathToCustomConfig": "Ruta a la configuración personalizada", - "customConfig": "Configuración personalizada" + "customConfig": "Configuración personalizada", + "v2_base": "v2 (512px)", + "none": "ninguno", + "pickModelType": "Elige el tipo de modelo", + "v2_768": "v2 (768px)", + "addDifference": "Añadir una diferencia" }, "parameters": { "images": "Imágenes", @@ -588,5 +600,27 @@ "betaDarkenOutside": "Oscurecer fuera", "betaLimitToBox": "Limitar a caja", "betaPreserveMasked": "Preservar área enmascarada" + }, + "accessibility": { + "invokeProgressBar": "Activar la barra de progreso", + "modelSelect": "Seleccionar modelo", + "reset": "Reiniciar", + "uploadImage": "Cargar imagen", + "previousImage": "Imagen anterior", + "nextImage": "Siguiente imagen", + "useThisParameter": "Utiliza este parámetro", + "copyMetadataJson": "Copiar los metadatos JSON", + "exitViewer": "Salir del visor", + "zoomIn": "Acercar", + "zoomOut": "Alejar", + "rotateCounterClockwise": "Girar en sentido antihorario", + "rotateClockwise": "Girar en sentido horario", + "flipHorizontally": "Voltear horizontalmente", + "flipVertically": "Voltear verticalmente", + "modifyConfig": "Modificar la configuración", + "toggleAutoscroll": "Activar el autodesplazamiento", + "toggleLogViewer": "Alternar el visor de registros", + "showGallery": "Mostrar galería", + "showOptionsPanel": "Mostrar el panel de opciones" } } From 8833d76709b89cac430e42109eeb079e317269b1 Mon Sep 17 00:00:00 2001 From: Riccardo Giovanetti Date: Sat, 11 Mar 2023 16:56:04 +0100 Subject: [PATCH 11/24] translationBot(ui): update translation (Italian) Currently translated at 100.0% (500 of 500 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (500 of 500 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (482 of 482 strings) translationBot(ui): update translation (Italian) Currently translated at 100.0% (480 of 480 strings) Co-authored-by: Riccardo Giovanetti Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/it.json | 38 ++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 9bdf5b7798..61aa5c6a08 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -63,7 +63,14 @@ "langSimplifiedChinese": "Cinese semplificato", "langDutch": "Olandese", "statusModelConverted": "Modello Convertito", - "statusConvertingModel": "Conversione Modello" + "statusConvertingModel": "Conversione Modello", + "langKorean": "Coreano", + "langPortuguese": "Portoghese", + "pinOptionsPanel": "Blocca il pannello Opzioni", + "loading": "Caricamento in corso", + "oceanTheme": "Oceano", + "langHebrew": "Ebraico", + "loadingInvokeAI": "Caricamento 
Invoke AI" }, "gallery": { "generations": "Generazioni", @@ -392,7 +399,12 @@ "customSaveLocation": "Ubicazione salvataggio personalizzata", "weightedSum": "Somma pesata", "sigmoid": "Sigmoide", - "inverseSigmoid": "Sigmoide inverso" + "inverseSigmoid": "Sigmoide inverso", + "v2_base": "v2 (512px)", + "v2_768": "v2 (768px)", + "none": "niente", + "addDifference": "Aggiungi differenza", + "pickModelType": "Scegli il tipo di modello" }, "parameters": { "images": "Immagini", @@ -588,5 +600,27 @@ "betaDarkenOutside": "Oscura all'esterno", "betaLimitToBox": "Limita al rettangolo", "betaPreserveMasked": "Conserva quanto mascherato" + }, + "accessibility": { + "modelSelect": "Seleziona modello", + "invokeProgressBar": "Barra di avanzamento generazione", + "uploadImage": "Carica immagine", + "previousImage": "Immagine precedente", + "nextImage": "Immagine successiva", + "useThisParameter": "Usa questo parametro", + "reset": "Reimposta", + "copyMetadataJson": "Copia i metadati JSON", + "exitViewer": "Esci dal visualizzatore", + "zoomIn": "Zoom avanti", + "zoomOut": "Zoom Indietro", + "rotateCounterClockwise": "Ruotare in senso antiorario", + "rotateClockwise": "Ruotare in senso orario", + "flipHorizontally": "Capovolgi orizzontalmente", + "toggleLogViewer": "Attiva/disattiva visualizzatore registro", + "showGallery": "Mostra la galleria immagini", + "showOptionsPanel": "Mostra il pannello opzioni", + "flipVertically": "Capovolgi verticalmente", + "toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico", + "modifyConfig": "Modifica configurazione" } } From 0b953d98f51d61cf49c2f9f11c5bc3024d1c2104 Mon Sep 17 00:00:00 2001 From: Felipe Nogueira Date: Sat, 11 Mar 2023 16:56:04 +0100 Subject: [PATCH 12/24] translationBot(ui): update translation (Portuguese (Brazil)) Currently translated at 98.1% (471 of 480 strings) Co-authored-by: Felipe Nogueira Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/pt_BR/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/pt_BR.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 7d433aa430..18b7ab57e1 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -63,7 +63,10 @@ "statusMergingModels": "Mesclando Modelos", "statusMergedModels": "Modelos Mesclados", "langRussian": "Russo", - "langSpanish": "Espanhol" + "langSpanish": "Espanhol", + "pinOptionsPanel": "Fixar painel de opções", + "loadingInvokeAI": "Carregando Invoke AI", + "loading": "Carregando" }, "gallery": { "generations": "Gerações", From 40d82b29cfce40d142e9282ae0c7541599f8bce6 Mon Sep 17 00:00:00 2001 From: "wa.code" Date: Sat, 11 Mar 2023 16:56:05 +0100 Subject: [PATCH 13/24] translationBot(ui): update translation (Chinese (Traditional)) Currently translated at 7.0% (34 of 480 strings) Co-authored-by: wa.code Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hant/ Translation: InvokeAI/Web UI --- .../frontend/web/public/locales/zh_Hant.json | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/zh_Hant.json b/invokeai/frontend/web/public/locales/zh_Hant.json index b0ae670e01..af7b0cf328 100644 --- a/invokeai/frontend/web/public/locales/zh_Hant.json +++ b/invokeai/frontend/web/public/locales/zh_Hant.json @@ -19,6 +19,21 @@ "discordLabel": "Discord", "nodesDesc": 
"使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。", "reportBugLabel": "回報錯誤", - "githubLabel": "GitHub" + "githubLabel": "GitHub", + "langKorean": "韓語", + "langPortuguese": "葡萄牙語", + "hotkeysLabel": "快捷鍵", + "languagePickerLabel": "切換語言", + "langDutch": "荷蘭語", + "langFrench": "法語", + "langGerman": "德語", + "langItalian": "義大利語", + "langJapanese": "日語", + "langPolish": "波蘭語", + "langBrPortuguese": "巴西葡萄牙語", + "langRussian": "俄語", + "langSpanish": "西班牙語", + "text2img": "文字到圖像", + "unifiedCanvas": "統一畫布" } } From 8ec2ae7954b65f43aca0c540eed60e644dacc972 Mon Sep 17 00:00:00 2001 From: Sergey Krashevich Date: Sat, 11 Mar 2023 16:56:05 +0100 Subject: [PATCH 14/24] translationBot(ui): update translation (Russian) Currently translated at 86.3% (416 of 482 strings) Co-authored-by: Sergey Krashevich Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ru/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ru.json | 38 ++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index e08540809b..d4178119e4 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -46,7 +46,15 @@ "statusLoadingModel": "Загрузка модели", "statusModelChanged": "Модель изменена", "githubLabel": "Github", - "discordLabel": "Discord" + "discordLabel": "Discord", + "statusMergingModels": "Слияние моделей", + "statusModelConverted": "Модель сконвертирована", + "statusMergedModels": "Модели объединены", + "pinOptionsPanel": "Закрепить панель настроек", + "loading": "Загрузка", + "loadingInvokeAI": "Загрузка Invoke AI", + "back": "Назад", + "statusConvertingModel": "Конвертация модели" }, "gallery": { "generations": "Генерации", @@ -323,7 +331,30 @@ "deleteConfig": "Удалить конфигурацию", "deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?", "deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.", - "repoIDValidationMsg": "Онлайн-репозиторий модели" + "repoIDValidationMsg": "Онлайн-репозиторий модели", + "convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 4 – 7 Гб.", + "invokeAIFolder": "Каталог InvokeAI", + "ignoreMismatch": "Игнорировать несоответствия между выбранными моделями", + "addCheckpointModel": "Добавить модель Checkpoint/Safetensor", + "formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.", + "convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. Вы сможете заново добавить его в Model Manager при необходимости.", + "vaeRepoID": "ID репозитория VAE", + "mergedModelName": "Название объединенной модели", + "checkpointModels": "Checkpoints", + "allModels": "Все модели", + "addDiffuserModel": "Добавить Diffusers", + "repo_id": "ID репозитория", + "formMessageDiffusersVAELocationDesc": "Если не указано, InvokeAI будет искать файл VAE рядом с моделью.", + "convert": "Преобразовать", + "convertToDiffusers": "Преобразовать в Diffusers", + "convertToDiffusersHelpText1": "Модель будет преобразована в формат 🧨 Diffusers.", + "convertToDiffusersHelpText4": "Это единоразовое действие. 
Оно может занять 30—60 секунд в зависимости от характеристик вашего компьютера.", + "convertToDiffusersHelpText6": "Вы хотите преобразовать эту модель?", + "statusConverting": "Преобразование", + "modelConverted": "Модель преобразована", + "invokeRoot": "Каталог InvokeAI", + "modelsMerged": "Модели объединены", + "mergeModels": "Объединить модели" }, "parameters": { "images": "Изображения", @@ -503,5 +534,8 @@ "betaDarkenOutside": "Затемнить снаружи", "betaLimitToBox": "Ограничить выделением", "betaPreserveMasked": "Сохранять маскируемую область" + }, + "accessibility": { + "modelSelect": "Выбор модели" } } From 50b56d6088dade131c2cb387fa095e0a32630669 Mon Sep 17 00:00:00 2001 From: ssantos Date: Sat, 11 Mar 2023 16:56:06 +0100 Subject: [PATCH 15/24] translationBot(ui): update translation (Portuguese) Currently translated at 99.2% (496 of 500 strings) Co-authored-by: ssantos Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/pt/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/pt.json | 556 ++++++++++++++++++- 1 file changed, 555 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index 39d383e37f..6e26b9ea56 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -63,6 +63,560 @@ "statusGeneratingOutpainting": "Geração de Ampliação", "statusGenerationComplete": "Geração Completa", "statusMergingModels": "Mesclando Modelos", - "statusMergedModels": "Modelos Mesclados" + "statusMergedModels": "Modelos Mesclados", + "oceanTheme": "Oceano", + "pinOptionsPanel": "Fixar painel de opções", + "loading": "A carregar", + "loadingInvokeAI": "A carregar Invoke AI", + "langPortuguese": "Português" + }, + "gallery": { + "galleryImageResetSize": "Resetar Imagem", + "gallerySettings": "Configurações de Galeria", + "maintainAspectRatio": "Mater Proporções", + "autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente", + "pinGallery": "Fixar Galeria", + "singleColumnLayout": "Disposição em Coluna Única", + "allImagesLoaded": "Todas as Imagens Carregadas", + "loadMore": "Carregar Mais", + "noImagesInGallery": "Sem Imagens na Galeria", + "generations": "Gerações", + "showGenerations": "Mostrar Gerações", + "uploads": "Enviados", + "showUploads": "Mostrar Enviados", + "galleryImageSize": "Tamanho da Imagem" + }, + "hotkeys": { + "generalHotkeys": "Atalhos Gerais", + "galleryHotkeys": "Atalhos da Galeria", + "toggleViewer": { + "title": "Ativar Visualizador", + "desc": "Abrir e fechar o Visualizador de Imagens" + }, + "maximizeWorkSpace": { + "desc": "Fechar painéis e maximixar área de trabalho", + "title": "Maximizar a Área de Trabalho" + }, + "changeTabs": { + "title": "Mudar Guias", + "desc": "Trocar para outra área de trabalho" + }, + "consoleToggle": { + "desc": "Abrir e fechar console", + "title": "Ativar Console" + }, + "setPrompt": { + "title": "Definir Prompt", + "desc": "Usar o prompt da imagem atual" + }, + "sendToImageToImage": { + "desc": "Manda a imagem atual para Imagem Para Imagem", + "title": "Mandar para Imagem Para Imagem" + }, + "previousImage": { + "desc": "Mostra a imagem anterior na galeria", + "title": "Imagem Anterior" + }, + "nextImage": { + "title": "Próxima Imagem", + "desc": "Mostra a próxima imagem na galeria" + }, + "decreaseGalleryThumbSize": { + "desc": "Diminui o tamanho das thumbs na galeria", + "title": "Diminuir Tamanho da Galeria de Imagem" + }, + "selectBrush": { + "title": "Selecionar 
Pincel", + "desc": "Seleciona o pincel" + }, + "selectEraser": { + "title": "Selecionar Apagador", + "desc": "Seleciona o apagador" + }, + "decreaseBrushSize": { + "title": "Diminuir Tamanho do Pincel", + "desc": "Diminui o tamanho do pincel/apagador" + }, + "increaseBrushOpacity": { + "desc": "Aumenta a opacidade do pincel", + "title": "Aumentar Opacidade do Pincel" + }, + "moveTool": { + "title": "Ferramenta Mover", + "desc": "Permite navegar pela tela" + }, + "decreaseBrushOpacity": { + "desc": "Diminui a opacidade do pincel", + "title": "Diminuir Opacidade do Pincel" + }, + "toggleSnap": { + "title": "Ativar Encaixe", + "desc": "Ativa Encaixar na Grade" + }, + "quickToggleMove": { + "title": "Ativar Mover Rapidamente", + "desc": "Temporariamente ativa o modo Mover" + }, + "toggleLayer": { + "title": "Ativar Camada", + "desc": "Ativa a seleção de camada de máscara/base" + }, + "clearMask": { + "title": "Limpar Máscara", + "desc": "Limpa toda a máscara" + }, + "hideMask": { + "title": "Esconder Máscara", + "desc": "Esconde e Revela a máscara" + }, + "mergeVisible": { + "title": "Fundir Visível", + "desc": "Fundir todas as camadas visíveis das telas" + }, + "downloadImage": { + "desc": "Descarregar a tela atual", + "title": "Descarregar Imagem" + }, + "undoStroke": { + "title": "Desfazer Traço", + "desc": "Desfaz um traço de pincel" + }, + "redoStroke": { + "title": "Refazer Traço", + "desc": "Refaz o traço de pincel" + }, + "keyboardShortcuts": "Atalhos de Teclado", + "appHotkeys": "Atalhos do app", + "invoke": { + "title": "Invocar", + "desc": "Gerar uma imagem" + }, + "cancel": { + "title": "Cancelar", + "desc": "Cancelar geração de imagem" + }, + "focusPrompt": { + "title": "Foco do Prompt", + "desc": "Foco da área de texto do prompt" + }, + "toggleOptions": { + "title": "Ativar Opções", + "desc": "Abrir e fechar o painel de opções" + }, + "pinOptions": { + "title": "Fixar Opções", + "desc": "Fixar o painel de opções" + }, + "closePanels": { + "title": "Fechar Painéis", + "desc": "Fecha os painéis abertos" + }, + "unifiedCanvasHotkeys": "Atalhos da Tela Unificada", + "toggleGallery": { + "title": "Ativar Galeria", + "desc": "Abrir e fechar a gaveta da galeria" + }, + "setSeed": { + "title": "Definir Seed", + "desc": "Usar seed da imagem atual" + }, + "setParameters": { + "title": "Definir Parâmetros", + "desc": "Usar todos os parâmetros da imagem atual" + }, + "restoreFaces": { + "title": "Restaurar Rostos", + "desc": "Restaurar a imagem atual" + }, + "upscale": { + "title": "Redimensionar", + "desc": "Redimensionar a imagem atual" + }, + "showInfo": { + "title": "Mostrar Informações", + "desc": "Mostrar metadados de informações da imagem atual" + }, + "deleteImage": { + "title": "Apagar Imagem", + "desc": "Apaga a imagem atual" + }, + "toggleGalleryPin": { + "title": "Ativar Fixar Galeria", + "desc": "Fixa e desafixa a galeria na interface" + }, + "increaseGalleryThumbSize": { + "title": "Aumentar Tamanho da Galeria de Imagem", + "desc": "Aumenta o tamanho das thumbs na galeria" + }, + "increaseBrushSize": { + "title": "Aumentar Tamanho do Pincel", + "desc": "Aumenta o tamanho do pincel/apagador" + }, + "fillBoundingBox": { + "title": "Preencher Caixa Delimitadora", + "desc": "Preenche a caixa delimitadora com a cor do pincel" + }, + "eraseBoundingBox": { + "title": "Apagar Caixa Delimitadora", + "desc": "Apaga a área da caixa delimitadora" + }, + "colorPicker": { + "title": "Selecionar Seletor de Cor", + "desc": "Seleciona o seletor de cores" + }, + "showHideBoundingBox": { + 
"title": "Mostrar/Esconder Caixa Delimitadora", + "desc": "Ativa a visibilidade da caixa delimitadora" + }, + "saveToGallery": { + "title": "Gravara Na Galeria", + "desc": "Grava a tela atual na galeria" + }, + "copyToClipboard": { + "title": "Copiar para a Área de Transferência", + "desc": "Copia a tela atual para a área de transferência" + }, + "resetView": { + "title": "Resetar Visualização", + "desc": "Reseta Visualização da Tela" + }, + "previousStagingImage": { + "title": "Imagem de Preparação Anterior", + "desc": "Área de Imagem de Preparação Anterior" + }, + "nextStagingImage": { + "title": "Próxima Imagem de Preparação Anterior", + "desc": "Próxima Área de Imagem de Preparação Anterior" + }, + "acceptStagingImage": { + "title": "Aceitar Imagem de Preparação Anterior", + "desc": "Aceitar Área de Imagem de Preparação Anterior" + } + }, + "modelManager": { + "modelAdded": "Modelo Adicionado", + "modelUpdated": "Modelo Atualizado", + "modelEntryDeleted": "Entrada de modelo excluída", + "description": "Descrição", + "modelLocationValidationMsg": "Caminho para onde o seu modelo está localizado.", + "repo_id": "Repo ID", + "vaeRepoIDValidationMsg": "Repositório Online do seu VAE", + "width": "Largura", + "widthValidationMsg": "Largura padrão do seu modelo.", + "height": "Altura", + "heightValidationMsg": "Altura padrão do seu modelo.", + "findModels": "Encontrar Modelos", + "scanAgain": "Digitalize Novamente", + "deselectAll": "Deselecionar Tudo", + "showExisting": "Mostrar Existente", + "deleteConfig": "Apagar Config", + "convertToDiffusersHelpText6": "Deseja converter este modelo?", + "mergedModelName": "Nome do modelo mesclado", + "alpha": "Alpha", + "interpolationType": "Tipo de Interpolação", + "modelMergeHeaderHelp1": "Pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.", + "modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro.", + "modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. 
A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.", + "nameValidationMsg": "Insira um nome para o seu modelo", + "descriptionValidationMsg": "Adicione uma descrição para o seu modelo", + "config": "Configuração", + "modelExists": "Modelo Existe", + "selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo", + "noModelsFound": "Nenhum Modelo Encontrado", + "v2_768": "v2 (768px)", + "inpainting": "v1 Inpainting", + "customConfig": "Configuração personalizada", + "pathToCustomConfig": "Caminho para configuração personalizada", + "statusConverting": "A converter", + "modelConverted": "Modelo Convertido", + "ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados", + "addDifference": "Adicionar diferença", + "pickModelType": "Escolha o tipo de modelo", + "safetensorModels": "SafeTensors", + "cannotUseSpaces": "Não pode usar espaços", + "addNew": "Adicionar Novo", + "addManually": "Adicionar Manualmente", + "manual": "Manual", + "name": "Nome", + "configValidationMsg": "Caminho para o ficheiro de configuração do seu modelo.", + "modelLocation": "Localização do modelo", + "repoIDValidationMsg": "Repositório Online do seu Modelo", + "updateModel": "Atualizar Modelo", + "availableModels": "Modelos Disponíveis", + "load": "Carregar", + "active": "Ativado", + "notLoaded": "Não carregado", + "deleteModel": "Apagar modelo", + "deleteMsg1": "Tem certeza de que deseja apagar esta entrada do modelo de InvokeAI?", + "deleteMsg2": "Isso não vai apagar o ficheiro de modelo checkpoint do seu disco. Pode lê-los, se desejar.", + "convertToDiffusers": "Converter para Diffusers", + "convertToDiffusersHelpText1": "Este modelo será convertido ao formato 🧨 Diffusers.", + "convertToDiffusersHelpText2": "Este processo irá substituir a sua entrada de Gestor de Modelos por uma versão Diffusers do mesmo modelo.", + "convertToDiffusersHelpText3": "O seu ficheiro de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. 
Pode adicionar o seu ponto de verificação ao Gestor de modelos novamente, se desejar.", + "convertToDiffusersSaveLocation": "Local para Gravar", + "v2_base": "v2 (512px)", + "mergeModels": "Mesclar modelos", + "modelOne": "Modelo 1", + "modelTwo": "Modelo 2", + "modelThree": "Modelo 3", + "mergedModelSaveLocation": "Local de Salvamento", + "merge": "Mesclar", + "modelsMerged": "Modelos mesclados", + "mergedModelCustomSaveLocation": "Caminho Personalizado", + "invokeAIFolder": "Pasta Invoke AI", + "inverseSigmoid": "Sigmóide Inversa", + "none": "nenhum", + "modelManager": "Gerente de Modelo", + "model": "Modelo", + "allModels": "Todos os Modelos", + "checkpointModels": "Checkpoints", + "diffusersModels": "Diffusers", + "addNewModel": "Adicionar Novo modelo", + "addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor", + "addDiffuserModel": "Adicionar Diffusers", + "vaeLocation": "Localização VAE", + "vaeLocationValidationMsg": "Caminho para onde o seu VAE está localizado.", + "vaeRepoID": "VAE Repo ID", + "addModel": "Adicionar Modelo", + "search": "Procurar", + "cached": "Em cache", + "checkpointFolder": "Pasta de Checkpoint", + "clearCheckpointFolder": "Apagar Pasta de Checkpoint", + "modelsFound": "Modelos Encontrados", + "selectFolder": "Selecione a Pasta", + "selected": "Selecionada", + "selectAll": "Selecionar Tudo", + "addSelected": "Adicione Selecionado", + "delete": "Apagar", + "formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers", + "formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.", + "formMessageDiffusersVAELocation": "Localização do VAE", + "formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo ficheiro VAE dentro do local do modelo.", + "convert": "Converter", + "convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, a depender das especificações do seu computador.", + "convertToDiffusersHelpText5": "Por favor, certifique-se de que tenha espaço suficiente no disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.", + "v1": "v1", + "sameFolder": "Mesma pasta", + "invokeRoot": "Pasta do InvokeAI", + "custom": "Personalizado", + "customSaveLocation": "Local de salvamento personalizado", + "modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. 
Valores de alpha mais baixos resultam numa influência menor do segundo modelo.", + "sigmoid": "Sigmóide", + "weightedSum": "Soma Ponderada" + }, + "parameters": { + "width": "Largura", + "seed": "Seed", + "hiresStrength": "Força da Alta Resolução", + "negativePrompts": "Indicações negativas", + "general": "Geral", + "randomizeSeed": "Seed Aleatório", + "shuffle": "Embaralhar", + "noiseThreshold": "Limite de Ruído", + "perlinNoise": "Ruído de Perlin", + "variations": "Variatções", + "seedWeights": "Pesos da Seed", + "restoreFaces": "Restaurar Rostos", + "faceRestoration": "Restauração de Rosto", + "type": "Tipo", + "denoisingStrength": "A força de remoção de ruído", + "scale": "Escala", + "otherOptions": "Outras Opções", + "seamlessTiling": "Ladrilho Sem Fronteira", + "hiresOptim": "Otimização de Alta Res", + "imageFit": "Caber Imagem Inicial No Tamanho de Saída", + "codeformerFidelity": "Fidelidade", + "seamSize": "Tamanho da Fronteira", + "seamBlur": "Desfoque da Fronteira", + "seamStrength": "Força da Fronteira", + "seamSteps": "Passos da Fronteira", + "tileSize": "Tamanho do Ladrilho", + "boundingBoxHeader": "Caixa Delimitadora", + "seamCorrectionHeader": "Correção de Fronteira", + "infillScalingHeader": "Preencimento e Escala", + "img2imgStrength": "Força de Imagem Para Imagem", + "toggleLoopback": "Ativar Loopback", + "symmetry": "Simetria", + "promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)", + "sendTo": "Mandar para", + "openInViewer": "Abrir No Visualizador", + "closeViewer": "Fechar Visualizador", + "usePrompt": "Usar Prompt", + "deleteImage": "Apagar Imagem", + "initialImage": "Imagem inicial", + "showOptionsPanel": "Mostrar Painel de Opções", + "strength": "Força", + "upscaling": "Redimensionando", + "upscale": "Redimensionar", + "upscaleImage": "Redimensionar Imagem", + "scaleBeforeProcessing": "Escala Antes do Processamento", + "invoke": "Invocar", + "images": "Imagems", + "steps": "Passos", + "cfgScale": "Escala CFG", + "height": "Altura", + "sampler": "Amostrador", + "imageToImage": "Imagem para Imagem", + "variationAmount": "Quntidade de Variatções", + "scaledWidth": "L Escalada", + "scaledHeight": "A Escalada", + "infillMethod": "Método de Preenchimento", + "hSymmetryStep": "H Passo de Simetria", + "vSymmetryStep": "V Passo de Simetria", + "cancel": { + "immediate": "Cancelar imediatamente", + "schedule": "Cancelar após a iteração atual", + "isScheduled": "A cancelar", + "setType": "Definir tipo de cancelamento" + }, + "sendToImg2Img": "Mandar para Imagem Para Imagem", + "sendToUnifiedCanvas": "Mandar para Tela Unificada", + "copyImage": "Copiar imagem", + "copyImageToLink": "Copiar Imagem Para a Ligação", + "downloadImage": "Descarregar Imagem", + "useSeed": "Usar Seed", + "useAll": "Usar Todos", + "useInitImg": "Usar Imagem Inicial", + "info": "Informações" + }, + "settings": { + "confirmOnDelete": "Confirmar Antes de Apagar", + "displayHelpIcons": "Mostrar Ícones de Ajuda", + "useCanvasBeta": "Usar Layout de Telas Beta", + "enableImageDebugging": "Ativar Depuração de Imagem", + "useSlidersForAll": "Usar deslizadores para todas as opções", + "resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. 
Não apaga nenhuma imagem do disco.", + "models": "Modelos", + "displayInProgress": "Mostrar Progresso de Imagens Em Andamento", + "saveSteps": "Gravar imagens a cada n passos", + "resetWebUI": "Reiniciar Interface", + "resetWebUIDesc2": "Se as imagens não estão a aparecer na galeria ou algo mais não está a funcionar, favor tentar reiniciar antes de postar um problema no GitHub.", + "resetComplete": "A interface foi reiniciada. Atualize a página para carregar." + }, + "toast": { + "uploadFailed": "Envio Falhou", + "uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez", + "uploadFailedUnableToLoadDesc": "Não foj possível carregar o ficheiro", + "downloadImageStarted": "Download de Imagem Começou", + "imageNotLoadedDesc": "Nenhuma imagem encontrada a enviar para o módulo de imagem para imagem", + "imageLinkCopied": "Ligação de Imagem Copiada", + "imageNotLoaded": "Nenhuma Imagem Carregada", + "parametersFailed": "Problema ao carregar parâmetros", + "parametersFailedDesc": "Não foi possível carregar imagem incial.", + "seedSet": "Seed Definida", + "upscalingFailed": "Redimensionamento Falhou", + "promptNotSet": "Prompt Não Definido", + "tempFoldersEmptied": "Pasta de Ficheiros Temporários Esvaziada", + "imageCopied": "Imagem Copiada", + "imageSavedToGallery": "Imagem Salva na Galeria", + "canvasMerged": "Tela Fundida", + "sentToImageToImage": "Mandar Para Imagem Para Imagem", + "sentToUnifiedCanvas": "Enviada para a Tela Unificada", + "parametersSet": "Parâmetros Definidos", + "parametersNotSet": "Parâmetros Não Definidos", + "parametersNotSetDesc": "Nenhum metadado foi encontrado para essa imagem.", + "seedNotSet": "Seed Não Definida", + "seedNotSetDesc": "Não foi possível achar a seed para a imagem.", + "promptSet": "Prompt Definido", + "promptNotSetDesc": "Não foi possível achar prompt para essa imagem.", + "faceRestoreFailed": "Restauração de Rosto Falhou", + "metadataLoadFailed": "Falha ao tentar carregar metadados", + "initialImageSet": "Imagem Inicial Definida", + "initialImageNotSet": "Imagem Inicial Não Definida", + "initialImageNotSetDesc": "Não foi possível carregar imagem incial" + }, + "tooltip": { + "feature": { + "prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.", + "other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.", + "seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10) e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.", + "imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75", + "faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. 
Um valor alto mudará mais a imagem, a resultar em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.", + "seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.", + "gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em ficheiros e acessadas pelo menu de contexto.", + "variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3.", + "upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.", + "boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.", + "infillAndScaling": "Gira os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos)." + } + }, + "unifiedCanvas": { + "emptyTempImagesFolderMessage": "Esvaziar a pasta de ficheiros de imagem temporários também reseta completamente a Tela Unificada. Isso inclui todo o histórico de desfazer/refazer, imagens na área de preparação e a camada base da tela.", + "scaledBoundingBox": "Caixa Delimitadora Escalada", + "boundingBoxPosition": "Posição da Caixa Delimitadora", + "next": "Próximo", + "accept": "Aceitar", + "showHide": "Mostrar/Esconder", + "discardAll": "Descartar Todos", + "betaClear": "Limpar", + "betaDarkenOutside": "Escurecer Externamente", + "base": "Base", + "brush": "Pincel", + "showIntermediates": "Mostrar Intermediários", + "showGrid": "Mostrar Grade", + "clearCanvasHistoryConfirm": "Tem certeza que quer limpar o histórico de tela?", + "boundingBox": "Caixa Delimitadora", + "canvasDimensions": "Dimensões da Tela", + "canvasPosition": "Posição da Tela", + "cursorPosition": "Posição do cursor", + "previous": "Anterior", + "betaLimitToBox": "Limitar á Caixa", + "layer": "Camada", + "mask": "Máscara", + "maskingOptions": "Opções de Mascaramento", + "enableMask": "Ativar Máscara", + "preserveMaskedArea": "Preservar Área da Máscara", + "clearMask": "Limpar Máscara", + "eraser": "Apagador", + "fillBoundingBox": "Preencher Caixa Delimitadora", + "eraseBoundingBox": "Apagar Caixa Delimitadora", + "colorPicker": "Seletor de Cor", + "brushOptions": "Opções de Pincel", + "brushSize": "Tamanho", + "move": "Mover", + "resetView": "Resetar Visualização", + "mergeVisible": "Fundir Visível", + "saveToGallery": "Gravar na Galeria", + "copyToClipboard": "Copiar para a Área de Transferência", + "downloadAsImage": "Descarregar Como Imagem", + "undo": "Desfazer", + "redo": "Refazer", + "clearCanvas": "Limpar Tela", + "canvasSettings": "Configurações de Tela", + "snapToGrid": "Encaixar na Grade", + "darkenOutsideSelection": "Escurecer Seleção Externa", + "autoSaveToGallery": "Gravar Automaticamente na Galeria", + "saveBoxRegionOnly": "Gravar Apenas a Região da Caixa", + "limitStrokesToBox": "Limitar Traços à Caixa", + "showCanvasDebugInfo": "Mostrar Informações de Depuração daTela", + "clearCanvasHistory": "Limpar o Histórico da Tela", + "clearHistory": "Limpar Históprico", + "clearCanvasHistoryMessage": "Limpar o histórico de tela deixa a sua tela atual intacta, mas limpa de forma irreversível o histórico de desfazer e refazer.", + "emptyTempImageFolder": "Esvaziar a Pasta de Ficheiros de Imagem Temporários", + 
"emptyFolder": "Esvaziar Pasta", + "emptyTempImagesFolderConfirm": "Tem certeza que quer esvaziar a pasta de ficheiros de imagem temporários?", + "activeLayer": "Camada Ativa", + "canvasScale": "Escala da Tela", + "betaPreserveMasked": "Preservar Máscarado" + }, + "accessibility": { + "invokeProgressBar": "Invocar barra de progresso", + "reset": "Repôr", + "nextImage": "Próxima imagem", + "useThisParameter": "Usar este parâmetro", + "copyMetadataJson": "Copiar metadados JSON", + "zoomIn": "Ampliar", + "zoomOut": "Reduzir", + "rotateCounterClockwise": "Girar no sentido anti-horário", + "rotateClockwise": "Girar no sentido horário", + "flipVertically": "Espelhar verticalmente", + "modifyConfig": "Modificar config", + "toggleAutoscroll": "Alternar rolagem automática", + "showGallery": "Mostrar galeria", + "showOptionsPanel": "Mostrar painel de opções", + "uploadImage": "Enviar imagem", + "previousImage": "Imagem anterior", + "flipHorizontally": "Espelhar horizontalmente", + "toggleLogViewer": "Alternar visualizador de registo" } } From c14241436ba62fe9d4d8c9d9a4f856d24cd97b3b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 10:56:53 -0500 Subject: [PATCH 16/24] move ModelManager initialization into its own module and restore embedding support --- invokeai/app/api/dependencies.py | 6 +- invokeai/app/services/generate_initializer.py | 128 ----------------- .../app/services/model_manager_initializer.py | 136 ++++++++++++++++++ invokeai/backend/generate.py | 13 +- .../backend/model_management/model_manager.py | 28 +++- 5 files changed, 161 insertions(+), 150 deletions(-) create mode 100644 invokeai/app/services/model_manager_initializer.py diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 70d7d1d1a7..9114a9285d 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -4,7 +4,7 @@ import os from argparse import Namespace from ...backend import Globals -from ..services.generate_initializer import get_model_manager +from ..services.model_manager_initializer import get_model_manager from ..services.graph import GraphExecutionState from ..services.image_storage import DiskImageStorage from ..services.invocation_queue import MemoryInvocationQueue @@ -47,8 +47,6 @@ class ApiDependencies: # TODO: Use a logger print(f">> Internet connectivity is {Globals.internet_available}") - model_manager = get_model_manager(args, config) - events = FastAPIEventService(event_handler_id) output_folder = os.path.abspath( @@ -61,7 +59,7 @@ class ApiDependencies: db_location = os.path.join(output_folder, "invokeai.db") services = InvocationServices( - generator_factory=generator_factory, + model_manager=get_model_manager(args, config), events=events, images=images, queue=MemoryInvocationQueue(), diff --git a/invokeai/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py index 144e50088e..6dd65e69b1 100644 --- a/invokeai/app/services/generate_initializer.py +++ b/invokeai/app/services/generate_initializer.py @@ -6,97 +6,8 @@ from argparse import Namespace from omegaconf import OmegaConf import invokeai.version -from ...backend import ModelManager -from ...backend.util import choose_precision, choose_torch_device from ...backend import Globals -# TODO: most of this code should be split into individual services as the Generate.py code is deprecated -def get_model_manager(args, config) -> ModelManager: - if not args.conf: - config_file = os.path.join(Globals.root, "configs", "models.yaml") - if not 
os.path.exists(config_file): - report_model_error( - args, FileNotFoundError(f"The file {config_file} could not be found.") - ) - - print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}") - print(f'>> InvokeAI runtime directory is "{Globals.root}"') - - # these two lines prevent a horrible warning message from appearing - # when the frozen CLIP tokenizer is imported - import transformers # type: ignore - - transformers.logging.set_verbosity_error() - import diffusers - - diffusers.logging.set_verbosity_error() - - # Loading Face Restoration and ESRGAN Modules - gfpgan, codeformer, esrgan = load_face_restoration(args) - - # normalize the config directory relative to root - if not os.path.isabs(args.conf): - args.conf = os.path.normpath(os.path.join(Globals.root, args.conf)) - - if args.embeddings: - if not os.path.isabs(args.embedding_path): - embedding_path = os.path.normpath( - os.path.join(Globals.root, args.embedding_path) - ) - else: - embedding_path = args.embedding_path - else: - embedding_path = None - - # migrate legacy models - ModelManager.migrate_models() - - # load the infile as a list of lines - if args.infile: - try: - if os.path.isfile(args.infile): - infile = open(args.infile, "r", encoding="utf-8") - elif args.infile == "-": # stdin - infile = sys.stdin - else: - raise FileNotFoundError(f"{args.infile} not found.") - except (FileNotFoundError, IOError) as e: - print(f"{e}. Aborting.") - sys.exit(-1) - - # creating the model manager - try: - device = torch.device(choose_torch_device()) - precision = 'float16' if args.precision=='float16' \ - else 'float32' if args.precision=='float32' \ - else choose_precision(device) - - model_manager = ModelManager( - OmegaConf.load(args.conf), - precision=precision, - device_type=device, - max_loaded_models=args.max_loaded_models, - ) - except (FileNotFoundError, TypeError, AssertionError) as e: - report_model_error(args, e) - except (IOError, KeyError) as e: - print(f"{e}. Aborting.") - sys.exit(-1) - - if args.seamless: - #TODO: do something here ? - print(">> changed to seamless tiling mode") - - # try to autoconvert new models - # autoimport new .ckpt files - if path := args.autoconvert: - model_manager.autoconvert_weights( - conf_path=args.conf, - weights_directory=path, - ) - - return model_manager - def load_face_restoration(opt): try: gfpgan, codeformer, esrgan = None, None, None @@ -122,42 +33,3 @@ def load_face_restoration(opt): return gfpgan, codeformer, esrgan -def report_model_error(opt: Namespace, e: Exception): - print(f'** An error occurred while attempting to initialize the model: "{str(e)}"') - print( - "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models." - ) - yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE") - if yes_to_all: - print( - "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE" - ) - else: - response = input( - "Do you want to run invokeai-configure script to select and/or reinstall models? 
[y] " - ) - if response.startswith(("n", "N")): - return - - print("invokeai-configure is launching....\n") - - # Match arguments that were set on the CLI - # only the arguments accepted by the configuration script are parsed - root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else [] - config = ["--config", opt.conf] if opt.conf is not None else [] - previous_args = sys.argv - sys.argv = ["invokeai-configure"] - sys.argv.extend(root_dir) - sys.argv.extend(config) - if yes_to_all is not None: - for arg in yes_to_all.split(): - sys.argv.append(arg) - - from invokeai.frontend.install import invokeai_configure - - invokeai_configure() - # TODO: Figure out how to restart - # print('** InvokeAI will now restart') - # sys.argv = previous_args - # main() # would rather do a os.exec(), but doesn't exist? - # sys.exit(0) diff --git a/invokeai/app/services/model_manager_initializer.py b/invokeai/app/services/model_manager_initializer.py new file mode 100644 index 0000000000..d7c0f0c9b2 --- /dev/null +++ b/invokeai/app/services/model_manager_initializer.py @@ -0,0 +1,136 @@ +import os +import sys +import torch +from argparse import Namespace +from omegaconf import OmegaConf +from pathlib import Path + +import invokeai.version +from ...backend import ModelManager +from ...backend.util import choose_precision, choose_torch_device +from ...backend import Globals + +# TODO: most of this code should be split into individual services as the Generate.py code is deprecated +def get_model_manager(args, config) -> ModelManager: + if not args.conf: + config_file = os.path.join(Globals.root, "configs", "models.yaml") + if not os.path.exists(config_file): + report_model_error( + args, FileNotFoundError(f"The file {config_file} could not be found.") + ) + + print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}") + print(f'>> InvokeAI runtime directory is "{Globals.root}"') + + # these two lines prevent a horrible warning message from appearing + # when the frozen CLIP tokenizer is imported + import transformers # type: ignore + + transformers.logging.set_verbosity_error() + import diffusers + + diffusers.logging.set_verbosity_error() + + # normalize the config directory relative to root + if not os.path.isabs(args.conf): + args.conf = os.path.normpath(os.path.join(Globals.root, args.conf)) + + if args.embeddings: + if not os.path.isabs(args.embedding_path): + embedding_path = os.path.normpath( + os.path.join(Globals.root, args.embedding_path) + ) + else: + embedding_path = args.embedding_path + else: + embedding_path = None + + # migrate legacy models + ModelManager.migrate_models() + + # load the infile as a list of lines + if args.infile: + try: + if os.path.isfile(args.infile): + infile = open(args.infile, "r", encoding="utf-8") + elif args.infile == "-": # stdin + infile = sys.stdin + else: + raise FileNotFoundError(f"{args.infile} not found.") + except (FileNotFoundError, IOError) as e: + print(f"{e}. 
Aborting.") + sys.exit(-1) + + # creating the model manager + try: + device = torch.device(choose_torch_device()) + precision = 'float16' if args.precision=='float16' \ + else 'float32' if args.precision=='float32' \ + else choose_precision(device) + + model_manager = ModelManager( + OmegaConf.load(args.conf), + precision=precision, + device_type=device, + max_loaded_models=args.max_loaded_models, + embedding_path = Path(embedding_path), + ) + except (FileNotFoundError, TypeError, AssertionError) as e: + report_model_error(args, e) + except (IOError, KeyError) as e: + print(f"{e}. Aborting.") + sys.exit(-1) + + if args.seamless: + #TODO: do something here ? + print(">> changed to seamless tiling mode") + + # try to autoconvert new models + # autoimport new .ckpt files + if path := args.autoconvert: + model_manager.autoconvert_weights( + conf_path=args.conf, + weights_directory=path, + ) + + return model_manager + +def report_model_error(opt: Namespace, e: Exception): + print(f'** An error occurred while attempting to initialize the model: "{str(e)}"') + print( + "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models." + ) + yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE") + if yes_to_all: + print( + "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE" + ) + else: + response = input( + "Do you want to run invokeai-configure script to select and/or reinstall models? [y] " + ) + if response.startswith(("n", "N")): + return + + print("invokeai-configure is launching....\n") + + # Match arguments that were set on the CLI + # only the arguments accepted by the configuration script are parsed + root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else [] + config = ["--config", opt.conf] if opt.conf is not None else [] + previous_args = sys.argv + sys.argv = ["invokeai-configure"] + sys.argv.extend(root_dir) + sys.argv.extend(config) + if yes_to_all is not None: + for arg in yes_to_all.split(): + sys.argv.append(arg) + + from invokeai.frontend.install import invokeai_configure + + invokeai_configure() + # TODO: Figure out how to restart + # print('** InvokeAI will now restart') + # sys.argv = previous_args + # main() # would rather do a os.exec(), but doesn't exist? 
+ # sys.exit(0) diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py index 35dba41ffb..22e4ff177d 100644 --- a/invokeai/backend/generate.py +++ b/invokeai/backend/generate.py @@ -222,6 +222,7 @@ class Generate: self.precision, max_loaded_models=max_loaded_models, sequential_offload=self.free_gpu_mem, + embedding_path=Path(self.embedding_path), ) # don't accept invalid models fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME @@ -940,18 +941,6 @@ class Generate: self.generators = {} set_seed(random.randrange(0, np.iinfo(np.uint32).max)) - if self.embedding_path is not None: - print(f">> Loading embeddings from {self.embedding_path}") - for root, _, files in os.walk(self.embedding_path): - for name in files: - ti_path = os.path.join(root, name) - self.model.textual_inversion_manager.load_textual_inversion( - ti_path, defer_injecting_tokens=True - ) - print( - f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}' - ) - self.model_name = model_name self._set_scheduler() # requires self.model_name to be set first return self.model diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index c17abc44e3..50ea13a2d0 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -54,12 +54,13 @@ class ModelManager(object): Model manager handles loading, caching, importing, deleting, converting, and editing models. ''' def __init__( - self, - config: OmegaConf|Path, - device_type: torch.device = CUDA_DEVICE, - precision: str = "float16", - max_loaded_models=DEFAULT_MAX_MODELS, - sequential_offload=False, + self, + config: OmegaConf|Path, + device_type: torch.device = CUDA_DEVICE, + precision: str = "float16", + max_loaded_models=DEFAULT_MAX_MODELS, + sequential_offload=False, + embedding_path: Path=None, ): """ Initialize with the path to the models.yaml config file or @@ -80,6 +81,7 @@ class ModelManager(object): self.stack = [] # this is an LRU FIFO self.current_model = None self.sequential_offload = sequential_offload + self.embedding_path = embedding_path def valid_model(self, model_name: str) -> bool: """ @@ -434,6 +436,7 @@ class ModelManager(object): height = width print(f" | Default image dimensions = {width} x {height}") + self._add_embeddings_to_model(pipeline) return pipeline, width, height, model_hash @@ -1070,6 +1073,19 @@ class ModelManager(object): self.stack.remove(model_name) self.stack.append(model_name) + def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline): + if self.embedding_path is not None: + print(f">> Loading embeddings from {self.embedding_path}") + for root, _, files in os.walk(self.embedding_path): + for name in files: + ti_path = os.path.join(root, name) + model.textual_inversion_manager.load_textual_inversion( + ti_path, defer_injecting_tokens=True + ) + print( + f'>> Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}' + ) + def _has_cuda(self) -> bool: return self.device.type == "cuda" From 270032670ad26e0c63870ee8bc0e19c0de5e6e07 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 8 Mar 2023 23:39:34 +1100 Subject: [PATCH 17/24] build: exclude ui from `test-invoke-pip` --- .github/workflows/test-invoke-pip-skip.yml | 12 ++++++------ .github/workflows/test-invoke-pip.yml | 6 ++---- 2 files changed, 8 
insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-invoke-pip-skip.yml b/.github/workflows/test-invoke-pip-skip.yml index 7f9758814e..c2347e5ce3 100644 --- a/.github/workflows/test-invoke-pip-skip.yml +++ b/.github/workflows/test-invoke-pip-skip.yml @@ -1,12 +1,12 @@ name: Test invoke.py pip on: pull_request: - paths-ignore: - - 'pyproject.toml' - - 'invokeai/**' - - 'invokeai/backend/**' - - 'invokeai/configs/**' - - 'invokeai/frontend/web/dist/**' + paths: + - '**' + - '!pyproject.toml' + - '!invokeai/**' + - 'invokeai/frontend/web/**' + - '!invokeai/frontend/web/dist/**' merge_group: workflow_dispatch: diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 20d7ac94c1..30ed05379c 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -6,15 +6,13 @@ on: paths: - 'pyproject.toml' - 'invokeai/**' - - 'invokeai/backend/**' - - 'invokeai/configs/**' + - '!invokeai/frontend/web/**' - 'invokeai/frontend/web/dist/**' pull_request: paths: - 'pyproject.toml' - 'invokeai/**' - - 'invokeai/backend/**' - - 'invokeai/configs/**' + - '!invokeai/frontend/web/**' - 'invokeai/frontend/web/dist/**' types: - 'ready_for_review' From 580f9ecded4da2533c364d1b63f90c14db5ecb5a Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 11:32:57 -0500 Subject: [PATCH 18/24] simplify passing of config options --- invokeai/app/api/dependencies.py | 14 ++--- invokeai/app/api_app.py | 6 +-- invokeai/app/cli_app.py | 9 ++-- .../app/services/model_manager_initializer.py | 54 +++++++------------ 4 files changed, 33 insertions(+), 50 deletions(-) diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 9114a9285d..7d733ec47c 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -37,12 +37,12 @@ class ApiDependencies: invoker: Invoker = None @staticmethod - def initialize(args, config, event_handler_id: int): - Globals.try_patchmatch = args.patchmatch - Globals.always_use_cpu = args.always_use_cpu - Globals.internet_available = args.internet_available and check_internet() - Globals.disable_xformers = not args.xformers - Globals.ckpt_convert = args.ckpt_convert + def initialize(config, event_handler_id: int): + Globals.try_patchmatch = config.patchmatch + Globals.always_use_cpu = config.always_use_cpu + Globals.internet_available = config.internet_available and check_internet() + Globals.disable_xformers = not config.xformers + Globals.ckpt_convert = config.ckpt_convert # TODO: Use a logger print(f">> Internet connectivity is {Globals.internet_available}") @@ -59,7 +59,7 @@ class ApiDependencies: db_location = os.path.join(output_folder, "invokeai.db") services = InvocationServices( - model_manager=get_model_manager(args, config), + model_manager=get_model_manager(config), events=events, images=images, queue=MemoryInvocationQueue(), diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fb64ca3b7a..238fb0180b 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -53,11 +53,11 @@ config = {} # Add startup event to load dependencies @app.on_event("startup") async def startup_event(): - args = Args() - config = args.parse_args() + config = Args() + config.parse_args() ApiDependencies.initialize( - args=args, config=config, event_handler_id=event_handler_id + config=config, event_handler_id=event_handler_id ) diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py index 25e6e5e85c..6e0664eebf 100644 --- 
a/invokeai/app/cli_app.py +++ b/invokeai/app/cli_app.py @@ -17,7 +17,7 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_gra from .invocations import * from .invocations.baseinvocation import BaseInvocation from .services.events import EventServiceBase -from .services.generate_initializer import get_model_manager +from .services.model_manager_initializer import get_model_manager from .services.graph import EdgeConnection, GraphExecutionState from .services.image_storage import DiskImageStorage from .services.invocation_queue import MemoryInvocationQueue @@ -126,10 +126,9 @@ def invoke_all(context: CliContext): def invoke_cli(): - args = Args() - config = args.parse_args() - - model_manager = get_model_manager(args, config) + config = Args() + config.parse_args() + model_manager = get_model_manager(config) events = EventServiceBase() diff --git a/invokeai/app/services/model_manager_initializer.py b/invokeai/app/services/model_manager_initializer.py index d7c0f0c9b2..88a3ff6c43 100644 --- a/invokeai/app/services/model_manager_initializer.py +++ b/invokeai/app/services/model_manager_initializer.py @@ -2,6 +2,7 @@ import os import sys import torch from argparse import Namespace +from invokeai.backend import Args from omegaconf import OmegaConf from pathlib import Path @@ -11,12 +12,12 @@ from ...backend.util import choose_precision, choose_torch_device from ...backend import Globals # TODO: most of this code should be split into individual services as the Generate.py code is deprecated -def get_model_manager(args, config) -> ModelManager: - if not args.conf: +def get_model_manager(config: Args) -> ModelManager: + if not config.conf: config_file = os.path.join(Globals.root, "configs", "models.yaml") if not os.path.exists(config_file): report_model_error( - args, FileNotFoundError(f"The file {config_file} could not be found.") + config, FileNotFoundError(f"The file {config_file} could not be found.") ) print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}") @@ -32,64 +33,47 @@ def get_model_manager(args, config) -> ModelManager: diffusers.logging.set_verbosity_error() # normalize the config directory relative to root - if not os.path.isabs(args.conf): - args.conf = os.path.normpath(os.path.join(Globals.root, args.conf)) + if not os.path.isabs(config.conf): + config.conf = os.path.normpath(os.path.join(Globals.root, config.conf)) - if args.embeddings: - if not os.path.isabs(args.embedding_path): + if config.embeddings: + if not os.path.isabs(config.embedding_path): embedding_path = os.path.normpath( - os.path.join(Globals.root, args.embedding_path) + os.path.join(Globals.root, config.embedding_path) ) else: - embedding_path = args.embedding_path + embedding_path = config.embedding_path else: embedding_path = None # migrate legacy models ModelManager.migrate_models() - # load the infile as a list of lines - if args.infile: - try: - if os.path.isfile(args.infile): - infile = open(args.infile, "r", encoding="utf-8") - elif args.infile == "-": # stdin - infile = sys.stdin - else: - raise FileNotFoundError(f"{args.infile} not found.") - except (FileNotFoundError, IOError) as e: - print(f"{e}. 
Aborting.") - sys.exit(-1) - # creating the model manager try: device = torch.device(choose_torch_device()) - precision = 'float16' if args.precision=='float16' \ - else 'float32' if args.precision=='float32' \ + precision = 'float16' if config.precision=='float16' \ + else 'float32' if config.precision=='float32' \ else choose_precision(device) model_manager = ModelManager( - OmegaConf.load(args.conf), + OmegaConf.load(config.conf), precision=precision, device_type=device, - max_loaded_models=args.max_loaded_models, + max_loaded_models=config.max_loaded_models, embedding_path = Path(embedding_path), ) except (FileNotFoundError, TypeError, AssertionError) as e: - report_model_error(args, e) + report_model_error(config, e) except (IOError, KeyError) as e: print(f"{e}. Aborting.") sys.exit(-1) - if args.seamless: - #TODO: do something here ? - print(">> changed to seamless tiling mode") - # try to autoconvert new models # autoimport new .ckpt files - if path := args.autoconvert: + if path := config.autoconvert: model_manager.autoconvert_weights( - conf_path=args.conf, + conf_path=config.conf, weights_directory=path, ) @@ -118,10 +102,10 @@ def report_model_error(opt: Namespace, e: Exception): # only the arguments accepted by the configuration script are parsed root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else [] config = ["--config", opt.conf] if opt.conf is not None else [] - previous_args = sys.argv + previous_config = sys.argv sys.argv = ["invokeai-configure"] sys.argv.extend(root_dir) - sys.argv.extend(config) + sys.argv.extend(config.to_dict()) if yes_to_all is not None: for arg in yes_to_all.split(): sys.argv.append(arg) From 3aa1ee121815e695aab2daca50c945c2f18a453e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 16:16:44 -0500 Subject: [PATCH 19/24] restore NSFW checker --- invokeai/backend/__init__.py | 1 + invokeai/backend/generate.py | 41 +++-------------------- invokeai/backend/safety_checker.py | 27 ++++++--------- tests/nodes/test_graph_execution_state.py | 2 +- tests/nodes/test_invoker.py | 2 +- 5 files changed, 18 insertions(+), 55 deletions(-) diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index a816486631..06066dd6b1 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -11,5 +11,6 @@ from .generator import ( Inpaint ) from .model_management import ModelManager +from .safety_checker import SafetyChecker from .args import Args from .globals import Globals diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py index 22e4ff177d..7db0c4a2ef 100644 --- a/invokeai/backend/generate.py +++ b/invokeai/backend/generate.py @@ -25,18 +25,19 @@ from accelerate.utils import set_seed from diffusers.pipeline_utils import DiffusionPipeline from diffusers.utils.import_utils import is_xformers_available from omegaconf import OmegaConf +from pathlib import Path from .args import metadata_from_png from .generator import infill_methods from .globals import Globals, global_cache_dir from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding from .model_management import ModelManager +from .safety_checker import SafetyChecker from .prompting import get_uc_and_c_and_ec from .prompting.conditioning import log_tokenization from .stable_diffusion import HuggingFaceConceptsLibrary from .util import choose_precision, choose_torch_device - def fix_func(orig): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): @@ -245,31 +246,8 @@ class Generate: # 
load safety checker if requested if safety_checker: - try: - print(">> Initializing NSFW checker") - from diffusers.pipelines.stable_diffusion.safety_checker import ( - StableDiffusionSafetyChecker, - ) - from transformers import AutoFeatureExtractor - - safety_model_id = "CompVis/stable-diffusion-safety-checker" - safety_model_path = global_cache_dir("hub") - self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( - safety_model_id, - local_files_only=True, - cache_dir=safety_model_path, - ) - self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained( - safety_model_id, - local_files_only=True, - cache_dir=safety_model_path, - ) - self.safety_checker.to(self.device) - except Exception: - print( - "** An error was encountered while installing the safety checker:" - ) - print(traceback.format_exc()) + print(">> Initializing NSFW checker") + self.safety_checker = SafetyChecker(self.device) else: print(">> NSFW checker is disabled") @@ -524,15 +502,6 @@ class Generate: generator.set_variation(self.seed, variation_amount, with_variations) generator.use_mps_noise = use_mps_noise - checker = ( - { - "checker": self.safety_checker, - "extractor": self.safety_feature_extractor, - } - if self.safety_checker - else None - ) - results = generator.generate( prompt, iterations=iterations, @@ -559,7 +528,7 @@ class Generate: embiggen_strength=embiggen_strength, inpaint_replace=inpaint_replace, mask_blur_radius=mask_blur_radius, - safety_checker=checker, + safety_checker=self.safety_checker, seam_size=seam_size, seam_blur=seam_blur, seam_strength=seam_strength, diff --git a/invokeai/backend/safety_checker.py b/invokeai/backend/safety_checker.py index 86cf31cc13..2e6c4fd479 100644 --- a/invokeai/backend/safety_checker.py +++ b/invokeai/backend/safety_checker.py @@ -15,14 +15,18 @@ from transformers import AutoFeatureExtractor import invokeai.assets.web as web_assets from .globals import global_cache_dir +from .util import CPU_DEVICE class SafetyChecker(object): CAUTION_IMG = "caution.png" def __init__(self, device: torch.device): + path = Path(web_assets.__path__[0]) / self.CAUTION_IMG + caution = Image.open(path) + self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) self.device = device + try: - print(">> Initializing NSFW checker") safety_model_id = "CompVis/stable-diffusion-safety-checker" safety_model_path = global_cache_dir("hub") self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( @@ -35,15 +39,11 @@ class SafetyChecker(object): local_files_only=True, cache_dir=safety_model_path, ) - self.safety_checker.to(device) - self.safety_feature_extractor.to(device) except Exception: print( "** An error was encountered while installing the safety checker:" ) print(traceback.format_exc()) - else: - print(">> NSFW checker is disabled") def check(self, image: Image.Image): """ @@ -51,7 +51,10 @@ class SafetyChecker(object): """ + self.safety_checker.to(self.device) features = self.safety_feature_extractor([image], return_tensors="pt") + features.to(self.device) + # unfortunately checker requires the numpy version, so we have to convert back x_image = np.array(image).astype(np.float32) / 255.0 x_image = x_image[None].transpose(0, 3, 1, 2) @@ -60,6 +63,7 @@ class SafetyChecker(object): checked_image, has_nsfw_concept = self.safety_checker( images=x_image, clip_input=features.pixel_values ) + self.safety_checker.to(CPU_DEVICE) # offload if has_nsfw_concept[0]: print( "** An image with potential non-safe content has been detected. 
A blurred image will be returned. **" @@ -71,19 +75,8 @@ class SafetyChecker(object): def blur(self, input): blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) try: - caution = self.get_caution_img() - if caution: + if caution := self.caution_img: blurry.paste(caution, (0, 0), caution) except FileNotFoundError: pass return blurry - - def get_caution_img(self): - path = None - if self.caution_img: - return self.caution_img - path = Path(web_assets.__path__[0]) / self.CAUTION_IMG - caution = Image.open(path) - self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) - return self.caution_img - diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py index 4c22507098..8cd6baf010 100644 --- a/tests/nodes/test_graph_execution_state.py +++ b/tests/nodes/test_graph_execution_state.py @@ -21,7 +21,7 @@ def simple_graph(): def mock_services(): # NOTE: none of these are actually called by the test invocations return InvocationServices( - generate = None, + model_manager = None, events = None, images = None, queue = MemoryInvocationQueue(), diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py index 6a7867bffe..a2cc92ce7a 100644 --- a/tests/nodes/test_invoker.py +++ b/tests/nodes/test_invoker.py @@ -21,7 +21,7 @@ def simple_graph(): def mock_services() -> InvocationServices: # NOTE: none of these are actually called by the test invocations return InvocationServices( - generate = None, # type: ignore + model_manager = None, # type: ignore events = TestEventService(), images = None, # type: ignore queue = MemoryInvocationQueue(), From 1c9d9e79d5f385ee30a3f10455a4389253e69a80 Mon Sep 17 00:00:00 2001 From: mauwii Date: Sat, 11 Mar 2023 22:32:13 +0100 Subject: [PATCH 20/24] raise operations-per-run to 500 - default value is 30 - limit per hour is 1000 --- .github/workflows/close-inactive-issues.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index dbb89cc8f2..58ec78c0b8 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -24,3 +24,4 @@ jobs: days-before-pr-stale: -1 days-before-pr-close: -1 repo-token: ${{ secrets.GITHUB_TOKEN }} + operations-per-run: 500 From 8ca91b177419065d01ebb8ed3940f236263f3338 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 17:00:00 -0500 Subject: [PATCH 21/24] add restoration services to nodes --- invokeai/app/api/dependencies.py | 2 + invokeai/app/cli_app.py | 2 + invokeai/app/invocations/reconstruct.py | 3 +- invokeai/app/invocations/upscale.py | 2 +- invokeai/app/services/invocation_services.py | 7 +- invokeai/app/services/restoration_services.py | 109 ++++++++++++++++++ invokeai/backend/generate.py | 2 +- tests/nodes/test_graph_execution_state.py | 3 +- tests/nodes/test_invoker.py | 3 +- 9 files changed, 125 insertions(+), 8 deletions(-) create mode 100644 invokeai/app/services/restoration_services.py diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 7d733ec47c..347fba7e97 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -5,6 +5,7 @@ from argparse import Namespace from ...backend import Globals from ..services.model_manager_initializer import get_model_manager +from ..services.restoration_services import RestorationServices from ..services.graph import GraphExecutionState from ..services.image_storage import DiskImageStorage from 
..services.invocation_queue import MemoryInvocationQueue @@ -67,6 +68,7 @@ class ApiDependencies: filename=db_location, table_name="graph_executions" ), processor=DefaultInvocationProcessor(), + restoration=RestorationServices(config), ) ApiDependencies.invoker = Invoker(services) diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py index 6e0664eebf..732a233cb4 100644 --- a/invokeai/app/cli_app.py +++ b/invokeai/app/cli_app.py @@ -18,6 +18,7 @@ from .invocations import * from .invocations.baseinvocation import BaseInvocation from .services.events import EventServiceBase from .services.model_manager_initializer import get_model_manager +from .services.restoration_services import RestorationServices from .services.graph import EdgeConnection, GraphExecutionState from .services.image_storage import DiskImageStorage from .services.invocation_queue import MemoryInvocationQueue @@ -148,6 +149,7 @@ def invoke_cli(): filename=db_location, table_name="graph_executions" ), processor=DefaultInvocationProcessor(), + restoration=RestorationServices(config), ) invoker = Invoker(services) diff --git a/invokeai/app/invocations/reconstruct.py b/invokeai/app/invocations/reconstruct.py index a90c33605e..c4d8f3ac7c 100644 --- a/invokeai/app/invocations/reconstruct.py +++ b/invokeai/app/invocations/reconstruct.py @@ -8,7 +8,6 @@ from ..services.invocation_services import InvocationServices from .baseinvocation import BaseInvocation, InvocationContext from .image import ImageField, ImageOutput - class RestoreFaceInvocation(BaseInvocation): """Restores faces in an image.""" #fmt: off @@ -23,7 +22,7 @@ class RestoreFaceInvocation(BaseInvocation): image = context.services.images.get( self.image.image_type, self.image.image_name ) - results = context.services.generate.upscale_and_reconstruct( + results = context.services.restoration.upscale_and_reconstruct( image_list=[[image, 0]], upscale=None, strength=self.strength, # GFPGAN strength diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index dcc39fc9ad..4079877fdb 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -26,7 +26,7 @@ class UpscaleInvocation(BaseInvocation): image = context.services.images.get( self.image.image_type, self.image.image_name ) - results = context.services.generate.upscale_and_reconstruct( + results = context.services.restoration.upscale_and_reconstruct( image_list=[[image, 0]], upscale=(self.level, self.strength), strength=0.0, # GFPGAN strength diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index c51299b688..7f24c34378 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -3,17 +3,18 @@ from invokeai.backend import ModelManager from .events import EventServiceBase from .image_storage import ImageStorageBase +from .restoration_services import RestorationServices from .invocation_queue import InvocationQueueABC from .item_storage import ItemStorageABC - class InvocationServices: """Services that can be used by invocations""" - model_manager: ModelManager events: EventServiceBase images: ImageStorageBase queue: InvocationQueueABC + model_manager: ModelManager + restoration: RestorationServices # NOTE: we must forward-declare any types that include invocations, since invocations can use services graph_execution_manager: ItemStorageABC["GraphExecutionState"] @@ -27,6 +28,7 @@ class InvocationServices: queue: InvocationQueueABC, 
graph_execution_manager: ItemStorageABC["GraphExecutionState"], processor: "InvocationProcessorABC", + restoration: RestorationServices, ): self.model_manager = model_manager self.events = events @@ -34,3 +36,4 @@ class InvocationServices: self.queue = queue self.graph_execution_manager = graph_execution_manager self.processor = processor + self.restoration = restoration diff --git a/invokeai/app/services/restoration_services.py b/invokeai/app/services/restoration_services.py new file mode 100644 index 0000000000..f5fc687c11 --- /dev/null +++ b/invokeai/app/services/restoration_services.py @@ -0,0 +1,109 @@ +import sys +import traceback +import torch +from ...backend.restoration import Restoration +from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE + +# This should be a real base class for postprocessing functions, +# but right now we just instantiate the existing gfpgan, esrgan +# and codeformer functions. +class RestorationServices: + '''Face restoration and upscaling''' + + def __init__(self,args): + try: + gfpgan, codeformer, esrgan = None, None, None + if args.restore or args.esrgan: + restoration = Restoration() + if args.restore: + gfpgan, codeformer = restoration.load_face_restore_models( + args.gfpgan_model_path + ) + else: + print(">> Face restoration disabled") + if args.esrgan: + esrgan = restoration.load_esrgan(args.esrgan_bg_tile) + else: + print(">> Upscaling disabled") + else: + print(">> Face restoration and upscaling disabled") + except (ModuleNotFoundError, ImportError): + print(traceback.format_exc(), file=sys.stderr) + print(">> You may need to install the ESRGAN and/or GFPGAN modules") + self.device = torch.device(choose_torch_device()) + self.gfpgan = gfpgan + self.codeformer = codeformer + self.esrgan = esrgan + + # note that this one method does gfpgan and codepath reconstruction, as well as + # esrgan upscaling + # TO DO: refactor into separate methods + def upscale_and_reconstruct( + self, + image_list, + facetool="gfpgan", + upscale=None, + upscale_denoise_str=0.75, + strength=0.0, + codeformer_fidelity=0.75, + save_original=False, + image_callback=None, + prefix=None, + ): + results = [] + for r in image_list: + image, seed = r + try: + if strength > 0: + if self.gfpgan is not None or self.codeformer is not None: + if facetool == "gfpgan": + if self.gfpgan is None: + print( + ">> GFPGAN not found. Face restoration is disabled." + ) + else: + image = self.gfpgan.process(image, strength, seed) + if facetool == "codeformer": + if self.codeformer is None: + print( + ">> CodeFormer not found. Face restoration is disabled." + ) + else: + cf_device = ( + CPU_DEVICE if self.device == MPS_DEVICE else self.device + ) + image = self.codeformer.process( + image=image, + strength=strength, + device=cf_device, + seed=seed, + fidelity=codeformer_fidelity, + ) + else: + print(">> Face Restoration is disabled.") + if upscale is not None: + if self.esrgan is not None: + if len(upscale) < 2: + upscale.append(0.75) + image = self.esrgan.process( + image, + upscale[1], + seed, + int(upscale[0]), + denoise_str=upscale_denoise_str, + ) + else: + print(">> ESRGAN is disabled. Image not upscaled.") + except Exception as e: + print( + f">> Error running RealESRGAN or GFPGAN. 
Your image was not upscaled.\n{e}" + ) + + if image_callback is not None: + image_callback(image, seed, upscaled=True, use_prefix=prefix) + else: + r[0] = image + + results.append([image, seed]) + + return results diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py index 7db0c4a2ef..1b19a1aa7e 100644 --- a/invokeai/backend/generate.py +++ b/invokeai/backend/generate.py @@ -956,7 +956,7 @@ class Generate: ): results = [] for r in image_list: - image, seed = r + image, seed, _ = r try: if strength > 0: if self.gfpgan is not None or self.codeformer is not None: diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py index 8cd6baf010..b722539935 100644 --- a/tests/nodes/test_graph_execution_state.py +++ b/tests/nodes/test_graph_execution_state.py @@ -26,7 +26,8 @@ def mock_services(): images = None, queue = MemoryInvocationQueue(), graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'), - processor = DefaultInvocationProcessor() + processor = DefaultInvocationProcessor(), + restoration = None, ) def invoke_next(g: GraphExecutionState, services: InvocationServices) -> tuple[BaseInvocation, BaseInvocationOutput]: diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py index a2cc92ce7a..718baa7a1f 100644 --- a/tests/nodes/test_invoker.py +++ b/tests/nodes/test_invoker.py @@ -26,7 +26,8 @@ def mock_services() -> InvocationServices: images = None, # type: ignore queue = MemoryInvocationQueue(), graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'), - processor = DefaultInvocationProcessor() + processor = DefaultInvocationProcessor(), + restoration = None, ) @pytest.fixture() From 6a77634b34be4c996c3fafbe9bf594e51f1651bb Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 17:14:03 -0500 Subject: [PATCH 22/24] remove unneeded generate initializer routines --- invokeai/app/services/generate_initializer.py | 35 ------------------- .../backend/model_management/model_manager.py | 2 +- 2 files changed, 1 insertion(+), 36 deletions(-) delete mode 100644 invokeai/app/services/generate_initializer.py diff --git a/invokeai/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py deleted file mode 100644 index 6dd65e69b1..0000000000 --- a/invokeai/app/services/generate_initializer.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import sys -import torch -import traceback -from argparse import Namespace -from omegaconf import OmegaConf - -import invokeai.version -from ...backend import Globals - -def load_face_restoration(opt): - try: - gfpgan, codeformer, esrgan = None, None, None - if opt.restore or opt.esrgan: - from invokeai.backend.restoration import Restoration - - restoration = Restoration() - if opt.restore: - gfpgan, codeformer = restoration.load_face_restore_models( - opt.gfpgan_model_path - ) - else: - print(">> Face restoration disabled") - if opt.esrgan: - esrgan = restoration.load_esrgan(opt.esrgan_bg_tile) - else: - print(">> Upscaling disabled") - else: - print(">> Face restoration and upscaling disabled") - except (ModuleNotFoundError, ImportError): - print(traceback.format_exc(), file=sys.stderr) - print(">> You may need to install the ESRGAN and/or GFPGAN modules") - return gfpgan, codeformer, esrgan - - diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 
50ea13a2d0..06b1490c93 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -90,7 +90,7 @@ class ModelManager(object): """ return model_name in self.config - def get_model(self, model_name: str=None): + def get_model(self, model_name: str=None)->dict: """ Given a model named identified in models.yaml, return the model object. If in RAM will load into GPU VRAM. From 10cbf9931028cfe5625f2d0c47df89d42f0730c8 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 18:08:45 -0500 Subject: [PATCH 23/24] add TODO comments --- invokeai/app/services/model_manager_initializer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/services/model_manager_initializer.py b/invokeai/app/services/model_manager_initializer.py index 88a3ff6c43..3ef79f0b7e 100644 --- a/invokeai/app/services/model_manager_initializer.py +++ b/invokeai/app/services/model_manager_initializer.py @@ -11,7 +11,7 @@ from ...backend import ModelManager from ...backend.util import choose_precision, choose_torch_device from ...backend import Globals -# TODO: most of this code should be split into individual services as the Generate.py code is deprecated +# TODO: Replace with an abstract class base ModelManagerBase def get_model_manager(config: Args) -> ModelManager: if not config.conf: config_file = os.path.join(Globals.root, "configs", "models.yaml") From 74a480f94e01ef1edee6b67597df4c9bf4e1469e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Mar 2023 21:23:41 -0500 Subject: [PATCH 24/24] add back static web directory --- invokeai/app/api_app.py | 1 - static/dream_web/favicon.ico | Bin 0 -> 1150 bytes static/dream_web/index.css | 179 +++++++++++++++ static/dream_web/index.html | 187 ++++++++++++++++ static/dream_web/index.js | 396 ++++++++++++++++++++++++++++++++++ static/legacy_web/favicon.ico | Bin 0 -> 1150 bytes static/legacy_web/index.css | 152 +++++++++++++ static/legacy_web/index.html | 137 ++++++++++++ static/legacy_web/index.js | 213 ++++++++++++++++++ 9 files changed, 1264 insertions(+), 1 deletion(-) create mode 100644 static/dream_web/favicon.ico create mode 100644 static/dream_web/index.css create mode 100644 static/dream_web/index.html create mode 100644 static/dream_web/index.js create mode 100644 static/legacy_web/favicon.ico create mode 100644 static/legacy_web/index.css create mode 100644 static/legacy_web/index.html create mode 100644 static/legacy_web/index.js diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 238fb0180b..7bc38dc2dc 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -1,5 +1,4 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - import asyncio from inspect import signature diff --git a/static/dream_web/favicon.ico b/static/dream_web/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..51eb844a6a4a9d4b13e17e38b0fc915e7e97d4b5 GIT binary patch literal 1150 zcmaiy%TE(g6vi*n1a-yAr5H_2eSt+l!2}h8?$p@n=nPJTglL%pit>^TL`+1D5hx&N z)!<{Tc1e&lvO-)*Ow^TsgK$#zJKYFEA;2&@TN?6A5C9Q()1;lGF^Sd zF~GSouqjvv->jVh^vZ3gw#sUXZQHSqR>WSmwCOtUf;BK6W$k#wMKX$aiq1TKiY)i0 zVAh_I80S)!qiamC2k7>K9QPINuKnap%uv%}j+#E^Jur4AXDJpbkvT6Ctz07yN&)Z7 znrGHFe)vUp?-<1^k5RnhDB0a3h^>+{H77oj<%hM0acGw^T{k?>wWp=8-IJ2<;2zkW z55$XEACugh&R(wZ1^nba=DC(TD08@HP|IVZ?1<#7_S=$s)|_Dd@;ZI;mZvYT`CA{Y z_Vq(y{pYvZf8ANnKfH$f+a32rZ=N(I_xgGd_x}n~fRYte5_cZWQRBiY+1KuqaiB`D zuiiy$g`D(znbUIcklw#ZXiGqz&xFs legend { + width: auto; + margin-left: 0; + margin-right: auto; 
+ font-weight:bold; +} +select, input { + margin-right: 10px; + padding: 2px; +} +input:disabled { + cursor:auto; +} +input[type=submit] { + cursor: pointer; + background-color: #666; + color: white; +} +input[type=checkbox] { + cursor: pointer; + margin-right: 0px; + width: 20px; + height: 20px; + vertical-align: middle; +} +input#seed { + margin-right: 0px; +} +div { + padding: 10px 10px 10px 10px; +} +header { + margin-bottom: 16px; +} +header h1 { + margin-bottom: 0; + font-size: 2em; +} +#search-box { + display: flex; +} +#scaling-inprocess-message { + font-weight: bold; + font-style: italic; + display: none; +} +#prompt { + flex-grow: 1; + padding: 5px 10px 5px 10px; + border: 1px solid #999; + outline: none; +} +#submit { + padding: 5px 10px 5px 10px; + border: 1px solid #999; +} +#reset-all, #remove-image { + margin-top: 12px; + font-size: 0.8em; + background-color: pink; + border: 1px solid #999; + border-radius: 4px; +} +#results { + text-align: center; + margin: auto; + padding-top: 10px; +} +#results figure { + display: inline-block; + margin: 10px; +} +#results figcaption { + font-size: 0.8em; + padding: 3px; + color: #888; + cursor: pointer; +} +#results img { + border-radius: 5px; + object-fit: contain; + background-color: var(--fields-dark); +} +#fieldset-config { + line-height:2em; +} +input[type="number"] { + width: 60px; +} +#seed { + width: 150px; +} +button#reset-seed { + font-size: 1.7em; + background: #efefef; + border: 1px solid #999; + border-radius: 4px; + line-height: 0.8; + margin: 0 10px 0 0; + padding: 0 5px 3px; + vertical-align: middle; +} +label { + white-space: nowrap; +} +#progress-section { + display: none; +} +#progress-image { + width: 30vh; + height: 30vh; + object-fit: contain; + background-color: var(--fields-dark); +} +#cancel-button { + cursor: pointer; + color: red; +} +#txt2img { + background-color: var(--fields-dark); +} +#variations { + background-color: var(--fields-light); +} +#initimg { + background-color: var(--fields-dark); +} +#img2img { + background-color: var(--fields-light); +} +#initimg > :not(legend) { + background-color: var(--fields-light); + margin: .5em; +} + +#postprocess, #initimg { + display:flex; + flex-wrap:wrap; + padding: 0; + margin-top: 1em; + background-color: var(--fields-dark); +} +#postprocess > fieldset, #initimg > * { + flex-grow: 1; +} +#postprocess > fieldset { + background-color: var(--fields-dark); +} +#progress-section { + background-color: var(--fields-light); +} +#no-results-message:not(:only-child) { + display: none; +} diff --git a/static/dream_web/index.html b/static/dream_web/index.html new file mode 100644 index 0000000000..feb542adb2 --- /dev/null +++ b/static/dream_web/index.html @@ -0,0 +1,187 @@ + + + + Stable Diffusion Dream Server + + + + + + + + + + + +
[static/dream_web/index.html (187 lines added): the markup of this new file did not survive extraction; only the page's text nodes remain. Recoverable text: the heading "Stable Diffusion Dream Server", the note "For news and support for this web service, visit our GitHub site", and the upscaling status line "Postprocessing...1/3". The form fields and element ids the page must define can be inferred from index.css above and index.js below.]
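Because the markup above is lost, the sketch below (not part of the patch) records the element contract that the index.js introduced next actually depends on. The id list is copied from that script; the self-check itself is illustrative only.

// Illustrative only: verify that the document defines the elements
// static/dream_web/index.js queries. Ids are copied from that script.
const requiredIds = [
  'generate-form', 'prompt', 'seed', 'iterations', 'reset-seed', 'reset-all',
  'remove-image', 'results', 'no-results-message', 'progress-section',
  'progress-bar', 'progress-image', 'cancel-button', 'gfpgan',
  'scaling-inprocess-message', 'processing_cnt', 'processing_total',
];
window.addEventListener('DOMContentLoaded', () => {
  const missing = requiredIds.filter((id) => document.getElementById(id) === null);
  if (missing.length > 0) {
    console.warn('index.html is missing expected elements: ' + missing.join(', '));
  }
});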
+ + + diff --git a/static/dream_web/index.js b/static/dream_web/index.js new file mode 100644 index 0000000000..55bc4bdb8f --- /dev/null +++ b/static/dream_web/index.js @@ -0,0 +1,396 @@ +const socket = io(); + +var priorResultsLoadState = { + page: 0, + pages: 1, + per_page: 10, + total: 20, + offset: 0, // number of items generated since last load + loading: false, + initialized: false +}; + +function loadPriorResults() { + // Fix next page by offset + let offsetPages = priorResultsLoadState.offset / priorResultsLoadState.per_page; + priorResultsLoadState.page += offsetPages; + priorResultsLoadState.pages += offsetPages; + priorResultsLoadState.total += priorResultsLoadState.offset; + priorResultsLoadState.offset = 0; + + if (priorResultsLoadState.loading) { + return; + } + + if (priorResultsLoadState.page >= priorResultsLoadState.pages) { + return; // Nothing more to load + } + + // Load + priorResultsLoadState.loading = true + let url = new URL('/api/images', document.baseURI); + url.searchParams.append('page', priorResultsLoadState.initialized ? priorResultsLoadState.page + 1 : priorResultsLoadState.page); + url.searchParams.append('per_page', priorResultsLoadState.per_page); + fetch(url.href, { + method: 'GET', + headers: new Headers({'content-type': 'application/json'}) + }) + .then(response => response.json()) + .then(data => { + priorResultsLoadState.page = data.page; + priorResultsLoadState.pages = data.pages; + priorResultsLoadState.per_page = data.per_page; + priorResultsLoadState.total = data.total; + + data.items.forEach(function(dreamId, index) { + let src = 'api/images/' + dreamId; + fetch('/api/images/' + dreamId + '/metadata', { + method: 'GET', + headers: new Headers({'content-type': 'application/json'}) + }) + .then(response => response.json()) + .then(metadata => { + let seed = metadata.seed || 0; // TODO: Parse old metadata + appendOutput(src, seed, metadata, true); + }); + }); + + // Load until page is full + if (!priorResultsLoadState.initialized) { + if (document.body.scrollHeight <= window.innerHeight) { + loadPriorResults(); + } + } + }) + .finally(() => { + priorResultsLoadState.loading = false; + priorResultsLoadState.initialized = true; + }); +} + +function resetForm() { + var form = document.getElementById('generate-form'); + form.querySelector('fieldset').removeAttribute('disabled'); +} + +function initProgress(totalSteps, showProgressImages) { + // TODO: Progress could theoretically come from multiple jobs at the same time (in the future) + let progressSectionEle = document.querySelector('#progress-section'); + progressSectionEle.style.display = 'initial'; + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('max', totalSteps); + + let progressImageEle = document.querySelector('#progress-image'); + progressImageEle.src = BLANK_IMAGE_URL; + progressImageEle.style.display = showProgressImages ? 
'initial': 'none'; +} + +function setProgress(step, totalSteps, src) { + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('value', step); + + if (src) { + let progressImageEle = document.querySelector('#progress-image'); + progressImageEle.src = src; + } +} + +function resetProgress(hide = true) { + if (hide) { + let progressSectionEle = document.querySelector('#progress-section'); + progressSectionEle.style.display = 'none'; + } + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('value', 0); +} + +function toBase64(file) { + return new Promise((resolve, reject) => { + const r = new FileReader(); + r.readAsDataURL(file); + r.onload = () => resolve(r.result); + r.onerror = (error) => reject(error); + }); +} + +function ondragdream(event) { + let dream = event.target.dataset.dream; + event.dataTransfer.setData("dream", dream); +} + +function seedClick(event) { + // Get element + var image = event.target.closest('figure').querySelector('img'); + var dream = JSON.parse(decodeURIComponent(image.dataset.dream)); + + let form = document.querySelector("#generate-form"); + for (const [k, v] of new FormData(form)) { + if (k == 'initimg') { continue; } + let formElem = form.querySelector(`*[name=${k}]`); + formElem.value = dream[k] !== undefined ? dream[k] : formElem.defaultValue; + } + + document.querySelector("#seed").value = dream.seed; + document.querySelector('#iterations').value = 1; // Reset to 1 iteration since we clicked a single image (not a full job) + + // NOTE: leaving this manual for the user for now - it was very confusing with this behavior + // document.querySelector("#with_variations").value = variations || ''; + // if (document.querySelector("#variation_amount").value <= 0) { + // document.querySelector("#variation_amount").value = 0.2; + // } + + saveFields(document.querySelector("#generate-form")); +} + +function appendOutput(src, seed, config, toEnd=false) { + let outputNode = document.createElement("figure"); + let altText = seed.toString() + " | " + config.prompt; + + // img needs width and height for lazy loading to work + // TODO: store the full config in a data attribute on the image? + const figureContents = ` + + ${altText} + +
+        <figcaption onclick="seedClick(event)">${seed}</figcaption>
+ `; + + outputNode.innerHTML = figureContents; + + if (toEnd) { + document.querySelector("#results").append(outputNode); + } else { + document.querySelector("#results").prepend(outputNode); + } + document.querySelector("#no-results-message")?.remove(); +} + +function saveFields(form) { + for (const [k, v] of new FormData(form)) { + if (typeof v !== 'object') { // Don't save 'file' type + localStorage.setItem(k, v); + } + } +} + +function loadFields(form) { + for (const [k, v] of new FormData(form)) { + const item = localStorage.getItem(k); + if (item != null) { + form.querySelector(`*[name=${k}]`).value = item; + } + } +} + +function clearFields(form) { + localStorage.clear(); + let prompt = form.prompt.value; + form.reset(); + form.prompt.value = prompt; +} + +const BLANK_IMAGE_URL = 'data:image/svg+xml,'; +async function generateSubmit(form) { + // Convert file data to base64 + // TODO: Should probably uplaod files with formdata or something, and store them in the backend? + let formData = Object.fromEntries(new FormData(form)); + if (!formData.enable_generate && !formData.enable_init_image) { + gen_label = document.querySelector("label[for=enable_generate]").innerHTML; + initimg_label = document.querySelector("label[for=enable_init_image]").innerHTML; + alert(`Error: one of "${gen_label}" or "${initimg_label}" must be set`); + } + + + formData.initimg_name = formData.initimg.name + formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; + + // Evaluate all checkboxes + let checkboxes = form.querySelectorAll('input[type=checkbox]'); + checkboxes.forEach(function (checkbox) { + if (checkbox.checked) { + formData[checkbox.name] = 'true'; + } + }); + + let strength = formData.strength; + let totalSteps = formData.initimg ? Math.floor(strength * formData.steps) : formData.steps; + let showProgressImages = formData.progress_images; + + // Set enabling flags + + + // Initialize the progress bar + initProgress(totalSteps, showProgressImages); + + // POST, use response to listen for events + fetch(form.action, { + method: form.method, + headers: new Headers({'content-type': 'application/json'}), + body: JSON.stringify(formData), + }) + .then(response => response.json()) + .then(data => { + var jobId = data.jobId; + socket.emit('join_room', { 'room': jobId }); + }); + + form.querySelector('fieldset').setAttribute('disabled',''); +} + +function fieldSetEnableChecked(event) { + cb = event.target; + fields = cb.closest('fieldset'); + fields.disabled = !cb.checked; +} + +// Socket listeners +socket.on('job_started', (data) => {}) + +socket.on('dream_result', (data) => { + var jobId = data.jobId; + var dreamId = data.dreamId; + var dreamRequest = data.dreamRequest; + var src = 'api/images/' + dreamId; + + priorResultsLoadState.offset += 1; + appendOutput(src, dreamRequest.seed, dreamRequest); + + resetProgress(false); +}) + +socket.on('dream_progress', (data) => { + // TODO: it'd be nice if we could get a seed reported here, but the generator would need to be updated + var step = data.step; + var totalSteps = data.totalSteps; + var jobId = data.jobId; + var dreamId = data.dreamId; + + var progressType = data.progressType + if (progressType === 'GENERATION') { + var src = data.hasProgressImage ? 
+ 'api/intermediates/' + dreamId + '/' + step + : null; + setProgress(step, totalSteps, src); + } else if (progressType === 'UPSCALING_STARTED') { + // step and totalSteps are used for upscale count on this message + document.getElementById("processing_cnt").textContent = step; + document.getElementById("processing_total").textContent = totalSteps; + document.getElementById("scaling-inprocess-message").style.display = "block"; + } else if (progressType == 'UPSCALING_DONE') { + document.getElementById("scaling-inprocess-message").style.display = "none"; + } +}) + +socket.on('job_canceled', (data) => { + resetForm(); + resetProgress(); +}) + +socket.on('job_done', (data) => { + jobId = data.jobId + socket.emit('leave_room', { 'room': jobId }); + + resetForm(); + resetProgress(); +}) + +window.onload = async () => { + document.querySelector("#prompt").addEventListener("keydown", (e) => { + if (e.key === "Enter" && !e.shiftKey) { + const form = e.target.form; + generateSubmit(form); + } + }); + document.querySelector("#generate-form").addEventListener('submit', (e) => { + e.preventDefault(); + const form = e.target; + + generateSubmit(form); + }); + document.querySelector("#generate-form").addEventListener('change', (e) => { + saveFields(e.target.form); + }); + document.querySelector("#reset-seed").addEventListener('click', (e) => { + document.querySelector("#seed").value = 0; + saveFields(e.target.form); + }); + document.querySelector("#reset-all").addEventListener('click', (e) => { + clearFields(e.target.form); + }); + document.querySelector("#remove-image").addEventListener('click', (e) => { + initimg.value=null; + }); + loadFields(document.querySelector("#generate-form")); + + document.querySelector('#cancel-button').addEventListener('click', () => { + fetch('/api/cancel').catch(e => { + console.error(e); + }); + }); + document.documentElement.addEventListener('keydown', (e) => { + if (e.key === "Escape") + fetch('/api/cancel').catch(err => { + console.error(err); + }); + }); + + if (!config.gfpgan_model_exists) { + document.querySelector("#gfpgan").style.display = 'none'; + } + + window.addEventListener("scroll", () => { + if ((window.innerHeight + window.pageYOffset) >= document.body.offsetHeight) { + loadPriorResults(); + } + }); + + + + // Enable/disable forms by checkboxes + document.querySelectorAll("legend > input[type=checkbox]").forEach(function(cb) { + cb.addEventListener('change', fieldSetEnableChecked); + fieldSetEnableChecked({ target: cb}) + }); + + + // Load some of the previous results + loadPriorResults(); + + // Image drop/upload WIP + /* + let drop = document.getElementById('dropper'); + function ondrop(event) { + let dreamData = event.dataTransfer.getData('dream'); + if (dreamData) { + var dream = JSON.parse(decodeURIComponent(dreamData)); + alert(dream.dreamId); + } + }; + + function ondragenter(event) { + event.preventDefault(); + }; + + function ondragover(event) { + event.preventDefault(); + }; + + function ondragleave(event) { + + } + + drop.addEventListener('drop', ondrop); + drop.addEventListener('dragenter', ondragenter); + drop.addEventListener('dragover', ondragover); + drop.addEventListener('dragleave', ondragleave); + */ +}; diff --git a/static/legacy_web/favicon.ico b/static/legacy_web/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..51eb844a6a4a9d4b13e17e38b0fc915e7e97d4b5 GIT binary patch literal 1150 zcmaiy%TE(g6vi*n1a-yAr5H_2eSt+l!2}h8?$p@n=nPJTglL%pit>^TL`+1D5hx&N z)!<{Tc1e&lvO-)*Ow^TsgK$#zJKYFEA;2&@TN?6A5C9Q()1;lGF^Sd 
zF~GSouqjvv->jVh^vZ3gw#sUXZQHSqR>WSmwCOtUf;BK6W$k#wMKX$aiq1TKiY)i0 zVAh_I80S)!qiamC2k7>K9QPINuKnap%uv%}j+#E^Jur4AXDJpbkvT6Ctz07yN&)Z7 znrGHFe)vUp?-<1^k5RnhDB0a3h^>+{H77oj<%hM0acGw^T{k?>wWp=8-IJ2<;2zkW z55$XEACugh&R(wZ1^nba=DC(TD08@HP|IVZ?1<#7_S=$s)|_Dd@;ZI;mZvYT`CA{Y z_Vq(y{pYvZf8ANnKfH$f+a32rZ=N(I_xgGd_x}n~fRYte5_cZWQRBiY+1KuqaiB`D zuiiy$g`D(znbUIcklw#ZXiGqz&xFs + + Stable Diffusion Dream Server + + + + + + + + +
[static/legacy_web/index.html (137 lines added): markup likewise stripped to text nodes by extraction. Recoverable text: the heading "Stable Diffusion Dream Server", the note "For news and support for this web service, visit our GitHub site", the fieldset legends "Basic options", "Image-to-image options", and "Post-processing options", the status line "Postprocessing...1/3", and the empty-results placeholder "No results...".]
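The legacy script added below reads the generation endpoint as a streaming body of newline-delimited JSON events. Here is a minimal sketch of that consumption pattern in isolation; unlike the original loop it buffers a partial trailing line across chunks (an added safeguard, not in the patch), and the event names are the ones the script handles.

// Sketch of the NDJSON streaming pattern used by legacy_web/index.js.
// Each chunk may carry several newline-terminated JSON events
// ('result', 'step', 'upscaling-started', 'upscaling-done', 'canceled').
async function consumeDreamStream(response, onEvent) {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffered = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffered += decoder.decode(value, { stream: true });
    const lines = buffered.split('\n');
    buffered = lines.pop(); // keep any partial trailing line for the next chunk
    for (const line of lines) {
      if (line !== '') onEvent(JSON.parse(line));
    }
  }
}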
+ + diff --git a/static/legacy_web/index.js b/static/legacy_web/index.js new file mode 100644 index 0000000000..57ad076062 --- /dev/null +++ b/static/legacy_web/index.js @@ -0,0 +1,213 @@ +function toBase64(file) { + return new Promise((resolve, reject) => { + const r = new FileReader(); + r.readAsDataURL(file); + r.onload = () => resolve(r.result); + r.onerror = (error) => reject(error); + }); +} + +function appendOutput(src, seed, config) { + let outputNode = document.createElement("figure"); + + let variations = config.with_variations; + if (config.variation_amount > 0) { + variations = (variations ? variations + ',' : '') + seed + ':' + config.variation_amount; + } + let baseseed = (config.with_variations || config.variation_amount > 0) ? config.seed : seed; + let altText = baseseed + ' | ' + (variations ? variations + ' | ' : '') + config.prompt; + + // img needs width and height for lazy loading to work + const figureContents = ` + + ${altText} + +
+        <figcaption>${seed}</figcaption>
+ `; + + outputNode.innerHTML = figureContents; + let figcaption = outputNode.querySelector('figcaption'); + + // Reload image config + figcaption.addEventListener('click', () => { + let form = document.querySelector("#generate-form"); + for (const [k, v] of new FormData(form)) { + if (k == 'initimg') { continue; } + form.querySelector(`*[name=${k}]`).value = config[k]; + } + + document.querySelector("#seed").value = baseseed; + document.querySelector("#with_variations").value = variations || ''; + if (document.querySelector("#variation_amount").value <= 0) { + document.querySelector("#variation_amount").value = 0.2; + } + + saveFields(document.querySelector("#generate-form")); + }); + + document.querySelector("#results").prepend(outputNode); +} + +function saveFields(form) { + for (const [k, v] of new FormData(form)) { + if (typeof v !== 'object') { // Don't save 'file' type + localStorage.setItem(k, v); + } + } +} + +function loadFields(form) { + for (const [k, v] of new FormData(form)) { + const item = localStorage.getItem(k); + if (item != null) { + form.querySelector(`*[name=${k}]`).value = item; + } + } +} + +function clearFields(form) { + localStorage.clear(); + let prompt = form.prompt.value; + form.reset(); + form.prompt.value = prompt; +} + +const BLANK_IMAGE_URL = 'data:image/svg+xml,'; +async function generateSubmit(form) { + const prompt = document.querySelector("#prompt").value; + + // Convert file data to base64 + let formData = Object.fromEntries(new FormData(form)); + formData.initimg_name = formData.initimg.name + formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; + + let strength = formData.strength; + let totalSteps = formData.initimg ? Math.floor(strength * formData.steps) : formData.steps; + + let progressSectionEle = document.querySelector('#progress-section'); + progressSectionEle.style.display = 'initial'; + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('max', totalSteps); + let progressImageEle = document.querySelector('#progress-image'); + progressImageEle.src = BLANK_IMAGE_URL; + + progressImageEle.style.display = {}.hasOwnProperty.call(formData, 'progress_images') ? 
'initial': 'none'; + + // Post as JSON, using Fetch streaming to get results + fetch(form.action, { + method: form.method, + body: JSON.stringify(formData), + }).then(async (response) => { + const reader = response.body.getReader(); + + let noOutputs = true; + while (true) { + let {value, done} = await reader.read(); + value = new TextDecoder().decode(value); + if (done) { + progressSectionEle.style.display = 'none'; + break; + } + + for (let event of value.split('\n').filter(e => e !== '')) { + const data = JSON.parse(event); + + if (data.event === 'result') { + noOutputs = false; + appendOutput(data.url, data.seed, data.config); + progressEle.setAttribute('value', 0); + progressEle.setAttribute('max', totalSteps); + } else if (data.event === 'upscaling-started') { + document.getElementById("processing_cnt").textContent=data.processed_file_cnt; + document.getElementById("scaling-inprocess-message").style.display = "block"; + } else if (data.event === 'upscaling-done') { + document.getElementById("scaling-inprocess-message").style.display = "none"; + } else if (data.event === 'step') { + progressEle.setAttribute('value', data.step); + if (data.url) { + progressImageEle.src = data.url; + } + } else if (data.event === 'canceled') { + // avoid alerting as if this were an error case + noOutputs = false; + } + } + } + + // Re-enable form, remove no-results-message + form.querySelector('fieldset').removeAttribute('disabled'); + document.querySelector("#prompt").value = prompt; + document.querySelector('progress').setAttribute('value', '0'); + + if (noOutputs) { + alert("Error occurred while generating."); + } + }); + + // Disable form while generating + form.querySelector('fieldset').setAttribute('disabled',''); + document.querySelector("#prompt").value = `Generating: "${prompt}"`; +} + +async function fetchRunLog() { + try { + let response = await fetch('/run_log.json') + const data = await response.json(); + for(let item of data.run_log) { + appendOutput(item.url, item.seed, item); + } + } catch (e) { + console.error(e); + } +} + +window.onload = async () => { + document.querySelector("#prompt").addEventListener("keydown", (e) => { + if (e.key === "Enter" && !e.shiftKey) { + const form = e.target.form; + generateSubmit(form); + } + }); + document.querySelector("#generate-form").addEventListener('submit', (e) => { + e.preventDefault(); + const form = e.target; + + generateSubmit(form); + }); + document.querySelector("#generate-form").addEventListener('change', (e) => { + saveFields(e.target.form); + }); + document.querySelector("#reset-seed").addEventListener('click', (e) => { + document.querySelector("#seed").value = -1; + saveFields(e.target.form); + }); + document.querySelector("#reset-all").addEventListener('click', (e) => { + clearFields(e.target.form); + }); + document.querySelector("#remove-image").addEventListener('click', (e) => { + initimg.value=null; + }); + loadFields(document.querySelector("#generate-form")); + + document.querySelector('#cancel-button').addEventListener('click', () => { + fetch('/cancel').catch(e => { + console.error(e); + }); + }); + document.documentElement.addEventListener('keydown', (e) => { + if (e.key === "Escape") + fetch('/cancel').catch(err => { + console.error(err); + }); + }); + + if (!config.gfpgan_model_exists) { + document.querySelector("#gfpgan").style.display = 'none'; + } + await fetchRunLog() +};
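Taken together, the dream_web script in this patch expects the server to speak a specific socket.io protocol. As a closing summary, a minimal headless client sketch: event names and payload fields are copied from static/dream_web/index.js, while the import style and server URL are assumptions, since the server half is not part of this patch.

// Headless sketch of the socket.io contract used by static/dream_web/index.js.
// Event names and payload fields are as consumed by that script; the URL is
// an assumption (the page itself connects with a bare io()).
import { io } from 'socket.io-client';

const socket = io('http://localhost:9090');

function watchJob(jobId) {
  socket.emit('join_room', { room: jobId });
  socket.on('dream_progress', (data) => {
    console.log(`job ${data.jobId}: step ${data.step}/${data.totalSteps} (${data.progressType})`);
  });
  socket.on('dream_result', (data) => {
    console.log(`dream ${data.dreamId} done, seed ${data.dreamRequest.seed}`);
  });
  socket.on('job_done', (data) => {
    socket.emit('leave_room', { room: data.jobId });
  });
}

watchJob('example-job-id'); // jobId normally comes from the POST response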