diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index dbb89cc8f2..58ec78c0b8 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -24,3 +24,4 @@ jobs: days-before-pr-stale: -1 days-before-pr-close: -1 repo-token: ${{ secrets.GITHUB_TOKEN }} + operations-per-run: 500 diff --git a/.github/workflows/test-invoke-pip-skip.yml b/.github/workflows/test-invoke-pip-skip.yml index 7f9758814e..c2347e5ce3 100644 --- a/.github/workflows/test-invoke-pip-skip.yml +++ b/.github/workflows/test-invoke-pip-skip.yml @@ -1,12 +1,12 @@ name: Test invoke.py pip on: pull_request: - paths-ignore: - - 'pyproject.toml' - - 'invokeai/**' - - 'invokeai/backend/**' - - 'invokeai/configs/**' - - 'invokeai/frontend/web/dist/**' + paths: + - '**' + - '!pyproject.toml' + - '!invokeai/**' + - 'invokeai/frontend/web/**' + - '!invokeai/frontend/web/dist/**' merge_group: workflow_dispatch: diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 20d7ac94c1..30ed05379c 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -6,15 +6,13 @@ on: paths: - 'pyproject.toml' - 'invokeai/**' - - 'invokeai/backend/**' - - 'invokeai/configs/**' + - '!invokeai/frontend/web/**' - 'invokeai/frontend/web/dist/**' pull_request: paths: - 'pyproject.toml' - 'invokeai/**' - - 'invokeai/backend/**' - - 'invokeai/configs/**' + - '!invokeai/frontend/web/**' - 'invokeai/frontend/web/dist/**' types: - 'ready_for_review' diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 271a2e3be3..347fba7e97 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -4,7 +4,8 @@ import os from argparse import Namespace from ...backend import Globals -from ..services.generate_initializer import get_generate +from ..services.model_manager_initializer import get_model_manager +from ..services.restoration_services import RestorationServices from ..services.graph import GraphExecutionState from ..services.image_storage import DiskImageStorage from ..services.invocation_queue import MemoryInvocationQueue @@ -37,18 +38,16 @@ class ApiDependencies: invoker: Invoker = None @staticmethod - def initialize(args, config, event_handler_id: int): - Globals.try_patchmatch = args.patchmatch - Globals.always_use_cpu = args.always_use_cpu - Globals.internet_available = args.internet_available and check_internet() - Globals.disable_xformers = not args.xformers - Globals.ckpt_convert = args.ckpt_convert + def initialize(config, event_handler_id: int): + Globals.try_patchmatch = config.patchmatch + Globals.always_use_cpu = config.always_use_cpu + Globals.internet_available = config.internet_available and check_internet() + Globals.disable_xformers = not config.xformers + Globals.ckpt_convert = config.ckpt_convert # TODO: Use a logger print(f">> Internet connectivity is {Globals.internet_available}") - generate = get_generate(args, config) - events = FastAPIEventService(event_handler_id) output_folder = os.path.abspath( @@ -61,7 +60,7 @@ class ApiDependencies: db_location = os.path.join(output_folder, "invokeai.db") services = InvocationServices( - generate=generate, + model_manager=get_model_manager(config), events=events, images=images, queue=MemoryInvocationQueue(), @@ -69,6 +68,7 @@ class ApiDependencies: filename=db_location, table_name="graph_executions" ), processor=DefaultInvocationProcessor(), + 
restoration=RestorationServices(config), ) ApiDependencies.invoker = Invoker(services) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fb64ca3b7a..7bc38dc2dc 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -1,5 +1,4 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - import asyncio from inspect import signature @@ -53,11 +52,11 @@ config = {} # Add startup event to load dependencies @app.on_event("startup") async def startup_event(): - args = Args() - config = args.parse_args() + config = Args() + config.parse_args() ApiDependencies.initialize( - args=args, config=config, event_handler_id=event_handler_id + config=config, event_handler_id=event_handler_id ) diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py index 9dc1429d92..732a233cb4 100644 --- a/invokeai/app/cli_app.py +++ b/invokeai/app/cli_app.py @@ -17,7 +17,8 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_gra from .invocations import * from .invocations.baseinvocation import BaseInvocation from .services.events import EventServiceBase -from .services.generate_initializer import get_generate +from .services.model_manager_initializer import get_model_manager +from .services.restoration_services import RestorationServices from .services.graph import EdgeConnection, GraphExecutionState from .services.image_storage import DiskImageStorage from .services.invocation_queue import MemoryInvocationQueue @@ -126,14 +127,9 @@ def invoke_all(context: CliContext): def invoke_cli(): - args = Args() - config = args.parse_args() - - generate = get_generate(args, config) - - # NOTE: load model on first use, uncomment to load at startup - # TODO: Make this a config option? - # generate.load_model() + config = Args() + config.parse_args() + model_manager = get_model_manager(config) events = EventServiceBase() @@ -145,7 +141,7 @@ def invoke_cli(): db_location = os.path.join(output_folder, "invokeai.db") services = InvocationServices( - generate=generate, + model_manager=model_manager, events=events, images=DiskImageStorage(output_folder), queue=MemoryInvocationQueue(), @@ -153,6 +149,7 @@ def invoke_cli(): filename=db_location, table_name="graph_executions" ), processor=DefaultInvocationProcessor(), + restoration=RestorationServices(config), ) invoker = Invoker(services) diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py index 15c5f17438..c1a0028293 100644 --- a/invokeai/app/invocations/generate.py +++ b/invokeai/app/invocations/generate.py @@ -12,12 +12,12 @@ from ..services.image_storage import ImageType from ..services.invocation_services import InvocationServices from .baseinvocation import BaseInvocation, InvocationContext from .image import ImageField, ImageOutput +from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator SAMPLER_NAME_VALUES = Literal[ - "ddim", "plms", "k_lms", "k_dpm_2", "k_dpm_2_a", "k_euler", "k_euler_a", "k_heun" + tuple(InvokeAIGenerator.schedulers()) ] - # Text to image class TextToImageInvocation(BaseInvocation): """Generates an image using text2img.""" @@ -57,19 +57,18 @@ class TextToImageInvocation(BaseInvocation): # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? 
- if self.model is None or self.model == "": - self.model = context.services.generate.model_name - - # Set the model (if already cached, this does nothing) - context.services.generate.set_model(self.model) - - results = context.services.generate.prompt2image( + # (right now uses whatever current model is set in model manager) + model= context.services.model_manager.get_model() + outputs = Txt2Img(model).generate( prompt=self.prompt, step_callback=step_callback, **self.dict( exclude={"prompt"} ), # Shorthand for passing all of the parameters above manually ) + # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object + # each time it is called. We only need the first one. + generate_output = next(outputs) # Results are image and seed, unwrap for now and ignore the seed # TODO: pre-seed? @@ -78,7 +77,7 @@ class TextToImageInvocation(BaseInvocation): image_name = context.services.images.create_name( context.graph_execution_state_id, self.id ) - context.services.images.save(image_type, image_name, results[0][0]) + context.services.images.save(image_type, image_name, generate_output.image) return ImageOutput( image=ImageField(image_type=image_type, image_name=image_name) ) @@ -115,23 +114,20 @@ class ImageToImageInvocation(TextToImageInvocation): # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? - if self.model is None or self.model == "": - self.model = context.services.generate.model_name - - # Set the model (if already cached, this does nothing) - context.services.generate.set_model(self.model) - - results = context.services.generate.prompt2image( - prompt=self.prompt, - init_img=image, - init_mask=mask, - step_callback=step_callback, - **self.dict( - exclude={"prompt", "image", "mask"} - ), # Shorthand for passing all of the parameters above manually + model = context.services.model_manager.get_model() + generator_output = next( + Img2Img(model).generate( + prompt=self.prompt, + init_img=image, + init_mask=mask, + step_callback=step_callback, + **self.dict( + exclude={"prompt", "image", "mask"} + ), # Shorthand for passing all of the parameters above manually + ) ) - result_image = results[0][0] + result_image = generator_output.image # Results are image and seed, unwrap for now and ignore the seed # TODO: pre-seed? @@ -145,7 +141,6 @@ class ImageToImageInvocation(TextToImageInvocation): image=ImageField(image_type=image_type, image_name=image_name) ) - class InpaintInvocation(ImageToImageInvocation): """Generates an image using inpaint.""" @@ -180,23 +175,20 @@ class InpaintInvocation(ImageToImageInvocation): # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? 
- if self.model is None or self.model == "": - self.model = context.services.generate.model_name - - # Set the model (if already cached, this does nothing) - context.services.generate.set_model(self.model) - - results = context.services.generate.prompt2image( - prompt=self.prompt, - init_img=image, - init_mask=mask, - step_callback=step_callback, - **self.dict( - exclude={"prompt", "image", "mask"} - ), # Shorthand for passing all of the parameters above manually + model = context.services.model_manager.get_model() + generator_output = next( + Inpaint(model).generate( + prompt=self.prompt, + init_img=image, + init_mask=mask, + step_callback=step_callback, + **self.dict( + exclude={"prompt", "image", "mask"} + ), # Shorthand for passing all of the parameters above manually + ) ) - result_image = results[0][0] + result_image = generator_output.image # Results are image and seed, unwrap for now and ignore the seed # TODO: pre-seed? diff --git a/invokeai/app/invocations/reconstruct.py b/invokeai/app/invocations/reconstruct.py index a90c33605e..c4d8f3ac7c 100644 --- a/invokeai/app/invocations/reconstruct.py +++ b/invokeai/app/invocations/reconstruct.py @@ -8,7 +8,6 @@ from ..services.invocation_services import InvocationServices from .baseinvocation import BaseInvocation, InvocationContext from .image import ImageField, ImageOutput - class RestoreFaceInvocation(BaseInvocation): """Restores faces in an image.""" #fmt: off @@ -23,7 +22,7 @@ class RestoreFaceInvocation(BaseInvocation): image = context.services.images.get( self.image.image_type, self.image.image_name ) - results = context.services.generate.upscale_and_reconstruct( + results = context.services.restoration.upscale_and_reconstruct( image_list=[[image, 0]], upscale=None, strength=self.strength, # GFPGAN strength diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index dcc39fc9ad..4079877fdb 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -26,7 +26,7 @@ class UpscaleInvocation(BaseInvocation): image = context.services.images.get( self.image.image_type, self.image.image_name ) - results = context.services.generate.upscale_and_reconstruct( + results = context.services.restoration.upscale_and_reconstruct( image_list=[[image, 0]], upscale=(self.level, self.strength), strength=0.0, # GFPGAN strength diff --git a/invokeai/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py deleted file mode 100644 index 9801909742..0000000000 --- a/invokeai/app/services/generate_initializer.py +++ /dev/null @@ -1,255 +0,0 @@ -import os -import sys -import traceback -from argparse import Namespace - -import invokeai.version -from invokeai.backend import Generate, ModelManager - -from ...backend import Globals - - -# TODO: most of this code should be split into individual services as the Generate.py code is deprecated -def get_generate(args, config) -> Generate: - if not args.conf: - config_file = os.path.join(Globals.root, "configs", "models.yaml") - if not os.path.exists(config_file): - report_model_error( - args, FileNotFoundError(f"The file {config_file} could not be found.") - ) - - print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}") - print(f'>> InvokeAI runtime directory is "{Globals.root}"') - - # these two lines prevent a horrible warning message from appearing - # when the frozen CLIP tokenizer is imported - import transformers # type: ignore - - transformers.logging.set_verbosity_error() - import
diffusers - - diffusers.logging.set_verbosity_error() - - # Loading Face Restoration and ESRGAN Modules - gfpgan, codeformer, esrgan = load_face_restoration(args) - - # normalize the config directory relative to root - if not os.path.isabs(args.conf): - args.conf = os.path.normpath(os.path.join(Globals.root, args.conf)) - - if args.embeddings: - if not os.path.isabs(args.embedding_path): - embedding_path = os.path.normpath( - os.path.join(Globals.root, args.embedding_path) - ) - else: - embedding_path = args.embedding_path - else: - embedding_path = None - - # migrate legacy models - ModelManager.migrate_models() - - # load the infile as a list of lines - if args.infile: - try: - if os.path.isfile(args.infile): - infile = open(args.infile, "r", encoding="utf-8") - elif args.infile == "-": # stdin - infile = sys.stdin - else: - raise FileNotFoundError(f"{args.infile} not found.") - except (FileNotFoundError, IOError) as e: - print(f"{e}. Aborting.") - sys.exit(-1) - - # creating a Generate object: - try: - gen = Generate( - conf=args.conf, - model=args.model, - sampler_name=args.sampler_name, - embedding_path=embedding_path, - full_precision=args.full_precision, - precision=args.precision, - gfpgan=gfpgan, - codeformer=codeformer, - esrgan=esrgan, - free_gpu_mem=args.free_gpu_mem, - safety_checker=args.safety_checker, - max_loaded_models=args.max_loaded_models, - ) - except (FileNotFoundError, TypeError, AssertionError) as e: - report_model_error(opt, e) - except (IOError, KeyError) as e: - print(f"{e}. Aborting.") - sys.exit(-1) - - if args.seamless: - print(">> changed to seamless tiling mode") - - # preload the model - try: - gen.load_model() - except KeyError: - pass - except Exception as e: - report_model_error(args, e) - - # try to autoconvert new models - # autoimport new .ckpt files - if path := args.autoconvert: - gen.model_manager.autoconvert_weights( - conf_path=args.conf, - weights_directory=path, - ) - - return gen - - -def load_face_restoration(opt): - try: - gfpgan, codeformer, esrgan = None, None, None - if opt.restore or opt.esrgan: - from invokeai.backend.restoration import Restoration - - restoration = Restoration() - if opt.restore: - gfpgan, codeformer = restoration.load_face_restore_models( - opt.gfpgan_model_path - ) - else: - print(">> Face restoration disabled") - if opt.esrgan: - esrgan = restoration.load_esrgan(opt.esrgan_bg_tile) - else: - print(">> Upscaling disabled") - else: - print(">> Face restoration and upscaling disabled") - except (ModuleNotFoundError, ImportError): - print(traceback.format_exc(), file=sys.stderr) - print(">> You may need to install the ESRGAN and/or GFPGAN modules") - return gfpgan, codeformer, esrgan - - -def report_model_error(opt: Namespace, e: Exception): - print(f'** An error occurred while attempting to initialize the model: "{str(e)}"') - print( - "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models." - ) - yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE") - if yes_to_all: - print( - "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE" - ) - else: - response = input( - "Do you want to run invokeai-configure script to select and/or reinstall models? 
[y] " - ) - if response.startswith(("n", "N")): - return - - print("invokeai-configure is launching....\n") - - # Match arguments that were set on the CLI - # only the arguments accepted by the configuration script are parsed - root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else [] - config = ["--config", opt.conf] if opt.conf is not None else [] - previous_args = sys.argv - sys.argv = ["invokeai-configure"] - sys.argv.extend(root_dir) - sys.argv.extend(config) - if yes_to_all is not None: - for arg in yes_to_all.split(): - sys.argv.append(arg) - - from invokeai.frontend.install import invokeai_configure - - invokeai_configure() - # TODO: Figure out how to restart - # print('** InvokeAI will now restart') - # sys.argv = previous_args - # main() # would rather do a os.exec(), but doesn't exist? - # sys.exit(0) - - -# Temporary initializer for Generate until we migrate off of it -def old_get_generate(args, config) -> Generate: - # TODO: Remove the need for globals - from invokeai.backend.globals import Globals - - # alert - setting globals here - Globals.root = os.path.expanduser( - args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".") - ) - Globals.try_patchmatch = args.patchmatch - - print(f'>> InvokeAI runtime directory is "{Globals.root}"') - - # these two lines prevent a horrible warning message from appearing - # when the frozen CLIP tokenizer is imported - import transformers - - transformers.logging.set_verbosity_error() - - # Loading Face Restoration and ESRGAN Modules - gfpgan, codeformer, esrgan = None, None, None - try: - if config.restore or config.esrgan: - from ldm.invoke.restoration import Restoration - - restoration = Restoration() - if config.restore: - gfpgan, codeformer = restoration.load_face_restore_models( - config.gfpgan_model_path - ) - else: - print(">> Face restoration disabled") - if config.esrgan: - esrgan = restoration.load_esrgan(config.esrgan_bg_tile) - else: - print(">> Upscaling disabled") - else: - print(">> Face restoration and upscaling disabled") - except (ModuleNotFoundError, ImportError): - print(traceback.format_exc(), file=sys.stderr) - print(">> You may need to install the ESRGAN and/or GFPGAN modules") - - # normalize the config directory relative to root - if not os.path.isabs(config.conf): - config.conf = os.path.normpath(os.path.join(Globals.root, config.conf)) - - if config.embeddings: - if not os.path.isabs(config.embedding_path): - embedding_path = os.path.normpath( - os.path.join(Globals.root, config.embedding_path) - ) - else: - embedding_path = None - - # TODO: lazy-initialize this by wrapping it - try: - generate = Generate( - conf=config.conf, - model=config.model, - sampler_name=config.sampler_name, - embedding_path=embedding_path, - full_precision=config.full_precision, - precision=config.precision, - gfpgan=gfpgan, - codeformer=codeformer, - esrgan=esrgan, - free_gpu_mem=config.free_gpu_mem, - safety_checker=config.safety_checker, - max_loaded_models=config.max_loaded_models, - ) - except (FileNotFoundError, TypeError, AssertionError): - # emergency_model_reconfigure() # TODO? - sys.exit(-1) - except (IOError, KeyError) as e: - print(f"{e}. 
Aborting.") - sys.exit(-1) - - generate.free_gpu_mem = config.free_gpu_mem - - return generate diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index 42cbd6c271..7f24c34378 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -1,36 +1,39 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from invokeai.backend import Generate +from invokeai.backend import ModelManager from .events import EventServiceBase from .image_storage import ImageStorageBase +from .restoration_services import RestorationServices from .invocation_queue import InvocationQueueABC from .item_storage import ItemStorageABC - class InvocationServices: """Services that can be used by invocations""" - generate: Generate # TODO: wrap Generate, or split it up from model? events: EventServiceBase images: ImageStorageBase queue: InvocationQueueABC + model_manager: ModelManager + restoration: RestorationServices # NOTE: we must forward-declare any types that include invocations, since invocations can use services graph_execution_manager: ItemStorageABC["GraphExecutionState"] processor: "InvocationProcessorABC" def __init__( - self, - generate: Generate, - events: EventServiceBase, - images: ImageStorageBase, - queue: InvocationQueueABC, - graph_execution_manager: ItemStorageABC["GraphExecutionState"], - processor: "InvocationProcessorABC", + self, + model_manager: ModelManager, + events: EventServiceBase, + images: ImageStorageBase, + queue: InvocationQueueABC, + graph_execution_manager: ItemStorageABC["GraphExecutionState"], + processor: "InvocationProcessorABC", + restoration: RestorationServices, ): - self.generate = generate + self.model_manager = model_manager self.events = events self.images = images self.queue = queue self.graph_execution_manager = graph_execution_manager self.processor = processor + self.restoration = restoration diff --git a/invokeai/app/services/model_manager_initializer.py b/invokeai/app/services/model_manager_initializer.py new file mode 100644 index 0000000000..3ef79f0b7e --- /dev/null +++ b/invokeai/app/services/model_manager_initializer.py @@ -0,0 +1,120 @@ +import os +import sys +import torch +from argparse import Namespace +from invokeai.backend import Args +from omegaconf import OmegaConf +from pathlib import Path + +import invokeai.version +from ...backend import ModelManager +from ...backend.util import choose_precision, choose_torch_device +from ...backend import Globals + +# TODO: Replace with an abstract class base ModelManagerBase +def get_model_manager(config: Args) -> ModelManager: + if not config.conf: + config_file = os.path.join(Globals.root, "configs", "models.yaml") + if not os.path.exists(config_file): + report_model_error( + config, FileNotFoundError(f"The file {config_file} could not be found.") + ) + + print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}") + print(f'>> InvokeAI runtime directory is "{Globals.root}"') + + # these two lines prevent a horrible warning message from appearing + # when the frozen CLIP tokenizer is imported + import transformers # type: ignore + + transformers.logging.set_verbosity_error() + import diffusers + + diffusers.logging.set_verbosity_error() + + # normalize the config directory relative to root + if not os.path.isabs(config.conf): + config.conf = os.path.normpath(os.path.join(Globals.root, config.conf)) + + if config.embeddings: + if not os.path.isabs(config.embedding_path): + 
embedding_path = os.path.normpath( + os.path.join(Globals.root, config.embedding_path) + ) + else: + embedding_path = config.embedding_path + else: + embedding_path = None + + # migrate legacy models + ModelManager.migrate_models() + + # creating the model manager + try: + device = torch.device(choose_torch_device()) + precision = 'float16' if config.precision=='float16' \ + else 'float32' if config.precision=='float32' \ + else choose_precision(device) + + model_manager = ModelManager( + OmegaConf.load(config.conf), + precision=precision, + device_type=device, + max_loaded_models=config.max_loaded_models, + embedding_path = Path(embedding_path) if embedding_path else None, + ) + except (FileNotFoundError, TypeError, AssertionError) as e: + report_model_error(config, e) + except (IOError, KeyError) as e: + print(f"{e}. Aborting.") + sys.exit(-1) + + # try to autoconvert new models + # autoimport new .ckpt files + if path := config.autoconvert: + model_manager.autoconvert_weights( + conf_path=config.conf, + weights_directory=path, + ) + + return model_manager + +def report_model_error(opt: Namespace, e: Exception): + print(f'** An error occurred while attempting to initialize the model: "{str(e)}"') + print( + "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models." + ) + yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE") + if yes_to_all: + print( + "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE" + ) + else: + response = input( + "Do you want to run invokeai-configure script to select and/or reinstall models? [y] " + ) + if response.startswith(("n", "N")): + return + + print("invokeai-configure is launching....\n") + + # Match arguments that were set on the CLI + # only the arguments accepted by the configuration script are parsed + root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else [] + config = ["--config", opt.conf] if opt.conf is not None else [] + previous_args = sys.argv + sys.argv = ["invokeai-configure"] + sys.argv.extend(root_dir) + sys.argv.extend(config) + if yes_to_all is not None: + for arg in yes_to_all.split(): + sys.argv.append(arg) + + from invokeai.frontend.install import invokeai_configure + + invokeai_configure() + # TODO: Figure out how to restart + # print('** InvokeAI will now restart') + # sys.argv = previous_args + # main() # would rather do a os.exec(), but doesn't exist? + # sys.exit(0) diff --git a/invokeai/app/services/restoration_services.py b/invokeai/app/services/restoration_services.py new file mode 100644 index 0000000000..f5fc687c11 --- /dev/null +++ b/invokeai/app/services/restoration_services.py @@ -0,0 +1,109 @@ +import sys +import traceback +import torch +from ...backend.restoration import Restoration +from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE + +# This should be a real base class for postprocessing functions, +# but right now we just instantiate the existing gfpgan, esrgan +# and codeformer functions.
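+# +# A hypothetical usage sketch (assumes an Args-style config exposing the +# .restore, .esrgan, .gfpgan_model_path and .esrgan_bg_tile attributes read below): +# +#   restoration = RestorationServices(config) +#   results = restoration.upscale_and_reconstruct( +#       [[image, seed]], +#       upscale=(2, 0.75),  # (ESRGAN scale level, ESRGAN strength) +#       strength=0.8,       # GFPGAN/CodeFormer strength +#   )  # -> list of [image, seed] pairs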
+class RestorationServices: + '''Face restoration and upscaling''' + + def __init__(self, args): + try: + gfpgan, codeformer, esrgan = None, None, None + if args.restore or args.esrgan: + restoration = Restoration() + if args.restore: + gfpgan, codeformer = restoration.load_face_restore_models( + args.gfpgan_model_path + ) + else: + print(">> Face restoration disabled") + if args.esrgan: + esrgan = restoration.load_esrgan(args.esrgan_bg_tile) + else: + print(">> Upscaling disabled") + else: + print(">> Face restoration and upscaling disabled") + except (ModuleNotFoundError, ImportError): + print(traceback.format_exc(), file=sys.stderr) + print(">> You may need to install the ESRGAN and/or GFPGAN modules") + self.device = torch.device(choose_torch_device()) + self.gfpgan = gfpgan + self.codeformer = codeformer + self.esrgan = esrgan + + # note that this one method does gfpgan and codeformer reconstruction, as well as + # esrgan upscaling + # TO DO: refactor into separate methods + def upscale_and_reconstruct( + self, + image_list, + facetool="gfpgan", + upscale=None, + upscale_denoise_str=0.75, + strength=0.0, + codeformer_fidelity=0.75, + save_original=False, + image_callback=None, + prefix=None, + ): + results = [] + for r in image_list: + image, seed = r + try: + if strength > 0: + if self.gfpgan is not None or self.codeformer is not None: + if facetool == "gfpgan": + if self.gfpgan is None: + print( + ">> GFPGAN not found. Face restoration is disabled." + ) + else: + image = self.gfpgan.process(image, strength, seed) + if facetool == "codeformer": + if self.codeformer is None: + print( + ">> CodeFormer not found. Face restoration is disabled." + ) + else: + cf_device = ( + CPU_DEVICE if self.device == MPS_DEVICE else self.device + ) + image = self.codeformer.process( + image=image, + strength=strength, + device=cf_device, + seed=seed, + fidelity=codeformer_fidelity, + ) + else: + print(">> Face Restoration is disabled.") + if upscale is not None: + if self.esrgan is not None: + if len(upscale) < 2: + upscale.append(0.75) + image = self.esrgan.process( + image, + upscale[1], + seed, + int(upscale[0]), + denoise_str=upscale_denoise_str, + ) + else: + print(">> ESRGAN is disabled. Image not upscaled.") + except Exception as e: + print( + f">> Error running RealESRGAN or GFPGAN.
Your image was not upscaled.\n{e}" + ) + + if image_callback is not None: + image_callback(image, seed, upscaled=True, use_prefix=prefix) + else: + r[0] = image + + results.append([image, seed]) + + return results diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index 06089369c2..06066dd6b1 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -2,6 +2,15 @@ Initialization file for invokeai.backend """ from .generate import Generate +from .generator import ( + InvokeAIGeneratorBasicParams, + InvokeAIGenerator, + InvokeAIGeneratorOutput, + Txt2Img, + Img2Img, + Inpaint +) from .model_management import ModelManager +from .safety_checker import SafetyChecker from .args import Args from .globals import Globals diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py index 35dba41ffb..1b19a1aa7e 100644 --- a/invokeai/backend/generate.py +++ b/invokeai/backend/generate.py @@ -25,18 +25,19 @@ from accelerate.utils import set_seed from diffusers.pipeline_utils import DiffusionPipeline from diffusers.utils.import_utils import is_xformers_available from omegaconf import OmegaConf +from pathlib import Path from .args import metadata_from_png from .generator import infill_methods from .globals import Globals, global_cache_dir from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding from .model_management import ModelManager +from .safety_checker import SafetyChecker from .prompting import get_uc_and_c_and_ec from .prompting.conditioning import log_tokenization from .stable_diffusion import HuggingFaceConceptsLibrary from .util import choose_precision, choose_torch_device - def fix_func(orig): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): @@ -222,6 +223,7 @@ class Generate: self.precision, max_loaded_models=max_loaded_models, sequential_offload=self.free_gpu_mem, + embedding_path=Path(self.embedding_path), ) # don't accept invalid models fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME @@ -244,31 +246,8 @@ class Generate: # load safety checker if requested if safety_checker: - try: - print(">> Initializing NSFW checker") - from diffusers.pipelines.stable_diffusion.safety_checker import ( - StableDiffusionSafetyChecker, - ) - from transformers import AutoFeatureExtractor - - safety_model_id = "CompVis/stable-diffusion-safety-checker" - safety_model_path = global_cache_dir("hub") - self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( - safety_model_id, - local_files_only=True, - cache_dir=safety_model_path, - ) - self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained( - safety_model_id, - local_files_only=True, - cache_dir=safety_model_path, - ) - self.safety_checker.to(self.device) - except Exception: - print( - "** An error was encountered while installing the safety checker:" - ) - print(traceback.format_exc()) + print(">> Initializing NSFW checker") + self.safety_checker = SafetyChecker(self.device) else: print(">> NSFW checker is disabled") @@ -523,15 +502,6 @@ class Generate: generator.set_variation(self.seed, variation_amount, with_variations) generator.use_mps_noise = use_mps_noise - checker = ( - { - "checker": self.safety_checker, - "extractor": self.safety_feature_extractor, - } - if self.safety_checker - else None - ) - results = generator.generate( prompt, iterations=iterations, @@ -558,7 +528,7 @@ class Generate: embiggen_strength=embiggen_strength, inpaint_replace=inpaint_replace, mask_blur_radius=mask_blur_radius, - 
safety_checker=checker, + safety_checker=self.safety_checker, seam_size=seam_size, seam_blur=seam_blur, seam_strength=seam_strength, @@ -940,18 +910,6 @@ class Generate: self.generators = {} set_seed(random.randrange(0, np.iinfo(np.uint32).max)) - if self.embedding_path is not None: - print(f">> Loading embeddings from {self.embedding_path}") - for root, _, files in os.walk(self.embedding_path): - for name in files: - ti_path = os.path.join(root, name) - self.model.textual_inversion_manager.load_textual_inversion( - ti_path, defer_injecting_tokens=True - ) - print( - f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}' - ) - self.model_name = model_name self._set_scheduler() # requires self.model_name to be set first return self.model @@ -998,7 +956,7 @@ class Generate: ): results = [] for r in image_list: - image, seed = r + image, seed, _ = r try: if strength > 0: if self.gfpgan is not None or self.codeformer is not None: diff --git a/invokeai/backend/generator/__init__.py b/invokeai/backend/generator/__init__.py index b01e93ad81..9d6263453a 100644 --- a/invokeai/backend/generator/__init__.py +++ b/invokeai/backend/generator/__init__.py @@ -1,5 +1,13 @@ """ Initialization file for the invokeai.generator package """ -from .base import Generator +from .base import ( + InvokeAIGenerator, + InvokeAIGeneratorBasicParams, + InvokeAIGeneratorOutput, + Txt2Img, + Img2Img, + Inpaint, + Generator, +) from .inpaint import infill_methods diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index 881d3deaff..4ec0f9d54f 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -4,11 +4,15 @@ including img2img, txt2img, and inpaint """ from __future__ import annotations +import itertools +import dataclasses +import diffusers import os import random import traceback +from abc import ABCMeta +from argparse import Namespace from contextlib import nullcontext -from pathlib import Path import cv2 import numpy as np @@ -17,13 +21,258 @@ from PIL import Image, ImageChops, ImageFilter from accelerate.utils import set_seed from diffusers import DiffusionPipeline from tqdm import trange +from typing import List, Iterator, Type +from dataclasses import dataclass, field +from diffusers.schedulers import SchedulerMixin as Scheduler -import invokeai.assets.web as web_assets +from ..image_util import configure_model_padding from ..util.util import rand_perlin_2d +from ..safety_checker import SafetyChecker +from ..prompting.conditioning import get_uc_and_c_and_ec +from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline downsampling = 8 -CAUTION_IMG = "caution.png" +@dataclass +class InvokeAIGeneratorBasicParams: + seed: int=None + width: int=512 + height: int=512 + cfg_scale: float=7.5 + steps: int=20 + ddim_eta: float=0.0 + scheduler: str='ddim' + precision: str='float16' + perlin: float=0.0 + threshold: float=0.0 + seamless: bool=False + seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y']) + h_symmetry_time_pct: float=None + v_symmetry_time_pct: float=None + variation_amount: float = 0.0 + with_variations: list=field(default_factory=list) + safety_checker: SafetyChecker=None + +@dataclass +class InvokeAIGeneratorOutput: + ''' + InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation + operation, including the image, its seed, the model name used to generate the image + and the model hash, as well as all the generate()
parameters that went into + generating the image (in .params, also available as attributes) + ''' + image: Image + seed: int + model_hash: str + attention_maps_images: List[Image] + params: Namespace + +# we are interposing a wrapper around the original Generator classes so that +# old code that calls Generate will continue to work. +class InvokeAIGenerator(metaclass=ABCMeta): + scheduler_map = dict( + ddim=diffusers.DDIMScheduler, + dpmpp_2=diffusers.DPMSolverMultistepScheduler, + k_dpm_2=diffusers.KDPM2DiscreteScheduler, + k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler, + k_dpmpp_2=diffusers.DPMSolverMultistepScheduler, + k_euler=diffusers.EulerDiscreteScheduler, + k_euler_a=diffusers.EulerAncestralDiscreteScheduler, + k_heun=diffusers.HeunDiscreteScheduler, + k_lms=diffusers.LMSDiscreteScheduler, + plms=diffusers.PNDMScheduler, + ) + + def __init__(self, + model_info: dict, + params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(), + ): + self.model_info=model_info + self.params=params + + def generate(self, + prompt: str='', + callback: callable=None, + step_callback: callable=None, + iterations: int=1, + **keyword_args, + )->Iterator[InvokeAIGeneratorOutput]: + ''' + Return an iterator across the indicated number of generations. + Each call to next() on the iterator returns an InvokeAIGeneratorOutput + object. Use like this: + + outputs = txt2img.generate(prompt='banana sushi', iterations=5) + for result in outputs: + print(result.image, result.seed) + + In the typical case, where just a single image is wanted, iterations + defaults to 1 and you can simply do: + + output = next(txt2img.generate(prompt='banana sushi')) + + Pass iterations=None to get an infinite iterator. + + outputs = txt2img.generate(prompt='banana sushi', iterations=None) + for o in outputs: + print(o.image, o.seed) + + ''' + generator_args = dataclasses.asdict(self.params) + generator_args.update(keyword_args) + + model_info = self.model_info + model_name = model_info['model_name'] + model:StableDiffusionGeneratorPipeline = model_info['model'] + model_hash = model_info['hash'] + scheduler: Scheduler = self.get_scheduler( + model=model, + scheduler_name=generator_args.get('scheduler') + ) + uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model) + gen_class = self._generator_class() + generator = gen_class(model, self.params.precision) + if self.params.variation_amount > 0: + generator.set_variation(generator_args.get('seed'), + generator_args.get('variation_amount'), + generator_args.get('with_variations') + ) + + if isinstance(model, DiffusionPipeline): + for component in [model.unet, model.vae]: + configure_model_padding(component, + generator_args.get('seamless',False), + generator_args.get('seamless_axes') + ) + else: + configure_model_padding(model, + generator_args.get('seamless',False), + generator_args.get('seamless_axes') + ) + + iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1) + for i in iteration_count: + results = generator.generate(prompt, + conditioning=(uc, c, extra_conditioning_info), + sampler=scheduler, + **generator_args, + ) + output = InvokeAIGeneratorOutput( + image=results[0][0], + seed=results[0][1], + attention_maps_images=results[0][2], + model_hash = model_hash, + params=Namespace(model_name=model_name,**generator_args), + ) + if callback: + callback(output) + yield output + + @classmethod + def schedulers(self)->List[str]: + ''' + Return list of all the schedulers that we currently handle.
+ ''' + return list(self.scheduler_map.keys()) + + def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]): + return generator_class(model, self.params.precision) + + def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler: + scheduler_class = self.scheduler_map.get(scheduler_name, self.scheduler_map['ddim']) + scheduler = scheduler_class.from_config(model.scheduler.config) + # hack copied over from generate.py + if not hasattr(scheduler, 'uses_inpainting_model'): + scheduler.uses_inpainting_model = lambda: False + return scheduler + + @classmethod + def _generator_class(cls)->Type[Generator]: + ''' + In derived classes, return the generator class to apply; the derived + classes' names nicely parallel the generator class names. If you don't + override this, the base Generator class is returned. + ''' + return Generator + +# ------------------------------------ +class Txt2Img(InvokeAIGenerator): + @classmethod + def _generator_class(cls): + from .txt2img import Txt2Img + return Txt2Img + +# ------------------------------------ +class Img2Img(InvokeAIGenerator): + def generate(self, + init_image: Image | torch.FloatTensor, + strength: float=0.75, + **keyword_args + )->List[InvokeAIGeneratorOutput]: + return super().generate(init_image=init_image, + strength=strength, + **keyword_args + ) + @classmethod + def _generator_class(cls): + from .img2img import Img2Img + return Img2Img + +# ------------------------------------ +# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff +class Inpaint(Img2Img): + def generate(self, + mask_image: Image | torch.FloatTensor, + # Seam settings - when 0, doesn't fill seam + seam_size: int = 0, + seam_blur: int = 0, + seam_strength: float = 0.7, + seam_steps: int = 10, + tile_size: int = 32, + inpaint_replace=False, + infill_method=None, + inpaint_width=None, + inpaint_height=None, + inpaint_fill: tuple = (0x7F, 0x7F, 0x7F, 0xFF), + **keyword_args + )->List[InvokeAIGeneratorOutput]: + return super().generate( + mask_image=mask_image, + seam_size=seam_size, + seam_blur=seam_blur, + seam_strength=seam_strength, + seam_steps=seam_steps, + tile_size=tile_size, + inpaint_replace=inpaint_replace, + infill_method=infill_method, + inpaint_width=inpaint_width, + inpaint_height=inpaint_height, + inpaint_fill=inpaint_fill, + **keyword_args + ) + @classmethod + def _generator_class(cls): + from .inpaint import Inpaint + return Inpaint + +# ------------------------------------ +class Embiggen(Txt2Img): + def generate( + self, + embiggen: list=None, + embiggen_tiles: list = None, + strength: float=0.75, + **kwargs)->List[InvokeAIGeneratorOutput]: + return super().generate(embiggen=embiggen, + embiggen_tiles=embiggen_tiles, + strength=strength, + **kwargs) + + @classmethod + def _generator_class(cls): + from .embiggen import Embiggen + return Embiggen + class Generator: downsampling_factor: int @@ -44,7 +293,6 @@ class Generator: self.with_variations = [] self.use_mps_noise = False self.free_gpu_mem = None - self.caution_img = None # this is going to be overridden in img2img.py, txt2img.py and inpaint.py def get_make_image(self, prompt, **kwargs): @@ -64,10 +312,10 @@ class Generator: def generate( self, prompt, - init_image, width, height, sampler, + init_image=None, iterations=1, seed=None, image_callback=None, @@ -76,7 +324,7 @@ class Generator: perlin=0.0, h_symmetry_time_pct=None, v_symmetry_time_pct=None, - safety_checker: dict = None, + safety_checker: SafetyChecker=None, free_gpu_mem:
bool = False, **kwargs, ): @@ -130,9 +378,9 @@ class Generator: image = make_image(x_T) if self.safety_checker is not None: - image = self.safety_check(image) + image = self.safety_checker.check(image) - results.append([image, seed]) + results.append([image, seed, attention_maps_images]) if image_callback is not None: attention_maps_image = ( @@ -292,16 +540,6 @@ class Generator: seed = random.randrange(0, np.iinfo(np.uint32).max) return (seed, initial_noise) - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self, width, height): - """ - Returns a tensor filled with random numbers, either form a normal distribution - (txt2img) or from the latent image (img2img, inpaint) - """ - raise NotImplementedError( - "get_noise() must be implemented in a descendent class" - ) - def get_perlin_noise(self, width, height): fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device # limit noise to only the diffusion image channels, not the mask channels @@ -361,53 +599,6 @@ class Generator: return v2 - def safety_check(self, image: Image.Image): - """ - If the CompViz safety checker flags an NSFW image, we - blur it out. - """ - import diffusers - - checker = self.safety_checker["checker"] - extractor = self.safety_checker["extractor"] - features = extractor([image], return_tensors="pt") - features.to(self.model.device) - - # unfortunately checker requires the numpy version, so we have to convert back - x_image = np.array(image).astype(np.float32) / 255.0 - x_image = x_image[None].transpose(0, 3, 1, 2) - - diffusers.logging.set_verbosity_error() - checked_image, has_nsfw_concept = checker( - images=x_image, clip_input=features.pixel_values - ) - if has_nsfw_concept[0]: - print( - "** An image with potential non-safe content has been detected. A blurred image will be returned. **" - ) - return self.blur(image) - else: - return image - - def blur(self, input): - blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) - try: - caution = self.get_caution_img() - if caution: - blurry.paste(caution, (0, 0), caution) - except FileNotFoundError: - pass - return blurry - - def get_caution_img(self): - path = None - if self.caution_img: - return self.caution_img - path = Path(web_assets.__path__[0]) / CAUTION_IMG - caution = Image.open(path) - self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) - return self.caution_img - # this is a handy routine for debugging use. Given a generated sample, # convert it into a PNG image and store it at the indicated path def save_sample(self, sample, filepath): diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 4627f283f5..06b1490c93 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -34,8 +34,7 @@ from picklescan.scanner import scan_file_path from invokeai.backend.globals import Globals, global_cache_dir from ..stable_diffusion import StableDiffusionGeneratorPipeline -from ..util import CPU_DEVICE, ask_user, download_with_resume - +from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume class SDLegacyType(Enum): V1 = 1 @@ -51,23 +50,29 @@ VAE_TO_REPO_ID = { # hack, see note in convert_and_import() } class ModelManager(object): + ''' + Model manager handles loading, caching, importing, deleting, converting, and editing models. 
+ ''' def __init__( - self, - config: OmegaConf, - device_type: torch.device = CPU_DEVICE, - precision: str = "float16", - max_loaded_models=DEFAULT_MAX_MODELS, - sequential_offload=False, + self, + config: OmegaConf|Path, + device_type: torch.device = CUDA_DEVICE, + precision: str = "float16", + max_loaded_models=DEFAULT_MAX_MODELS, + sequential_offload=False, + embedding_path: Path=None, ): """ - Initialize with the path to the models.yaml config file, - the torch device type, and precision. The optional - min_avail_mem argument specifies how much unused system - (CPU) memory to preserve. The cache of models in RAM will - grow until this value is approached. Default is 2G. + Initialize with the path to the models.yaml config file or + an initialized OmegaConf dictionary. Optional parameters + are the torch device type, precision, max_loaded_models, + and sequential_offload boolean. Note that the default device + type and precision are set up for a CUDA system running at half precision. """ # prevent nasty-looking CLIP log message transformers.logging.set_verbosity_error() + if not isinstance(config, DictConfig): + config = OmegaConf.load(config) self.config = config self.precision = precision self.device = torch.device(device_type) @@ -76,6 +81,7 @@ class ModelManager(object): self.stack = [] # this is an LRU FIFO self.current_model = None self.sequential_offload = sequential_offload + self.embedding_path = embedding_path def valid_model(self, model_name: str) -> bool: """ @@ -84,12 +90,15 @@ class ModelManager(object): """ return model_name in self.config - def get_model(self, model_name: str): + def get_model(self, model_name: str=None)->dict: """ Given a model named identified in models.yaml, return the model object. If in RAM will load into GPU VRAM. If on disk, will load from there. """ + if not model_name: + return self.current_model if self.current_model else self.get_model(self.default_model()) + if not self.valid_model(model_name): print( f'** "{model_name}" is not a known model name. 
Please check your models.yaml file' @@ -112,6 +121,7 @@ class ModelManager(object): else: # we're about to load a new model, so potentially offload the least recently used one requested_model, width, height, hash = self._load_model(model_name) self.models[model_name] = { + "model_name": model_name, "model": requested_model, "width": width, "height": height, @@ -121,6 +131,7 @@ class ModelManager(object): self.current_model = model_name self._push_newest_model(model_name) return { + "model_name": model_name, "model": requested_model, "width": width, "height": height, @@ -425,6 +436,7 @@ class ModelManager(object): height = width print(f" | Default image dimensions = {width} x {height}") + self._add_embeddings_to_model(pipeline) return pipeline, width, height, model_hash @@ -1061,6 +1073,19 @@ class ModelManager(object): self.stack.remove(model_name) self.stack.append(model_name) + def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline): + if self.embedding_path is not None: + print(f">> Loading embeddings from {self.embedding_path}") + for root, _, files in os.walk(self.embedding_path): + for name in files: + ti_path = os.path.join(root, name) + model.textual_inversion_manager.load_textual_inversion( + ti_path, defer_injecting_tokens=True + ) + print( + f'>> Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}' + ) + def _has_cuda(self) -> bool: return self.device.type == "cuda" diff --git a/invokeai/backend/safety_checker.py b/invokeai/backend/safety_checker.py new file mode 100644 index 0000000000..2e6c4fd479 --- /dev/null +++ b/invokeai/backend/safety_checker.py @@ -0,0 +1,82 @@ +''' +SafetyChecker class - checks images against the StabilityAI NSFW filter +and blurs images that contain potential NSFW content. 
+''' +import diffusers +import numpy as np +import torch +import traceback +from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) +from pathlib import Path +from PIL import Image, ImageFilter +from transformers import AutoFeatureExtractor + +import invokeai.assets.web as web_assets +from .globals import global_cache_dir +from .util import CPU_DEVICE + +class SafetyChecker(object): + CAUTION_IMG = "caution.png" + + def __init__(self, device: torch.device): + path = Path(web_assets.__path__[0]) / self.CAUTION_IMG + caution = Image.open(path) + self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) + self.device = device + + try: + safety_model_id = "CompVis/stable-diffusion-safety-checker" + safety_model_path = global_cache_dir("hub") + self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( + safety_model_id, + local_files_only=True, + cache_dir=safety_model_path, + ) + self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained( + safety_model_id, + local_files_only=True, + cache_dir=safety_model_path, + ) + except Exception: + print( + "** An error was encountered while installing the safety checker:" + ) + print(traceback.format_exc()) + + def check(self, image: Image.Image): + """ + Check the provided image against the StabilityAI safety checker and return + the original image if it passes, or a blurred copy if potential NSFW + content is detected. + """ + + self.safety_checker.to(self.device) + features = self.safety_feature_extractor([image], return_tensors="pt") + features.to(self.device) + + # unfortunately checker requires the numpy version, so we have to convert back + x_image = np.array(image).astype(np.float32) / 255.0 + x_image = x_image[None].transpose(0, 3, 1, 2) + + diffusers.logging.set_verbosity_error() + checked_image, has_nsfw_concept = self.safety_checker( + images=x_image, clip_input=features.pixel_values + ) + self.safety_checker.to(CPU_DEVICE) # offload + if has_nsfw_concept[0]: + print( + "** An image with potential non-safe content has been detected. A blurred image will be returned. **" + ) + return self.blur(image) + else: + return image + + def blur(self, input): + blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) + try: + if caution := self.caution_img: + blurry.paste(caution, (0, 0), caution) + except FileNotFoundError: + pass + return blurry diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index a52d9c10ec..ad3fdaf3ed 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -63,7 +63,14 @@ "back": "Atrás", "statusConvertingModel": "Convertir el modelo", "statusModelConverted": "Modelo adaptado", - "statusMergingModels": "Fusionar modelos" + "statusMergingModels": "Fusionar modelos", + "oceanTheme": "Océano", + "langPortuguese": "Portugués", + "langKorean": "Coreano", + "langHebrew": "Hebreo", + "pinOptionsPanel": "Pin del panel de opciones", + "loading": "Cargando", + "loadingInvokeAI": "Cargando Invoke AI" }, "gallery": { "generations": "Generaciones", @@ -385,14 +392,19 @@ "modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.", "modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.
", "ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados", - "modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.", + "modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.", "inverseSigmoid": "Sigmoideo inverso", "weightedSum": "Modelo de suma ponderada", "sigmoid": "Función sigmoide", "allModels": "Todos los modelos", "repo_id": "Identificador del repositorio", "pathToCustomConfig": "Ruta a la configuración personalizada", - "customConfig": "Configuración personalizada" + "customConfig": "Configuración personalizada", + "v2_base": "v2 (512px)", + "none": "ninguno", + "pickModelType": "Elige el tipo de modelo", + "v2_768": "v2 (768px)", + "addDifference": "Añadir una diferencia" }, "parameters": { "images": "Imágenes", @@ -588,5 +600,27 @@ "betaDarkenOutside": "Oscurecer fuera", "betaLimitToBox": "Limitar a caja", "betaPreserveMasked": "Preservar área enmascarada" + }, + "accessibility": { + "invokeProgressBar": "Activar la barra de progreso", + "modelSelect": "Seleccionar modelo", + "reset": "Reiniciar", + "uploadImage": "Cargar imagen", + "previousImage": "Imagen anterior", + "nextImage": "Siguiente imagen", + "useThisParameter": "Utiliza este parámetro", + "copyMetadataJson": "Copiar los metadatos JSON", + "exitViewer": "Salir del visor", + "zoomIn": "Acercar", + "zoomOut": "Alejar", + "rotateCounterClockwise": "Girar en sentido antihorario", + "rotateClockwise": "Girar en sentido horario", + "flipHorizontally": "Voltear horizontalmente", + "flipVertically": "Voltear verticalmente", + "modifyConfig": "Modificar la configuración", + "toggleAutoscroll": "Activar el autodesplazamiento", + "toggleLogViewer": "Alternar el visor de registros", + "showGallery": "Mostrar galería", + "showOptionsPanel": "Mostrar el panel de opciones" } } diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 9bdf5b7798..61aa5c6a08 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -63,7 +63,14 @@ "langSimplifiedChinese": "Cinese semplificato", "langDutch": "Olandese", "statusModelConverted": "Modello Convertito", - "statusConvertingModel": "Conversione Modello" + "statusConvertingModel": "Conversione Modello", + "langKorean": "Coreano", + "langPortuguese": "Portoghese", + "pinOptionsPanel": "Blocca il pannello Opzioni", + "loading": "Caricamento in corso", + "oceanTheme": "Oceano", + "langHebrew": "Ebraico", + "loadingInvokeAI": "Caricamento Invoke AI" }, "gallery": { "generations": "Generazioni", @@ -392,7 +399,12 @@ "customSaveLocation": "Ubicazione salvataggio personalizzata", "weightedSum": "Somma pesata", "sigmoid": "Sigmoide", - "inverseSigmoid": "Sigmoide inverso" + "inverseSigmoid": "Sigmoide inverso", + "v2_base": "v2 (512px)", + "v2_768": "v2 (768px)", + "none": "niente", + "addDifference": "Aggiungi differenza", + "pickModelType": "Scegli il tipo di modello" }, "parameters": { "images": "Immagini", @@ -588,5 +600,27 @@ "betaDarkenOutside": "Oscura all'esterno", "betaLimitToBox": "Limita al rettangolo", "betaPreserveMasked": "Conserva quanto mascherato" + }, + "accessibility": { + "modelSelect": "Seleziona modello", + "invokeProgressBar": "Barra di avanzamento generazione", + "uploadImage": "Carica immagine", + "previousImage": "Immagine
precedente", + "nextImage": "Immagine successiva", + "useThisParameter": "Usa questo parametro", + "reset": "Reimposta", + "copyMetadataJson": "Copia i metadati JSON", + "exitViewer": "Esci dal visualizzatore", + "zoomIn": "Zoom avanti", + "zoomOut": "Zoom Indietro", + "rotateCounterClockwise": "Ruotare in senso antiorario", + "rotateClockwise": "Ruotare in senso orario", + "flipHorizontally": "Capovolgi orizzontalmente", + "toggleLogViewer": "Attiva/disattiva visualizzatore registro", + "showGallery": "Mostra la galleria immagini", + "showOptionsPanel": "Mostra il pannello opzioni", + "flipVertically": "Capovolgi verticalmente", + "toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico", + "modifyConfig": "Modifica configurazione" } } diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index 39d383e37f..6e26b9ea56 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -63,6 +63,560 @@ "statusGeneratingOutpainting": "Geração de Ampliação", "statusGenerationComplete": "Geração Completa", "statusMergingModels": "Mesclando Modelos", - "statusMergedModels": "Modelos Mesclados" + "statusMergedModels": "Modelos Mesclados", + "oceanTheme": "Oceano", + "pinOptionsPanel": "Fixar painel de opções", + "loading": "A carregar", + "loadingInvokeAI": "A carregar Invoke AI", + "langPortuguese": "Português" + }, + "gallery": { + "galleryImageResetSize": "Resetar Imagem", + "gallerySettings": "Configurações de Galeria", + "maintainAspectRatio": "Mater Proporções", + "autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente", + "pinGallery": "Fixar Galeria", + "singleColumnLayout": "Disposição em Coluna Única", + "allImagesLoaded": "Todas as Imagens Carregadas", + "loadMore": "Carregar Mais", + "noImagesInGallery": "Sem Imagens na Galeria", + "generations": "Gerações", + "showGenerations": "Mostrar Gerações", + "uploads": "Enviados", + "showUploads": "Mostrar Enviados", + "galleryImageSize": "Tamanho da Imagem" + }, + "hotkeys": { + "generalHotkeys": "Atalhos Gerais", + "galleryHotkeys": "Atalhos da Galeria", + "toggleViewer": { + "title": "Ativar Visualizador", + "desc": "Abrir e fechar o Visualizador de Imagens" + }, + "maximizeWorkSpace": { + "desc": "Fechar painéis e maximixar área de trabalho", + "title": "Maximizar a Área de Trabalho" + }, + "changeTabs": { + "title": "Mudar Guias", + "desc": "Trocar para outra área de trabalho" + }, + "consoleToggle": { + "desc": "Abrir e fechar console", + "title": "Ativar Console" + }, + "setPrompt": { + "title": "Definir Prompt", + "desc": "Usar o prompt da imagem atual" + }, + "sendToImageToImage": { + "desc": "Manda a imagem atual para Imagem Para Imagem", + "title": "Mandar para Imagem Para Imagem" + }, + "previousImage": { + "desc": "Mostra a imagem anterior na galeria", + "title": "Imagem Anterior" + }, + "nextImage": { + "title": "Próxima Imagem", + "desc": "Mostra a próxima imagem na galeria" + }, + "decreaseGalleryThumbSize": { + "desc": "Diminui o tamanho das thumbs na galeria", + "title": "Diminuir Tamanho da Galeria de Imagem" + }, + "selectBrush": { + "title": "Selecionar Pincel", + "desc": "Seleciona o pincel" + }, + "selectEraser": { + "title": "Selecionar Apagador", + "desc": "Seleciona o apagador" + }, + "decreaseBrushSize": { + "title": "Diminuir Tamanho do Pincel", + "desc": "Diminui o tamanho do pincel/apagador" + }, + "increaseBrushOpacity": { + "desc": "Aumenta a opacidade do pincel", + "title": "Aumentar Opacidade do 
Pincel" + }, + "moveTool": { + "title": "Ferramenta Mover", + "desc": "Permite navegar pela tela" + }, + "decreaseBrushOpacity": { + "desc": "Diminui a opacidade do pincel", + "title": "Diminuir Opacidade do Pincel" + }, + "toggleSnap": { + "title": "Ativar Encaixe", + "desc": "Ativa Encaixar na Grade" + }, + "quickToggleMove": { + "title": "Ativar Mover Rapidamente", + "desc": "Temporariamente ativa o modo Mover" + }, + "toggleLayer": { + "title": "Ativar Camada", + "desc": "Ativa a seleção de camada de máscara/base" + }, + "clearMask": { + "title": "Limpar Máscara", + "desc": "Limpa toda a máscara" + }, + "hideMask": { + "title": "Esconder Máscara", + "desc": "Esconde e Revela a máscara" + }, + "mergeVisible": { + "title": "Fundir Visível", + "desc": "Fundir todas as camadas visíveis das telas" + }, + "downloadImage": { + "desc": "Descarregar a tela atual", + "title": "Descarregar Imagem" + }, + "undoStroke": { + "title": "Desfazer Traço", + "desc": "Desfaz um traço de pincel" + }, + "redoStroke": { + "title": "Refazer Traço", + "desc": "Refaz o traço de pincel" + }, + "keyboardShortcuts": "Atalhos de Teclado", + "appHotkeys": "Atalhos do app", + "invoke": { + "title": "Invocar", + "desc": "Gerar uma imagem" + }, + "cancel": { + "title": "Cancelar", + "desc": "Cancelar geração de imagem" + }, + "focusPrompt": { + "title": "Foco do Prompt", + "desc": "Foco da área de texto do prompt" + }, + "toggleOptions": { + "title": "Ativar Opções", + "desc": "Abrir e fechar o painel de opções" + }, + "pinOptions": { + "title": "Fixar Opções", + "desc": "Fixar o painel de opções" + }, + "closePanels": { + "title": "Fechar Painéis", + "desc": "Fecha os painéis abertos" + }, + "unifiedCanvasHotkeys": "Atalhos da Tela Unificada", + "toggleGallery": { + "title": "Ativar Galeria", + "desc": "Abrir e fechar a gaveta da galeria" + }, + "setSeed": { + "title": "Definir Seed", + "desc": "Usar seed da imagem atual" + }, + "setParameters": { + "title": "Definir Parâmetros", + "desc": "Usar todos os parâmetros da imagem atual" + }, + "restoreFaces": { + "title": "Restaurar Rostos", + "desc": "Restaurar a imagem atual" + }, + "upscale": { + "title": "Redimensionar", + "desc": "Redimensionar a imagem atual" + }, + "showInfo": { + "title": "Mostrar Informações", + "desc": "Mostrar metadados de informações da imagem atual" + }, + "deleteImage": { + "title": "Apagar Imagem", + "desc": "Apaga a imagem atual" + }, + "toggleGalleryPin": { + "title": "Ativar Fixar Galeria", + "desc": "Fixa e desafixa a galeria na interface" + }, + "increaseGalleryThumbSize": { + "title": "Aumentar Tamanho da Galeria de Imagem", + "desc": "Aumenta o tamanho das thumbs na galeria" + }, + "increaseBrushSize": { + "title": "Aumentar Tamanho do Pincel", + "desc": "Aumenta o tamanho do pincel/apagador" + }, + "fillBoundingBox": { + "title": "Preencher Caixa Delimitadora", + "desc": "Preenche a caixa delimitadora com a cor do pincel" + }, + "eraseBoundingBox": { + "title": "Apagar Caixa Delimitadora", + "desc": "Apaga a área da caixa delimitadora" + }, + "colorPicker": { + "title": "Selecionar Seletor de Cor", + "desc": "Seleciona o seletor de cores" + }, + "showHideBoundingBox": { + "title": "Mostrar/Esconder Caixa Delimitadora", + "desc": "Ativa a visibilidade da caixa delimitadora" + }, + "saveToGallery": { + "title": "Gravara Na Galeria", + "desc": "Grava a tela atual na galeria" + }, + "copyToClipboard": { + "title": "Copiar para a Área de Transferência", + "desc": "Copia a tela atual para a área de transferência" + }, + "resetView": { + 
"title": "Resetar Visualização", + "desc": "Reseta Visualização da Tela" + }, + "previousStagingImage": { + "title": "Imagem de Preparação Anterior", + "desc": "Área de Imagem de Preparação Anterior" + }, + "nextStagingImage": { + "title": "Próxima Imagem de Preparação Anterior", + "desc": "Próxima Área de Imagem de Preparação Anterior" + }, + "acceptStagingImage": { + "title": "Aceitar Imagem de Preparação Anterior", + "desc": "Aceitar Área de Imagem de Preparação Anterior" + } + }, + "modelManager": { + "modelAdded": "Modelo Adicionado", + "modelUpdated": "Modelo Atualizado", + "modelEntryDeleted": "Entrada de modelo excluída", + "description": "Descrição", + "modelLocationValidationMsg": "Caminho para onde o seu modelo está localizado.", + "repo_id": "Repo ID", + "vaeRepoIDValidationMsg": "Repositório Online do seu VAE", + "width": "Largura", + "widthValidationMsg": "Largura padrão do seu modelo.", + "height": "Altura", + "heightValidationMsg": "Altura padrão do seu modelo.", + "findModels": "Encontrar Modelos", + "scanAgain": "Digitalize Novamente", + "deselectAll": "Deselecionar Tudo", + "showExisting": "Mostrar Existente", + "deleteConfig": "Apagar Config", + "convertToDiffusersHelpText6": "Deseja converter este modelo?", + "mergedModelName": "Nome do modelo mesclado", + "alpha": "Alpha", + "interpolationType": "Tipo de Interpolação", + "modelMergeHeaderHelp1": "Pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.", + "modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro.", + "modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.", + "nameValidationMsg": "Insira um nome para o seu modelo", + "descriptionValidationMsg": "Adicione uma descrição para o seu modelo", + "config": "Configuração", + "modelExists": "Modelo Existe", + "selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo", + "noModelsFound": "Nenhum Modelo Encontrado", + "v2_768": "v2 (768px)", + "inpainting": "v1 Inpainting", + "customConfig": "Configuração personalizada", + "pathToCustomConfig": "Caminho para configuração personalizada", + "statusConverting": "A converter", + "modelConverted": "Modelo Convertido", + "ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados", + "addDifference": "Adicionar diferença", + "pickModelType": "Escolha o tipo de modelo", + "safetensorModels": "SafeTensors", + "cannotUseSpaces": "Não pode usar espaços", + "addNew": "Adicionar Novo", + "addManually": "Adicionar Manualmente", + "manual": "Manual", + "name": "Nome", + "configValidationMsg": "Caminho para o ficheiro de configuração do seu modelo.", + "modelLocation": "Localização do modelo", + "repoIDValidationMsg": "Repositório Online do seu Modelo", + "updateModel": "Atualizar Modelo", + "availableModels": "Modelos Disponíveis", + "load": "Carregar", + "active": "Ativado", + "notLoaded": "Não carregado", + "deleteModel": "Apagar modelo", + "deleteMsg1": "Tem certeza de que deseja apagar esta entrada do modelo de InvokeAI?", + "deleteMsg2": "Isso não vai apagar o ficheiro de modelo checkpoint do seu disco. 
+    "convertToDiffusers": "Converter para Diffusers",
+    "convertToDiffusersHelpText1": "Este modelo será convertido ao formato 🧨 Diffusers.",
+    "convertToDiffusersHelpText2": "Este processo irá substituir a sua entrada de Gestor de Modelos por uma versão Diffusers do mesmo modelo.",
+    "convertToDiffusersHelpText3": "O seu ficheiro de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Pode adicionar o seu ponto de verificação ao Gestor de Modelos novamente, se desejar.",
+    "convertToDiffusersSaveLocation": "Local para Gravar",
+    "v2_base": "v2 (512px)",
+    "mergeModels": "Mesclar modelos",
+    "modelOne": "Modelo 1",
+    "modelTwo": "Modelo 2",
+    "modelThree": "Modelo 3",
+    "mergedModelSaveLocation": "Local de Salvamento",
+    "merge": "Mesclar",
+    "modelsMerged": "Modelos mesclados",
+    "mergedModelCustomSaveLocation": "Caminho Personalizado",
+    "invokeAIFolder": "Pasta Invoke AI",
+    "inverseSigmoid": "Sigmóide Inversa",
+    "none": "nenhum",
+    "modelManager": "Gestor de Modelos",
+    "model": "Modelo",
+    "allModels": "Todos os Modelos",
+    "checkpointModels": "Checkpoints",
+    "diffusersModels": "Diffusers",
+    "addNewModel": "Adicionar Novo modelo",
+    "addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
+    "addDiffuserModel": "Adicionar Diffusers",
+    "vaeLocation": "Localização VAE",
+    "vaeLocationValidationMsg": "Caminho para onde o seu VAE está localizado.",
+    "vaeRepoID": "VAE Repo ID",
+    "addModel": "Adicionar Modelo",
+    "search": "Procurar",
+    "cached": "Em cache",
+    "checkpointFolder": "Pasta de Checkpoint",
+    "clearCheckpointFolder": "Apagar Pasta de Checkpoint",
+    "modelsFound": "Modelos Encontrados",
+    "selectFolder": "Selecione a Pasta",
+    "selected": "Selecionada",
+    "selectAll": "Selecionar Tudo",
+    "addSelected": "Adicione Selecionado",
+    "delete": "Apagar",
+    "formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
+    "formMessageDiffusersModelLocationDesc": "Por favor, indique pelo menos um.",
+    "formMessageDiffusersVAELocation": "Localização do VAE",
+    "formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo ficheiro VAE dentro do local do modelo.",
+    "convert": "Converter",
+    "convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, a depender das especificações do seu computador.",
+    "convertToDiffusersHelpText5": "Por favor, certifique-se de que tenha espaço suficiente no disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
+    "v1": "v1",
+    "sameFolder": "Mesma pasta",
+    "invokeRoot": "Pasta do InvokeAI",
+    "custom": "Personalizado",
+    "customSaveLocation": "Local de salvamento personalizado",
+    "modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. Valores de alpha mais baixos resultam numa influência menor do segundo modelo.",
+    "sigmoid": "Sigmóide",
+    "weightedSum": "Soma Ponderada"
+  },
+  "parameters": {
+    "width": "Largura",
+    "seed": "Seed",
+    "hiresStrength": "Força da Alta Resolução",
+    "negativePrompts": "Indicações negativas",
+    "general": "Geral",
+    "randomizeSeed": "Seed Aleatória",
+    "shuffle": "Embaralhar",
+    "noiseThreshold": "Limite de Ruído",
+    "perlinNoise": "Ruído de Perlin",
+    "variations": "Variações",
+    "seedWeights": "Pesos da Seed",
+    "restoreFaces": "Restaurar Rostos",
+    "faceRestoration": "Restauração de Rosto",
+    "type": "Tipo",
+    "denoisingStrength": "Força de remoção de ruído",
+    "scale": "Escala",
+    "otherOptions": "Outras Opções",
+    "seamlessTiling": "Ladrilho Sem Fronteira",
+    "hiresOptim": "Otimização de Alta Res",
+    "imageFit": "Caber Imagem Inicial No Tamanho de Saída",
+    "codeformerFidelity": "Fidelidade",
+    "seamSize": "Tamanho da Fronteira",
+    "seamBlur": "Desfoque da Fronteira",
+    "seamStrength": "Força da Fronteira",
+    "seamSteps": "Passos da Fronteira",
+    "tileSize": "Tamanho do Ladrilho",
+    "boundingBoxHeader": "Caixa Delimitadora",
+    "seamCorrectionHeader": "Correção de Fronteira",
+    "infillScalingHeader": "Preenchimento e Escala",
+    "img2imgStrength": "Força de Imagem Para Imagem",
+    "toggleLoopback": "Ativar Loopback",
+    "symmetry": "Simetria",
+    "promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)",
+    "sendTo": "Mandar para",
+    "openInViewer": "Abrir No Visualizador",
+    "closeViewer": "Fechar Visualizador",
+    "usePrompt": "Usar Prompt",
+    "deleteImage": "Apagar Imagem",
+    "initialImage": "Imagem inicial",
+    "showOptionsPanel": "Mostrar Painel de Opções",
+    "strength": "Força",
+    "upscaling": "Redimensionando",
+    "upscale": "Redimensionar",
+    "upscaleImage": "Redimensionar Imagem",
+    "scaleBeforeProcessing": "Escala Antes do Processamento",
+    "invoke": "Invocar",
+    "images": "Imagens",
+    "steps": "Passos",
+    "cfgScale": "Escala CFG",
+    "height": "Altura",
+    "sampler": "Amostrador",
+    "imageToImage": "Imagem para Imagem",
+    "variationAmount": "Quantidade de Variações",
+    "scaledWidth": "L Escalada",
+    "scaledHeight": "A Escalada",
+    "infillMethod": "Método de Preenchimento",
+    "hSymmetryStep": "H Passo de Simetria",
+    "vSymmetryStep": "V Passo de Simetria",
+    "cancel": {
+      "immediate": "Cancelar imediatamente",
+      "schedule": "Cancelar após a iteração atual",
+      "isScheduled": "A cancelar",
+      "setType": "Definir tipo de cancelamento"
+    },
+    "sendToImg2Img": "Mandar para Imagem Para Imagem",
+    "sendToUnifiedCanvas": "Mandar para Tela Unificada",
+    "copyImage": "Copiar imagem",
+    "copyImageToLink": "Copiar Imagem Para a Ligação",
+    "downloadImage": "Descarregar Imagem",
+    "useSeed": "Usar Seed",
+    "useAll": "Usar Todos",
+    "useInitImg": "Usar Imagem Inicial",
+    "info": "Informações"
+  },
+  "settings": {
+    "confirmOnDelete": "Confirmar Antes de Apagar",
+    "displayHelpIcons": "Mostrar Ícones de Ajuda",
+    "useCanvasBeta": "Usar Layout de Telas Beta",
+    "enableImageDebugging": "Ativar Depuração de Imagem",
+    "useSlidersForAll": "Usar deslizadores para todas as opções",
+    "resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do browser para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.",
+    "models": "Modelos",
+    "displayInProgress": "Mostrar Progresso de Imagens Em Andamento",
+    "saveSteps": "Gravar imagens a cada n passos",
+    "resetWebUI": "Reiniciar Interface",
+    "resetWebUIDesc2": "Se as imagens não estão a aparecer na galeria ou algo mais não está a funcionar, favor tentar reiniciar antes de postar um problema no GitHub.",
+    "resetComplete": "A interface foi reiniciada. Atualize a página para carregar."
+  },
+  "toast": {
+    "uploadFailed": "Envio Falhou",
+    "uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitida uma imagem de cada vez",
+    "uploadFailedUnableToLoadDesc": "Não foi possível carregar o ficheiro",
+    "downloadImageStarted": "Download de Imagem Começou",
+    "imageNotLoadedDesc": "Nenhuma imagem encontrada para enviar ao módulo de imagem para imagem",
+    "imageLinkCopied": "Ligação de Imagem Copiada",
+    "imageNotLoaded": "Nenhuma Imagem Carregada",
+    "parametersFailed": "Problema ao carregar parâmetros",
+    "parametersFailedDesc": "Não foi possível carregar imagem inicial.",
+    "seedSet": "Seed Definida",
+    "upscalingFailed": "Redimensionamento Falhou",
+    "promptNotSet": "Prompt Não Definido",
+    "tempFoldersEmptied": "Pasta de Ficheiros Temporários Esvaziada",
+    "imageCopied": "Imagem Copiada",
+    "imageSavedToGallery": "Imagem Salva na Galeria",
+    "canvasMerged": "Tela Fundida",
+    "sentToImageToImage": "Enviada para Imagem Para Imagem",
+    "sentToUnifiedCanvas": "Enviada para a Tela Unificada",
+    "parametersSet": "Parâmetros Definidos",
+    "parametersNotSet": "Parâmetros Não Definidos",
+    "parametersNotSetDesc": "Nenhum metadado foi encontrado para essa imagem.",
+    "seedNotSet": "Seed Não Definida",
+    "seedNotSetDesc": "Não foi possível achar a seed para a imagem.",
+    "promptSet": "Prompt Definido",
+    "promptNotSetDesc": "Não foi possível achar prompt para essa imagem.",
+    "faceRestoreFailed": "Restauração de Rosto Falhou",
+    "metadataLoadFailed": "Falha ao tentar carregar metadados",
+    "initialImageSet": "Imagem Inicial Definida",
+    "initialImageNotSet": "Imagem Inicial Não Definida",
+    "initialImageNotSetDesc": "Não foi possível carregar imagem inicial"
+  },
+  "tooltip": {
+    "feature": {
+      "prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.",
+      "other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.",
+      "seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10) e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.",
+      "imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75",
+      "faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. Um valor alto mudará mais a imagem, o que resulta em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.",
+      "seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.",
+      "gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em ficheiros e acessadas pelo menu de contexto.",
+      "variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3.",
+      "upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.",
+      "boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.",
+      "infillAndScaling": "Gere os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos)."
+    }
+  },
+  "unifiedCanvas": {
+    "emptyTempImagesFolderMessage": "Esvaziar a pasta de ficheiros de imagem temporários também reseta completamente a Tela Unificada. Isso inclui todo o histórico de desfazer/refazer, imagens na área de preparação e a camada base da tela.",
+    "scaledBoundingBox": "Caixa Delimitadora Escalada",
+    "boundingBoxPosition": "Posição da Caixa Delimitadora",
+    "next": "Próximo",
+    "accept": "Aceitar",
+    "showHide": "Mostrar/Esconder",
+    "discardAll": "Descartar Todos",
+    "betaClear": "Limpar",
+    "betaDarkenOutside": "Escurecer Externamente",
+    "base": "Base",
+    "brush": "Pincel",
+    "showIntermediates": "Mostrar Intermediários",
+    "showGrid": "Mostrar Grade",
+    "clearCanvasHistoryConfirm": "Tem certeza que quer limpar o histórico de tela?",
+    "boundingBox": "Caixa Delimitadora",
+    "canvasDimensions": "Dimensões da Tela",
+    "canvasPosition": "Posição da Tela",
+    "cursorPosition": "Posição do cursor",
+    "previous": "Anterior",
+    "betaLimitToBox": "Limitar à Caixa",
+    "layer": "Camada",
+    "mask": "Máscara",
+    "maskingOptions": "Opções de Mascaramento",
+    "enableMask": "Ativar Máscara",
+    "preserveMaskedArea": "Preservar Área da Máscara",
+    "clearMask": "Limpar Máscara",
+    "eraser": "Apagador",
+    "fillBoundingBox": "Preencher Caixa Delimitadora",
+    "eraseBoundingBox": "Apagar Caixa Delimitadora",
+    "colorPicker": "Seletor de Cor",
+    "brushOptions": "Opções de Pincel",
+    "brushSize": "Tamanho",
+    "move": "Mover",
+    "resetView": "Resetar Visualização",
+    "mergeVisible": "Fundir Visível",
+    "saveToGallery": "Gravar na Galeria",
+    "copyToClipboard": "Copiar para a Área de Transferência",
+    "downloadAsImage": "Descarregar Como Imagem",
+    "undo": "Desfazer",
+    "redo": "Refazer",
+    "clearCanvas": "Limpar Tela",
+    "canvasSettings": "Configurações de Tela",
+    "snapToGrid": "Encaixar na Grade",
+    "darkenOutsideSelection": "Escurecer Seleção Externa",
+    "autoSaveToGallery": "Gravar Automaticamente na Galeria",
+    "saveBoxRegionOnly": "Gravar Apenas a Região da Caixa",
+    "limitStrokesToBox": "Limitar Traços à Caixa",
+    "showCanvasDebugInfo": "Mostrar Informações de Depuração da Tela",
+    "clearCanvasHistory": "Limpar o Histórico da Tela",
+    "clearHistory": "Limpar Histórico",
+    "clearCanvasHistoryMessage": "Limpar o histórico de tela deixa a sua tela atual intacta, mas limpa de forma irreversível o histórico de desfazer e refazer.",
+    "emptyTempImageFolder": "Esvaziar a Pasta de Ficheiros de Imagem Temporários",
"emptyFolder": "Esvaziar Pasta", + "emptyTempImagesFolderConfirm": "Tem certeza que quer esvaziar a pasta de ficheiros de imagem temporários?", + "activeLayer": "Camada Ativa", + "canvasScale": "Escala da Tela", + "betaPreserveMasked": "Preservar Máscarado" + }, + "accessibility": { + "invokeProgressBar": "Invocar barra de progresso", + "reset": "Repôr", + "nextImage": "Próxima imagem", + "useThisParameter": "Usar este parâmetro", + "copyMetadataJson": "Copiar metadados JSON", + "zoomIn": "Ampliar", + "zoomOut": "Reduzir", + "rotateCounterClockwise": "Girar no sentido anti-horário", + "rotateClockwise": "Girar no sentido horário", + "flipVertically": "Espelhar verticalmente", + "modifyConfig": "Modificar config", + "toggleAutoscroll": "Alternar rolagem automática", + "showGallery": "Mostrar galeria", + "showOptionsPanel": "Mostrar painel de opções", + "uploadImage": "Enviar imagem", + "previousImage": "Imagem anterior", + "flipHorizontally": "Espelhar horizontalmente", + "toggleLogViewer": "Alternar visualizador de registo" } } diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 7d433aa430..18b7ab57e1 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -63,7 +63,10 @@ "statusMergingModels": "Mesclando Modelos", "statusMergedModels": "Modelos Mesclados", "langRussian": "Russo", - "langSpanish": "Espanhol" + "langSpanish": "Espanhol", + "pinOptionsPanel": "Fixar painel de opções", + "loadingInvokeAI": "Carregando Invoke AI", + "loading": "Carregando" }, "gallery": { "generations": "Gerações", diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index e08540809b..d4178119e4 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -46,7 +46,15 @@ "statusLoadingModel": "Загрузка модели", "statusModelChanged": "Модель изменена", "githubLabel": "Github", - "discordLabel": "Discord" + "discordLabel": "Discord", + "statusMergingModels": "Слияние моделей", + "statusModelConverted": "Модель сконвертирована", + "statusMergedModels": "Модели объединены", + "pinOptionsPanel": "Закрепить панель настроек", + "loading": "Загрузка", + "loadingInvokeAI": "Загрузка Invoke AI", + "back": "Назад", + "statusConvertingModel": "Конвертация модели" }, "gallery": { "generations": "Генерации", @@ -323,7 +331,30 @@ "deleteConfig": "Удалить конфигурацию", "deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?", "deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.", - "repoIDValidationMsg": "Онлайн-репозиторий модели" + "repoIDValidationMsg": "Онлайн-репозиторий модели", + "convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 4 – 7 Гб.", + "invokeAIFolder": "Каталог InvokeAI", + "ignoreMismatch": "Игнорировать несоответствия между выбранными моделями", + "addCheckpointModel": "Добавить модель Checkpoint/Safetensor", + "formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.", + "convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. 
+    "vaeRepoID": "ID репозитория VAE",
+    "mergedModelName": "Название объединенной модели",
+    "checkpointModels": "Checkpoints",
+    "allModels": "Все модели",
+    "addDiffuserModel": "Добавить Diffusers",
+    "repo_id": "ID репозитория",
+    "formMessageDiffusersVAELocationDesc": "Если не указано, InvokeAI будет искать файл VAE рядом с моделью.",
+    "convert": "Преобразовать",
+    "convertToDiffusers": "Преобразовать в Diffusers",
+    "convertToDiffusersHelpText1": "Модель будет преобразована в формат 🧨 Diffusers.",
+    "convertToDiffusersHelpText4": "Это единоразовое действие. Оно может занять 30—60 секунд в зависимости от характеристик вашего компьютера.",
+    "convertToDiffusersHelpText6": "Вы хотите преобразовать эту модель?",
+    "statusConverting": "Преобразование",
+    "modelConverted": "Модель преобразована",
+    "invokeRoot": "Каталог InvokeAI",
+    "modelsMerged": "Модели объединены",
+    "mergeModels": "Объединить модели"
   },
   "parameters": {
     "images": "Изображения",
@@ -503,5 +534,8 @@
     "betaDarkenOutside": "Затемнить снаружи",
     "betaLimitToBox": "Ограничить выделением",
     "betaPreserveMasked": "Сохранять маскируемую область"
+  },
+  "accessibility": {
+    "modelSelect": "Выбор модели"
+  }
 }
diff --git a/invokeai/frontend/web/public/locales/zh_Hant.json b/invokeai/frontend/web/public/locales/zh_Hant.json
index b0ae670e01..af7b0cf328 100644
--- a/invokeai/frontend/web/public/locales/zh_Hant.json
+++ b/invokeai/frontend/web/public/locales/zh_Hant.json
@@ -19,6 +19,21 @@
     "discordLabel": "Discord",
     "nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。",
     "reportBugLabel": "回報錯誤",
-    "githubLabel": "GitHub"
+    "githubLabel": "GitHub",
+    "langKorean": "韓語",
+    "langPortuguese": "葡萄牙語",
+    "hotkeysLabel": "快捷鍵",
+    "languagePickerLabel": "切換語言",
+    "langDutch": "荷蘭語",
+    "langFrench": "法語",
+    "langGerman": "德語",
+    "langItalian": "義大利語",
+    "langJapanese": "日語",
+    "langPolish": "波蘭語",
+    "langBrPortuguese": "巴西葡萄牙語",
+    "langRussian": "俄語",
+    "langSpanish": "西班牙語",
+    "text2img": "文字到圖像",
+    "unifiedCanvas": "統一畫布"
   }
 }
diff --git a/static/dream_web/favicon.ico b/static/dream_web/favicon.ico
new file mode 100644
index 0000000000..51eb844a6a
Binary files /dev/null and b/static/dream_web/favicon.ico differ
diff --git a/static/dream_web/index.css b/static/dream_web/index.css
new file mode 100644
index 0000000000..25a0994a3d
--- /dev/null
+++ b/static/dream_web/index.css
@@ -0,0 +1,179 @@
+:root {
+    --fields-dark:#DCDCDC;
+    --fields-light:#F5F5F5;
+}
+
+* {
+    font-family: 'Arial';
+    font-size: 100%;
+}
+body {
+    font-size: 1em;
+}
+textarea {
+    font-size: 0.95em;
+}
+header, form, #progress-section {
+    margin-left: auto;
+    margin-right: auto;
+    max-width: 1024px;
+    text-align: center;
+}
+fieldset {
+    border: none;
+    line-height: 2.2em;
+}
+fieldset > legend {
+    width: auto;
+    margin-left: 0;
+    margin-right: auto;
+    font-weight:bold;
+}
+select, input {
+    margin-right: 10px;
+    padding: 2px;
+}
+input:disabled {
+    cursor:auto;
+}
+input[type=submit] {
+    cursor: pointer;
+    background-color: #666;
+    color: white;
+}
+input[type=checkbox] {
+    cursor: pointer;
+    margin-right: 0px;
+    width: 20px;
+    height: 20px;
+    vertical-align: middle;
+}
+input#seed {
+    margin-right: 0px;
+}
+div {
+    padding: 10px 10px 10px 10px;
+}
+header {
+    margin-bottom: 16px;
+}
+header h1 {
+    margin-bottom: 0;
+    font-size: 2em;
+}
+#search-box {
+    display: flex;
+}
+#scaling-inprocess-message {
+    font-weight: bold;
+    font-style: italic;
+    display: none;
+}
+#prompt {
+    flex-grow: 1;
+    padding: 5px 10px 5px 10px;
+    border: 1px solid #999;
+    outline: none;
+}
+#submit {
+    padding: 5px 10px 5px 10px;
+    border: 1px solid #999;
+}
+#reset-all, #remove-image {
+    margin-top: 12px;
+    font-size: 0.8em;
+    background-color: pink;
+    border: 1px solid #999;
+    border-radius: 4px;
+}
+#results {
+    text-align: center;
+    margin: auto;
+    padding-top: 10px;
+}
+#results figure {
+    display: inline-block;
+    margin: 10px;
+}
+#results figcaption {
+    font-size: 0.8em;
+    padding: 3px;
+    color: #888;
+    cursor: pointer;
+}
+#results img {
+    border-radius: 5px;
+    object-fit: contain;
+    background-color: var(--fields-dark);
+}
+#fieldset-config {
+    line-height:2em;
+}
+input[type="number"] {
+    width: 60px;
+}
+#seed {
+    width: 150px;
+}
+button#reset-seed {
+    font-size: 1.7em;
+    background: #efefef;
+    border: 1px solid #999;
+    border-radius: 4px;
+    line-height: 0.8;
+    margin: 0 10px 0 0;
+    padding: 0 5px 3px;
+    vertical-align: middle;
+}
+label {
+    white-space: nowrap;
+}
+#progress-section {
+    display: none;
+}
+#progress-image {
+    width: 30vh;
+    height: 30vh;
+    object-fit: contain;
+    background-color: var(--fields-dark);
+}
+#cancel-button {
+    cursor: pointer;
+    color: red;
+}
+#txt2img {
+    background-color: var(--fields-dark);
+}
+#variations {
+    background-color: var(--fields-light);
+}
+#initimg {
+    background-color: var(--fields-dark);
+}
+#img2img {
+    background-color: var(--fields-light);
+}
+#initimg > :not(legend) {
+    background-color: var(--fields-light);
+    margin: .5em;
+}
+
+#postprocess, #initimg {
+    display:flex;
+    flex-wrap:wrap;
+    padding: 0;
+    margin-top: 1em;
+    background-color: var(--fields-dark);
+}
+#postprocess > fieldset, #initimg > * {
+    flex-grow: 1;
+}
+#postprocess > fieldset {
+    background-color: var(--fields-dark);
+}
+#progress-section {
+    background-color: var(--fields-light);
+}
+#no-results-message:not(:only-child) {
+    display: none;
+}
diff --git a/static/dream_web/index.html b/static/dream_web/index.html
new file mode 100644
index 0000000000..feb542adb2
--- /dev/null
+++ b/static/dream_web/index.html
@@ -0,0 +1,187 @@
+
+
+No results...
+