diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 0fabbdf038..67c63b4b4b 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -16,6 +16,9 @@ on:
- 'v*.*.*'
workflow_dispatch:
+permissions:
+ contents: write
+
jobs:
docker:
if: github.event.pull_request.draft == false
diff --git a/.github/workflows/mkdocs-material.yml b/.github/workflows/mkdocs-material.yml
index f8f58a6b0c..c8e55f0b1b 100644
--- a/.github/workflows/mkdocs-material.yml
+++ b/.github/workflows/mkdocs-material.yml
@@ -5,6 +5,9 @@ on:
- 'main'
- 'development'
+permissions:
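+  # mkdocs gh-deploy pushes to the gh-pages branch, which requires write access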
+ contents: write
+
jobs:
mkdocs-material:
if: github.event.pull_request.draft == false
@@ -34,8 +37,6 @@ jobs:
- name: deploy to gh-pages
if: ${{ github.ref == 'refs/heads/main' }}
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
run: |
python -m \
mkdocs gh-deploy \
diff --git a/installer/templates/invoke.sh.in b/installer/templates/invoke.sh.in
index 812bcba458..4576c7172f 100644
--- a/installer/templates/invoke.sh.in
+++ b/installer/templates/invoke.sh.in
@@ -24,9 +24,9 @@ if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
-while true
-do
if [ "$0" != "bash" ]; then
+ while true
+ do
echo "Do you want to generate images using the"
echo "1. command-line interface"
echo "2. browser-based UI"
@@ -67,29 +67,29 @@ if [ "$0" != "bash" ]; then
;;
7)
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
- ;;
- 8)
- echo "Developer Console:"
+ ;;
+ 8)
+ echo "Developer Console:"
file_name=$(basename "${BASH_SOURCE[0]}")
bash --init-file "$file_name"
;;
9)
- echo "Update:"
+ echo "Update:"
invokeai-update
;;
10)
invokeai --help
;;
- [qQ])
+ [qQ])
exit 0
;;
*)
echo "Invalid selection"
exit;;
esac
+ done
else # in developer console
python --version
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
-done
diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py
index 67e3c840c0..dc8fa03fc4 100644
--- a/invokeai/app/api/routers/sessions.py
+++ b/invokeai/app/api/routers/sessions.py
@@ -270,3 +270,18 @@ async def invoke_session(
ApiDependencies.invoker.invoke(session, invoke_all=all)
return Response(status_code=202)
+
+
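+# Cancellation is cooperative: the queue records the request, and the processor
+# and step callbacks raise CanceledException when they observe it.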
+@session_router.delete(
+ "/{session_id}/invoke",
+ operation_id="cancel_session_invoke",
+ responses={
+ 202: {"description": "The invocation is canceled"}
+ },
+)
+async def cancel_session_invoke(
+ session_id: str = Path(description="The id of the session to cancel"),
+) -> Response:
+    """Cancels an in-progress session invocation"""
+    ApiDependencies.invoker.cancel(session_id)
+    return Response(status_code=202)
diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index b8140b11e9..d6e624b325 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -1,22 +1,19 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from datetime import datetime, timezone
-from typing import Any, Literal, Optional, Union
+from functools import partial
+from typing import Literal, Optional, Union
import numpy as np
-
from torch import Tensor
-from PIL import Image
+
from pydantic import Field
-from skimage.exposure.histogram_matching import match_histograms
from ..services.image_storage import ImageType
-from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
-from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator, Generator
+from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
from ...backend.stable_diffusion import PipelineIntermediateState
-from ...backend.util.util import image_to_dataURL
+from ..util.util import diffusers_step_callback_adapter, CanceledException
SAMPLER_NAME_VALUES = Literal[
tuple(InvokeAIGenerator.schedulers())
@@ -45,32 +42,26 @@ class TextToImageInvocation(BaseInvocation):
# TODO: pass this an emitter method or something? or a session for dispatching?
def dispatch_progress(
- self, context: InvocationContext, sample: Tensor, step: int
- ) -> None:
- # TODO: only output a preview image when requested
- image = Generator.sample_to_lowres_estimated_image(sample)
+ self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+ ) -> None:
+        if context.services.queue.is_canceled(context.graph_execution_state_id):
+ raise CanceledException
- (width, height) = image.size
- width *= 8
- height *= 8
-
- dataURL = image_to_dataURL(image, image_format="JPEG")
-
- context.services.events.emit_generator_progress(
- context.graph_execution_state_id,
- self.id,
- {
- "width": width,
- "height": height,
- "dataURL": dataURL
- },
- step,
- self.steps,
- )
+ step = intermediate_state.step
+ if intermediate_state.predicted_original is not None:
+ # Some schedulers report not only the noisy latents at the current timestep,
+ # but also their estimate so far of what the de-noised latents will be.
+ sample = intermediate_state.predicted_original
+ else:
+ sample = intermediate_state.latents
+
+ diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
- def step_callback(state: PipelineIntermediateState):
- self.dispatch_progress(context, state.latents, state.step)
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
@@ -79,7 +70,7 @@ class TextToImageInvocation(BaseInvocation):
model= context.services.model_manager.get_model()
outputs = Txt2Img(model).generate(
prompt=self.prompt,
- step_callback=step_callback,
+ step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt"}
), # Shorthand for passing all of the parameters above manually
@@ -116,6 +107,22 @@ class ImageToImageInvocation(TextToImageInvocation):
description="Whether or not the result should be fit to the aspect ratio of the input image",
)
+ def dispatch_progress(
+ self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+ ) -> None:
+        if context.services.queue.is_canceled(context.graph_execution_state_id):
+ raise CanceledException
+
+ step = intermediate_state.step
+ if intermediate_state.predicted_original is not None:
+ # Some schedulers report not only the noisy latents at the current timestep,
+ # but also their estimate so far of what the de-noised latents will be.
+ sample = intermediate_state.predicted_original
+ else:
+ sample = intermediate_state.latents
+
+ diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
None
@@ -126,24 +133,23 @@ class ImageToImageInvocation(TextToImageInvocation):
)
mask = None
- def step_callback(sample, step=0):
- self.dispatch_progress(context, sample, step)
-
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
model = context.services.model_manager.get_model()
- generator_output = next(
- Img2Img(model).generate(
+ outputs = Img2Img(model).generate(
prompt=self.prompt,
init_image=image,
init_mask=mask,
- step_callback=step_callback,
+ step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
- )
+
+        # `outputs` is an infinite iterator that yields a new InvokeAIGeneratorOutput
+        # each time it is advanced. We only need the first one.
+        generator_output = next(outputs)
result_image = generator_output.image
@@ -173,6 +179,22 @@ class InpaintInvocation(ImageToImageInvocation):
description="The amount by which to replace masked areas with latent noise",
)
+ def dispatch_progress(
+ self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+ ) -> None:
+        if context.services.queue.is_canceled(context.graph_execution_state_id):
+ raise CanceledException
+
+ step = intermediate_state.step
+ if intermediate_state.predicted_original is not None:
+ # Some schedulers report not only the noisy latents at the current timestep,
+ # but also their estimate so far of what the de-noised latents will be.
+ sample = intermediate_state.predicted_original
+ else:
+ sample = intermediate_state.latents
+
+ diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
None
@@ -187,24 +209,23 @@ class InpaintInvocation(ImageToImageInvocation):
else context.services.images.get(self.mask.image_type, self.mask.image_name)
)
- def step_callback(sample, step=0):
- self.dispatch_progress(context, sample, step)
-
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
- manager = context.services.model_manager.get_model()
- generator_output = next(
- Inpaint(model).generate(
+ model = context.services.model_manager.get_model()
+ outputs = Inpaint(model).generate(
prompt=self.prompt,
- init_image=image,
- mask_image=mask,
- step_callback=step_callback,
+ init_img=image,
+ init_mask=mask,
+ step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
- )
+
+        # `outputs` is an infinite iterator that yields a new InvokeAIGeneratorOutput
+        # each time it is advanced. We only need the first one.
+        generator_output = next(outputs)
result_image = generator_output.image
diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py
index 7330cd73be..65ea4c3edb 100644
--- a/invokeai/app/invocations/image.py
+++ b/invokeai/app/invocations/image.py
@@ -28,12 +28,28 @@ class ImageOutput(BaseInvocationOutput):
image: ImageField = Field(default=None, description="The output image")
#fmt: on
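+    # Fields with defaults are emitted as optional in the generated OpenAPI
+    # schema; schema_extra forces them to be listed as required.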
+ class Config:
+ schema_extra = {
+ 'required': [
+ 'type',
+ 'image',
+ ]
+ }
+
class MaskOutput(BaseInvocationOutput):
"""Base class for invocations that output a mask"""
#fmt: off
type: Literal["mask"] = "mask"
mask: ImageField = Field(default=None, description="The output mask")
- #fomt: on
+ #fmt: on
+
+ class Config:
+ schema_extra = {
+ 'required': [
+ 'type',
+ 'mask',
+ ]
+ }
# TODO: this isn't really necessary anymore
class LoadImageInvocation(BaseInvocation):
diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py
index 3544f30859..0c7e3069df 100644
--- a/invokeai/app/invocations/prompt.py
+++ b/invokeai/app/invocations/prompt.py
@@ -12,3 +12,11 @@ class PromptOutput(BaseInvocationOutput):
prompt: str = Field(default=None, description="The output prompt")
#fmt: on
+
+ class Config:
+ schema_extra = {
+ 'required': [
+ 'type',
+ 'prompt',
+ ]
+ }
diff --git a/invokeai/app/services/graph.py b/invokeai/app/services/graph.py
index 0d4102c416..171d86c9e3 100644
--- a/invokeai/app/services/graph.py
+++ b/invokeai/app/services/graph.py
@@ -127,6 +127,13 @@ class NodeAlreadyExecutedError(Exception):
class GraphInvocationOutput(BaseInvocationOutput):
type: Literal["graph_output"] = "graph_output"
+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+            ]
+        }
# TODO: Fill this out and move to invocations
class GraphInvocation(BaseInvocation):
@@ -147,6 +154,13 @@ class IterateInvocationOutput(BaseInvocationOutput):
item: Any = Field(description="The item being iterated over")
+ class Config:
+ schema_extra = {
+ 'required': [
+ 'type',
+ 'item',
+ ]
+ }
# TODO: Fill this out and move to invocations
class IterateInvocation(BaseInvocation):
@@ -169,6 +183,13 @@ class CollectInvocationOutput(BaseInvocationOutput):
collection: list[Any] = Field(description="The collection of input items")
+ class Config:
+ schema_extra = {
+ 'required': [
+ 'type',
+ 'collection',
+ ]
+ }
class CollectInvocation(BaseInvocation):
"""Collects values into a collection"""
diff --git a/invokeai/app/services/invocation_queue.py b/invokeai/app/services/invocation_queue.py
index 88a4f8708d..4a42789b12 100644
--- a/invokeai/app/services/invocation_queue.py
+++ b/invokeai/app/services/invocation_queue.py
@@ -2,6 +2,7 @@
from abc import ABC, abstractmethod
from queue import Queue
+import time
# TODO: make this serializable
@@ -10,6 +11,7 @@ class InvocationQueueItem:
graph_execution_state_id: str
invocation_id: str
invoke_all: bool
+ timestamp: float
def __init__(
self,
@@ -22,6 +24,7 @@ class InvocationQueueItem:
self.graph_execution_state_id = graph_execution_state_id
self.invocation_id = invocation_id
self.invoke_all = invoke_all
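+        # Stamp creation time so get() can discard items queued before a cancellation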
+ self.timestamp = time.time()
class InvocationQueueABC(ABC):
@@ -35,15 +38,44 @@ class InvocationQueueABC(ABC):
def put(self, item: InvocationQueueItem | None) -> None:
pass
+ @abstractmethod
+ def cancel(self, graph_execution_state_id: str) -> None:
+ pass
+
+ @abstractmethod
+ def is_canceled(self, graph_execution_state_id: str) -> bool:
+ pass
+
class MemoryInvocationQueue(InvocationQueueABC):
__queue: Queue
+ __cancellations: dict[str, float]
def __init__(self):
self.__queue = Queue()
+ self.__cancellations = dict()
def get(self) -> InvocationQueueItem:
- return self.__queue.get()
+ item = self.__queue.get()
+
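+        # Skip any items whose graph was canceled after they were enqueued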
+ while isinstance(item, InvocationQueueItem) \
+ and item.graph_execution_state_id in self.__cancellations \
+ and self.__cancellations[item.graph_execution_state_id] > item.timestamp:
+ item = self.__queue.get()
+
+        # Clear stale cancellations (anything canceled before this item was queued);
+        # guard against the None sentinel, which has no timestamp
+        if isinstance(item, InvocationQueueItem):
+            for graph_execution_state_id in list(self.__cancellations.keys()):
+                if self.__cancellations[graph_execution_state_id] < item.timestamp:
+                    del self.__cancellations[graph_execution_state_id]
+
+ return item
def put(self, item: InvocationQueueItem | None) -> None:
self.__queue.put(item)
+
+ def cancel(self, graph_execution_state_id: str) -> None:
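+        # Record only the first cancellation; get() compares its time to item timestamps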
+ if graph_execution_state_id not in self.__cancellations:
+ self.__cancellations[graph_execution_state_id] = time.time()
+
+ def is_canceled(self, graph_execution_state_id: str) -> bool:
+ return graph_execution_state_id in self.__cancellations
diff --git a/invokeai/app/services/invoker.py b/invokeai/app/services/invoker.py
index f234cd827b..594477ed0f 100644
--- a/invokeai/app/services/invoker.py
+++ b/invokeai/app/services/invoker.py
@@ -50,6 +50,10 @@ class Invoker:
new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
self.services.graph_execution_manager.set(new_state)
return new_state
+
+ def cancel(self, graph_execution_state_id: str) -> None:
+ """Cancels the given execution state"""
+ self.services.queue.cancel(graph_execution_state_id)
def __start_service(self, service) -> None:
# Call start() method on any services that have it
diff --git a/invokeai/app/services/processor.py b/invokeai/app/services/processor.py
index 5baa64503c..b460563278 100644
--- a/invokeai/app/services/processor.py
+++ b/invokeai/app/services/processor.py
@@ -4,7 +4,7 @@ from threading import Event, Thread
from ..invocations.baseinvocation import InvocationContext
from .invocation_queue import InvocationQueueItem
from .invoker import InvocationProcessorABC, Invoker
-
+from ..util.util import CanceledException
class DefaultInvocationProcessor(InvocationProcessorABC):
__invoker_thread: Thread
@@ -58,6 +58,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
)
)
+ # Check queue to see if this is canceled, and skip if so
+ if self.__invoker.services.queue.is_canceled(
+ graph_execution_state.id
+ ):
+ continue
+
# Save outputs and history
graph_execution_state.complete(invocation.id, outputs)
@@ -76,6 +82,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
except KeyboardInterrupt:
pass
+ except CanceledException:
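+                # Raised by a step callback when this session is canceled; exit quietly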
+ pass
+
except Exception as e:
error = traceback.format_exc()
@@ -95,6 +104,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
)
pass
+
+ # Check queue to see if this is canceled, and skip if so
+ if self.__invoker.services.queue.is_canceled(
+ graph_execution_state.id
+ ):
+ continue
# Queue any further commands if invoking all
is_complete = graph_execution_state.is_complete()
diff --git a/invokeai/app/util/util.py b/invokeai/app/util/util.py
new file mode 100644
index 0000000000..60a5072cb0
--- /dev/null
+++ b/invokeai/app/util/util.py
@@ -0,0 +1,42 @@
+import torch
+from PIL import Image
+from ..invocations.baseinvocation import InvocationContext
+from ...backend.util.util import image_to_dataURL
+from ...backend.generator.base import Generator
+from ...backend.stable_diffusion import PipelineIntermediateState
+
+class CanceledException(Exception):
+ pass
+
+def fast_latents_step_callback(sample: torch.Tensor, step: int, steps: int, id: str, context: InvocationContext):
+ # TODO: only output a preview image when requested
+ image = Generator.sample_to_lowres_estimated_image(sample)
+
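+    # The preview image is estimated from the latents, which are downsampled
+    # 8x relative to pixel space, so scale the reported dimensions back up.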
+ (width, height) = image.size
+ width *= 8
+ height *= 8
+
+ dataURL = image_to_dataURL(image, image_format="JPEG")
+
+ context.services.events.emit_generator_progress(
+ context.graph_execution_state_id,
+ id,
+ {
+ "width": width,
+ "height": height,
+ "dataURL": dataURL
+ },
+ step,
+ steps,
+ )
+
+def diffusers_step_callback_adapter(*cb_args, **kwargs):
+ """
+    txt2img gives us a Tensor in the step_callback, while img2img gives us a PipelineIntermediateState.
+ This adapter grabs the needed data and passes it along to the callback function.
+ """
+ if isinstance(cb_args[0], PipelineIntermediateState):
+ progress_state: PipelineIntermediateState = cb_args[0]
+ return fast_latents_step_callback(progress_state.latents, progress_state.step, **kwargs)
+ else:
+ return fast_latents_step_callback(*cb_args, **kwargs)
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index e30b77ec33..ee56077fa8 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -21,7 +21,7 @@ from PIL import Image, ImageChops, ImageFilter
from accelerate.utils import set_seed
from diffusers import DiffusionPipeline
from tqdm import trange
-from typing import List, Iterator, Type
+from typing import Callable, List, Iterator, Optional, Type
from dataclasses import dataclass, field
from diffusers.schedulers import SchedulerMixin as Scheduler
@@ -35,23 +35,23 @@ downsampling = 8
@dataclass
class InvokeAIGeneratorBasicParams:
- seed: int=None
+ seed: Optional[int]=None
width: int=512
height: int=512
- cfg_scale: int=7.5
+ cfg_scale: float=7.5
steps: int=20
ddim_eta: float=0.0
- scheduler: int='ddim'
+ scheduler: str='ddim'
precision: str='float16'
perlin: float=0.0
- threshold: int=0.0
+ threshold: float=0.0
seamless: bool=False
seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y'])
- h_symmetry_time_pct: float=None
- v_symmetry_time_pct: float=None
+ h_symmetry_time_pct: Optional[float]=None
+ v_symmetry_time_pct: Optional[float]=None
variation_amount: float = 0.0
with_variations: list=field(default_factory=list)
- safety_checker: SafetyChecker=None
+ safety_checker: Optional[SafetyChecker]=None
@dataclass
class InvokeAIGeneratorOutput:
@@ -61,10 +61,10 @@ class InvokeAIGeneratorOutput:
and the model hash, as well as all the generate() parameters that went into
generating the image (in .params, also available as attributes)
'''
- image: Image
+ image: Image.Image
seed: int
model_hash: str
- attention_maps_images: List[Image]
+ attention_maps_images: List[Image.Image]
params: Namespace
# we are interposing a wrapper around the original Generator classes so that
@@ -92,8 +92,8 @@ class InvokeAIGenerator(metaclass=ABCMeta):
def generate(self,
prompt: str='',
- callback: callable=None,
- step_callback: callable=None,
+ callback: Optional[Callable]=None,
+ step_callback: Optional[Callable]=None,
iterations: int=1,
**keyword_args,
)->Iterator[InvokeAIGeneratorOutput]:
@@ -206,10 +206,10 @@ class Txt2Img(InvokeAIGenerator):
# ------------------------------------
class Img2Img(InvokeAIGenerator):
def generate(self,
- init_image: Image | torch.FloatTensor,
+ init_image: Image.Image | torch.FloatTensor,
strength: float=0.75,
**keyword_args
- )->List[InvokeAIGeneratorOutput]:
+ )->Iterator[InvokeAIGeneratorOutput]:
return super().generate(init_image=init_image,
strength=strength,
**keyword_args
@@ -223,7 +223,7 @@ class Img2Img(InvokeAIGenerator):
# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
class Inpaint(Img2Img):
def generate(self,
- mask_image: Image | torch.FloatTensor,
+ mask_image: Image.Image | torch.FloatTensor,
# Seam settings - when 0, doesn't fill seam
seam_size: int = 0,
seam_blur: int = 0,
@@ -236,7 +236,7 @@ class Inpaint(Img2Img):
inpaint_height=None,
inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF),
**keyword_args
- )->List[InvokeAIGeneratorOutput]:
+ )->Iterator[InvokeAIGeneratorOutput]:
return super().generate(
mask_image=mask_image,
seam_size=seam_size,
@@ -263,7 +263,7 @@ class Embiggen(Txt2Img):
embiggen: list=None,
embiggen_tiles: list = None,
strength: float=0.75,
- **kwargs)->List[InvokeAIGeneratorOutput]:
+ **kwargs)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(embiggen=embiggen,
embiggen_tiles=embiggen_tiles,
strength=strength,
diff --git a/invokeai/backend/web/invoke_ai_web_server.py b/invokeai/backend/web/invoke_ai_web_server.py
index dc77ff4723..7209e31449 100644
--- a/invokeai/backend/web/invoke_ai_web_server.py
+++ b/invokeai/backend/web/invoke_ai_web_server.py
@@ -1022,7 +1022,7 @@ class InvokeAIWebServer:
"RGB"
)
- def image_progress(sample, step):
+ def image_progress(intermediate_state: PipelineIntermediateState):
if self.canceled.is_set():
raise CanceledException
@@ -1030,6 +1030,14 @@ class InvokeAIWebServer:
nonlocal generation_parameters
nonlocal progress
+ step = intermediate_state.step
+ if intermediate_state.predicted_original is not None:
+ # Some schedulers report not only the noisy latents at the current timestep,
+ # but also their estimate so far of what the de-noised latents will be.
+ sample = intermediate_state.predicted_original
+ else:
+ sample = intermediate_state.latents
+
generation_messages = {
"txt2img": "common.statusGeneratingTextToImage",
"img2img": "common.statusGeneratingImageToImage",
@@ -1302,16 +1310,9 @@ class InvokeAIWebServer:
progress.set_current_iteration(progress.current_iteration + 1)
- def diffusers_step_callback_adapter(*cb_args, **kwargs):
- if isinstance(cb_args[0], PipelineIntermediateState):
- progress_state: PipelineIntermediateState = cb_args[0]
- return image_progress(progress_state.latents, progress_state.step)
- else:
- return image_progress(*cb_args, **kwargs)
-
self.generate.prompt2image(
**generation_parameters,
- step_callback=diffusers_step_callback_adapter,
+ step_callback=image_progress,
image_callback=image_done,
)
diff --git a/invokeai/frontend/web/src/features/lightbox/components/ReactPanZoomButtons.tsx b/invokeai/frontend/web/src/features/lightbox/components/ReactPanZoomButtons.tsx
index ee9be65cc1..2e592e83d7 100644
--- a/invokeai/frontend/web/src/features/lightbox/components/ReactPanZoomButtons.tsx
+++ b/invokeai/frontend/web/src/features/lightbox/components/ReactPanZoomButtons.tsx
@@ -34,7 +34,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.zoomIn')}
- tooltip="Zoom In"
+ tooltip={t('accessibility.zoomIn')}
onClick={() => zoomIn()}
fontSize={20}
/>
@@ -42,7 +42,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.zoomOut')}
- tooltip="Zoom Out"
+ tooltip={t('accessibility.zoomOut')}
onClick={() => zoomOut()}
fontSize={20}
/>
@@ -50,7 +50,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.rotateCounterClockwise')}
- tooltip="Rotate Counter-Clockwise"
+ tooltip={t('accessibility.rotateCounterClockwise')}
onClick={rotateCounterClockwise}
fontSize={20}
/>
@@ -58,7 +58,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.rotateClockwise')}
- tooltip="Rotate Clockwise"
+ tooltip={t('accessibility.rotateClockwise')}
onClick={rotateClockwise}
fontSize={20}
/>
@@ -66,7 +66,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.flipHorizontally')}
- tooltip="Flip Horizontally"
+ tooltip={t('accessibility.flipHorizontally')}
onClick={flipHorizontally}
fontSize={20}
/>
@@ -74,7 +74,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.flipVertically')}
- tooltip="Flip Vertically"
+ tooltip={t('accessibility.flipVertically')}
onClick={flipVertically}
fontSize={20}
/>
@@ -82,7 +82,7 @@ const ReactPanZoomButtons = ({
}
aria-label={t('accessibility.reset')}
- tooltip="Reset"
+ tooltip={t('accessibility.reset')}
onClick={() => {
resetTransform();
reset();