Merge branch 'main' into nodepromptsize

mickr777
2023-07-29 12:43:29 +10:00
committed by GitHub
239 changed files with 10360 additions and 8889 deletions

View File

@@ -1,3 +1,3 @@
'''
"""
Initialization file for invokeai.frontend
'''
"""

View File

@@ -1,7 +1,7 @@
'''
"""
Minimalist updater script. Prompts user for the tag or branch to update to and runs
pip install <path_to_git_source>.
'''
"""
import os
import platform
import pkg_resources
@@ -15,10 +15,10 @@ from rich.style import Style
from invokeai.version import __version__
INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_TAG="https://github.com/invoke-ai/InvokeAI/archive/refs/tags"
INVOKE_AI_BRANCH="https://github.com/invoke-ai/InvokeAI/archive/refs/heads"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"
INVOKE_AI_SRC = "https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_TAG = "https://github.com/invoke-ai/InvokeAI/archive/refs/tags"
INVOKE_AI_BRANCH = "https://github.com/invoke-ai/InvokeAI/archive/refs/heads"
INVOKE_AI_REL = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
OS = platform.uname().system
ARCH = platform.uname().machine
@@ -29,34 +29,38 @@ if OS == "Windows":
else:
console = Console(style=Style(color="grey74", bgcolor="grey19"))
def get_versions()->dict:
def get_versions() -> dict:
return requests.get(url=INVOKE_AI_REL).json()
def invokeai_is_running()->bool:
def invokeai_is_running() -> bool:
for p in psutil.process_iter():
try:
cmdline = p.cmdline()
matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
matches = [x for x in cmdline if x.endswith(("invokeai", "invokeai.exe"))]
if matches:
print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
print(
f":exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]"
)
return True
except (psutil.AccessDenied,psutil.NoSuchProcess):
except (psutil.AccessDenied, psutil.NoSuchProcess):
continue
return False
def welcome(versions: dict):
@group()
def text():
yield f'InvokeAI Version: [bold yellow]{__version__}'
yield ''
yield 'This script will update InvokeAI to the latest release, or to a development version of your choice.'
yield ''
yield '[bold yellow]Options:'
yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
yield f"InvokeAI Version: [bold yellow]{__version__}"
yield ""
yield "This script will update InvokeAI to the latest release, or to a development version of your choice."
yield ""
yield "[bold yellow]Options:"
yield f"""[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
[2] Update to the bleeding-edge development version ([italic]main[/italic])
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to'''
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to"""
console.rule()
print(
@@ -72,20 +76,22 @@ def welcome(versions: dict):
)
console.line()
def get_extras():
extras = ''
extras = ""
try:
dist = pkg_resources.get_distribution('xformers')
extras = '[xformers]'
dist = pkg_resources.get_distribution("xformers")
extras = "[xformers]"
except pkg_resources.DistributionNotFound:
pass
return extras
def main():
versions = get_versions()
if invokeai_is_running():
print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
input('Press any key to continue...')
print(f":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]")
input("Press any key to continue...")
return
welcome(versions)
@@ -93,36 +99,36 @@ def main():
tag = None
branch = None
release = None
choice = Prompt.ask('Choice:',choices=['1','2','3','4'],default='1')
if choice=='1':
release = versions[0]['tag_name']
elif choice=='2':
release = 'main'
elif choice=='3':
tag = Prompt.ask('Enter an InvokeAI tag name')
elif choice=='4':
branch = Prompt.ask('Enter an InvokeAI branch name')
choice = Prompt.ask("Choice:", choices=["1", "2", "3", "4"], default="1")
if choice == "1":
release = versions[0]["tag_name"]
elif choice == "2":
release = "main"
elif choice == "3":
tag = Prompt.ask("Enter an InvokeAI tag name")
elif choice == "4":
branch = Prompt.ask("Enter an InvokeAI branch name")
extras = get_extras()
print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
print(f":crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]")
if release:
cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_SRC}/{release}.zip" --use-pep517 --upgrade'
elif tag:
cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_TAG}/{tag}.zip" --use-pep517 --upgrade'
else:
cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_BRANCH}/{branch}.zip" --use-pep517 --upgrade'
print('')
print('')
if os.system(cmd)==0:
print(f':heavy_check_mark: Upgrade successful')
print("")
print("")
if os.system(cmd) == 0:
print(f":heavy_check_mark: Upgrade successful")
else:
print(f':exclamation: [bold red]Upgrade failed[/red bold]')
print(f":exclamation: [bold red]Upgrade failed[/red bold]")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass

View File

@@ -56,18 +56,18 @@ logger = InvokeAILogger.getLogger()
# build a table mapping all non-printable characters to None
# for stripping control characters
# from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
NOPRINT_TRANS_TABLE = {
i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()
}
NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()}
def make_printable(s:str)->str:
'''Replace non-printable characters in a string'''
def make_printable(s: str) -> str:
"""Replace non-printable characters in a string"""
return s.translate(NOPRINT_TRANS_TABLE)
class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
# for responsive resizing set to False, but this seems to cause a crash!
FIX_MINIMUM_SIZE_WHEN_CREATED = True
# for persistence
current_tab = 0
@@ -82,24 +82,18 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.subprocess_connection = None
if not config.model_conf_path.exists():
with open(config.model_conf_path,'w') as file:
print('# InvokeAI model configuration file',file=file)
with open(config.model_conf_path, "w") as file:
print("# InvokeAI model configuration file", file=file)
self.installer = ModelInstall(config)
self.all_models = self.installer.all_models()
self.starter_models = self.installer.starter_models()
self.model_labels = self._get_model_labels()
self.model_labels = self._get_model_labels()
window_width, window_height = get_terminal_size()
self.nextrely -= 1
self.add_widget_intelligent(
npyscreen.FixedText,
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields,",
editable=False,
color="CAUTION",
)
self.add_widget_intelligent(
npyscreen.FixedText,
value="Use cursor arrows to make a selection, and space to toggle checkboxes.",
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields. Cursor keys navigate, and <space> selects.",
editable=False,
color="CAUTION",
)
@@ -107,17 +101,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.tabs = self.add_widget_intelligent(
SingleSelectColumns,
values=[
'STARTER MODELS',
'MORE MODELS',
'CONTROLNETS',
'LORA/LYCORIS',
'TEXTUAL INVERSION',
"STARTER MODELS",
"MORE MODELS",
"CONTROLNETS",
"LORA/LYCORIS",
"TEXTUAL INVERSION",
],
value=[self.current_tab],
columns = 5,
max_height = 2,
columns=5,
max_height=2,
relx=8,
scroll_exit = True,
scroll_exit=True,
)
self.tabs.on_changed = self._toggle_tables
@@ -127,104 +121,105 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.nextrely = top_of_table
self.pipeline_models = self.add_pipeline_widgets(
model_type=ModelType.Main,
window_width=window_width,
exclude = self.starter_models
model_type=ModelType.Main, window_width=window_width, exclude=self.starter_models
)
# self.pipeline_models['autoload_pending'] = True
bottom_of_table = max(bottom_of_table,self.nextrely)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.controlnet_models = self.add_model_widgets(
model_type=ModelType.ControlNet,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table,self.nextrely)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.lora_models = self.add_model_widgets(
model_type=ModelType.Lora,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table,self.nextrely)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.ti_models = self.add_model_widgets(
model_type=ModelType.TextualInversion,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table,self.nextrely)
self.nextrely = bottom_of_table+1
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = bottom_of_table + 1
self.monitor = self.add_widget_intelligent(
BufferBox,
name='Log Messages',
name="Log Messages",
editable=False,
max_height = 10,
max_height=8,
)
self.nextrely += 1
done_label = "APPLY CHANGES"
back_label = "BACK"
cancel_label = "CANCEL"
current_position = self.nextrely
if self.multipage:
self.back_button = self.add_widget_intelligent(
npyscreen.ButtonPress,
name=back_label,
rely=-3,
when_pressed_function=self.on_back,
)
else:
self.nextrely = current_position
self.cancel_button = self.add_widget_intelligent(
npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel
)
self.nextrely = current_position
self.ok_button = self.add_widget_intelligent(
npyscreen.ButtonPress,
name=done_label,
relx=(window_width - len(done_label)) // 2,
rely=-3,
when_pressed_function=self.on_execute
when_pressed_function=self.on_execute,
)
label = "APPLY CHANGES & EXIT"
self.nextrely = current_position
self.done = self.add_widget_intelligent(
npyscreen.ButtonPress,
name=label,
rely=-3,
relx=window_width-len(label)-15,
relx=window_width - len(label) - 15,
when_pressed_function=self.on_done,
)
# This restores the selected page on return from an installation
for i in range(1,self.current_tab+1):
for i in range(1, self.current_tab + 1):
self.tabs.h_cursor_line_down(1)
self._toggle_tables([self.current_tab])
############# diffusers tab ##########
def add_starter_pipelines(self)->dict[str, npyscreen.widget]:
'''Add widgets responsible for selecting diffusers models'''
############# diffusers tab ##########
def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
"""Add widgets responsible for selecting diffusers models"""
widgets = dict()
models = self.all_models
starters = self.starter_models
starter_model_labels = self.model_labels
self.installed_models = sorted(
[x for x in starters if models[x].installed]
)
self.installed_models = sorted([x for x in starters if models[x].installed])
widgets.update(
label1 = self.add_widget_intelligent(
label1=self.add_widget_intelligent(
CenteredTitleText,
name="Select from a starter set of Stable Diffusion models from HuggingFace.",
editable=False,
labelColor="CAUTION",
)
)
self.nextrely -= 1
# if user has already installed some initial models, then don't patronize them
# by showing more recommendations
show_recommended = len(self.installed_models)==0
show_recommended = len(self.installed_models) == 0
keys = [x for x in models.keys() if x in starters]
widgets.update(
models_selected = self.add_widget_intelligent(
models_selected=self.add_widget_intelligent(
MultiSelectColumns,
columns=1,
name="Install Starter Models",
@@ -232,40 +227,43 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
value=[
keys.index(x)
for x in keys
if (show_recommended and models[x].recommended) \
or (x in self.installed_models)
if (show_recommended and models[x].recommended) or (x in self.installed_models)
],
max_height=len(starters) + 1,
relx=4,
scroll_exit=True,
),
models = keys,
models=keys,
)
self.nextrely += 1
return widgets
############# Add a set of model install widgets ########
def add_model_widgets(self,
model_type: ModelType,
window_width: int=120,
install_prompt: str=None,
exclude: set=set(),
)->dict[str,npyscreen.widget]:
'''Generic code to create model selection widgets'''
def add_model_widgets(
self,
model_type: ModelType,
window_width: int = 120,
install_prompt: str = None,
exclude: set = set(),
) -> dict[str, npyscreen.widget]:
"""Generic code to create model selection widgets"""
widgets = dict()
model_list = [x for x in self.all_models if self.all_models[x].model_type==model_type and not x in exclude]
model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and not x in exclude]
model_labels = [self.model_labels[x] for x in model_list]
show_recommended = len(self.installed_models)==0
show_recommended = len(self.installed_models) == 0
if len(model_list) > 0:
max_width = max([len(x) for x in model_labels])
columns = window_width // (max_width+8) # 8 characters for "[x] " and padding
columns = min(len(model_list),columns) or 1
prompt = install_prompt or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."
columns = window_width // (max_width + 8) # 8 characters for "[x] " and padding
columns = min(len(model_list), columns) or 1
prompt = (
install_prompt
or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."
)
widgets.update(
label1 = self.add_widget_intelligent(
label1=self.add_widget_intelligent(
CenteredTitleText,
name=prompt,
editable=False,
@@ -274,7 +272,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
)
widgets.update(
models_selected = self.add_widget_intelligent(
models_selected=self.add_widget_intelligent(
MultiSelectColumns,
columns=columns,
name=f"Install {model_type} Models",
@@ -282,21 +280,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
value=[
model_list.index(x)
for x in model_list
if (show_recommended and self.all_models[x].recommended) \
or self.all_models[x].installed
if (show_recommended and self.all_models[x].recommended) or self.all_models[x].installed
],
max_height=len(model_list)//columns + 1,
max_height=len(model_list) // columns + 1,
relx=4,
scroll_exit=True,
),
models = model_list,
models=model_list,
)
self.nextrely += 1
widgets.update(
download_ids = self.add_widget_intelligent(
download_ids=self.add_widget_intelligent(
TextBox,
name = "Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
max_height=4,
scroll_exit=True,
editable=True,
@@ -305,16 +302,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
return widgets
### Tab for arbitrary diffusers widgets ###
def add_pipeline_widgets(self,
model_type: ModelType=ModelType.Main,
window_width: int=120,
**kwargs,
)->dict[str,npyscreen.widget]:
'''Similar to add_model_widgets() but adds some additional widgets at the bottom
to support the autoload directory'''
def add_pipeline_widgets(
self,
model_type: ModelType = ModelType.Main,
window_width: int = 120,
**kwargs,
) -> dict[str, npyscreen.widget]:
"""Similar to add_model_widgets() but adds some additional widgets at the bottom
to support the autoload directory"""
widgets = self.add_model_widgets(
model_type = model_type,
window_width = window_width,
model_type=model_type,
window_width=window_width,
install_prompt=f"Additional {model_type.value.title()} models already installed.",
**kwargs,
)
@@ -323,7 +321,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
def resize(self):
super().resize()
if (s := self.starter_pipelines.get("models_selected")):
if s := self.starter_pipelines.get("models_selected"):
keys = [x for x in self.all_models.keys() if x in self.starter_models]
s.values = [self.model_labels[x] for x in keys]
@@ -338,27 +336,27 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
]
for group in widgets:
for k,v in group.items():
for k, v in group.items():
try:
v.hidden = True
v.editable = False
except:
pass
for k,v in widgets[selected_tab].items():
for k, v in widgets[selected_tab].items():
try:
v.hidden = False
if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
v.editable = True
except:
pass
self.__class__.current_tab = selected_tab # for persistence
self.display()
def _get_model_labels(self) -> dict[str,str]:
def _get_model_labels(self) -> dict[str, str]:
window_width, window_height = get_terminal_size()
checkbox_width = 4
spacing_width = 2
models = self.all_models
label_width = max([len(models[x].name) for x in models])
description_width = window_width - label_width - checkbox_width - spacing_width
@@ -366,30 +364,28 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
result = dict()
for x in models.keys():
description = models[x].description
description = description[0 : description_width - 3] + "..." \
if description and len(description) > description_width \
else description if description else ''
result[x] = f"%-{label_width}s %s" % (models[x].name, description)
description = (
description[0 : description_width - 3] + "..."
if description and len(description) > description_width
else description
if description
else ""
)
result[x] = f"%-{label_width}s %s" % (models[x].name, description)
return result
def _get_columns(self) -> int:
window_width, window_height = get_terminal_size()
cols = (
4
if window_width > 240
else 3
if window_width > 160
else 2
if window_width > 80
else 1
)
cols = 4 if window_width > 240 else 3 if window_width > 160 else 2 if window_width > 80 else 1
return min(cols, len(self.installed_models))
def confirm_deletions(self, selections: InstallSelections)->bool:
def confirm_deletions(self, selections: InstallSelections) -> bool:
remove_models = selections.remove_models
if len(remove_models) > 0:
mods = "\n".join([ModelManager.parse_key(x)[0] for x in remove_models])
return npyscreen.notify_ok_cancel(f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}")
return npyscreen.notify_ok_cancel(
f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}"
)
else:
return True
@@ -398,20 +394,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
app = self.parentApp
if not self.confirm_deletions(app.install_selections):
return
self.monitor.entry_widget.buffer(['Processing...'],scroll_end=True)
self.monitor.entry_widget.buffer(["Processing..."], scroll_end=True)
self.ok_button.hidden = True
self.display()
# for communication with the subprocess
parent_conn, child_conn = Pipe()
p = Process(
target = process_and_execute,
target=process_and_execute,
kwargs=dict(
opt = app.program_opts,
selections = app.install_selections,
conn_out = child_conn,
)
opt=app.program_opts,
selections=app.install_selections,
conn_out=child_conn,
),
)
p.start()
child_conn.close()
@@ -428,7 +424,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.parentApp.setNextForm(None)
self.parentApp.user_cancelled = True
self.editing = False
def on_done(self):
self.marshall_arguments()
if not self.confirm_deletions(self.parentApp.install_selections):
@@ -436,75 +432,79 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.parentApp.setNextForm(None)
self.parentApp.user_cancelled = False
self.editing = False
########## This routine monitors the child process that is performing model installation and removal #####
def while_waiting(self):
'''Called during idle periods. Main task is to update the Log Messages box with messages
from the child process that does the actual installation/removal'''
"""Called during idle periods. Main task is to update the Log Messages box with messages
from the child process that does the actual installation/removal"""
c = self.subprocess_connection
if not c:
return
monitor_widget = self.monitor.entry_widget
while c.poll():
try:
data = c.recv_bytes().decode('utf-8')
data.strip('\n')
data = c.recv_bytes().decode("utf-8")
data.strip("\n")
# processing child is requesting user input to select the
# right configuration file
if data.startswith('*need v2 config'):
_,model_path,*_ = data.split(":",2)
if data.startswith("*need v2 config"):
_, model_path, *_ = data.split(":", 2)
self._return_v2_config(model_path)
# processing child is done
elif data=='*done*':
elif data == "*done*":
self._close_subprocess_and_regenerate_form()
break
# update the log message box
else:
data=make_printable(data)
data=data.replace('[A','')
data = make_printable(data)
data = data.replace("[A", "")
monitor_widget.buffer(
textwrap.wrap(data,
width=monitor_widget.width,
subsequent_indent=' ',
),
scroll_end=True
textwrap.wrap(
data,
width=monitor_widget.width,
subsequent_indent=" ",
),
scroll_end=True,
)
self.display()
except (EOFError,OSError):
except (EOFError, OSError):
self.subprocess_connection = None
def _return_v2_config(self,model_path: str):
def _return_v2_config(self, model_path: str):
c = self.subprocess_connection
model_name = Path(model_path).name
message = select_stable_diffusion_config_file(model_name=model_name)
c.send_bytes(message.encode('utf-8'))
c.send_bytes(message.encode("utf-8"))
def _close_subprocess_and_regenerate_form(self):
app = self.parentApp
self.subprocess_connection.close()
self.subprocess_connection = None
self.monitor.entry_widget.buffer(['** Action Complete **'])
self.monitor.entry_widget.buffer(["** Action Complete **"])
self.display()
# rebuild the form, saving and restoring some of the fields that need to be preserved.
saved_messages = self.monitor.entry_widget.values
# autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value)
# autoscan = self.pipeline_models['autoscan_on_startup'].value
app.main_form = app.addForm(
"MAIN", addModelsForm, name="Install Stable Diffusion Models", multipage=self.multipage,
"MAIN",
addModelsForm,
name="Install Stable Diffusion Models",
multipage=self.multipage,
)
app.switchForm("MAIN")
app.main_form.monitor.entry_widget.values = saved_messages
app.main_form.monitor.entry_widget.buffer([''],scroll_end=True)
app.main_form.monitor.entry_widget.buffer([""], scroll_end=True)
# app.main_form.pipeline_models['autoload_directory'].value = autoload_dir
# app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan
def marshall_arguments(self):
"""
Assemble arguments and store as attributes of the application:
@@ -519,21 +519,29 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
all_models = self.all_models
# Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove
ui_sections = [self.starter_pipelines, self.pipeline_models,
self.controlnet_models, self.lora_models, self.ti_models]
ui_sections = [
self.starter_pipelines,
self.pipeline_models,
self.controlnet_models,
self.lora_models,
self.ti_models,
]
for section in ui_sections:
if not 'models_selected' in section:
if not "models_selected" in section:
continue
selected = set([section['models'][x] for x in section['models_selected'].value])
selected = set([section["models"][x] for x in section["models_selected"].value])
models_to_install = [x for x in selected if not self.all_models[x].installed]
models_to_remove = [x for x in section['models'] if x not in selected and self.all_models[x].installed]
models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
selections.remove_models.extend(models_to_remove)
selections.install_models.extend(all_models[x].path or all_models[x].repo_id \
for x in models_to_install if all_models[x].path or all_models[x].repo_id)
selections.install_models.extend(
all_models[x].path or all_models[x].repo_id
for x in models_to_install
if all_models[x].path or all_models[x].repo_id
)
# models located in the 'download_ids" section
for section in ui_sections:
if downloads := section.get('download_ids'):
if downloads := section.get("download_ids"):
selections.install_models.extend(downloads.value.split())
# load directory and whether to scan on startup
@@ -542,8 +550,9 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
# self.parentApp.autoload_pending = False
# selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value
class AddModelApplication(npyscreen.NPSAppManaged):
def __init__(self,opt):
def __init__(self, opt):
super().__init__()
self.program_opts = opt
self.user_cancelled = False
@@ -553,76 +562,83 @@ class AddModelApplication(npyscreen.NPSAppManaged):
def onStart(self):
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
self.main_form = self.addForm(
"MAIN", addModelsForm, name="Install Stable Diffusion Models", cycle_widgets=True,
"MAIN",
addModelsForm,
name="Install Stable Diffusion Models",
cycle_widgets=False,
)
class StderrToMessage():
class StderrToMessage:
def __init__(self, connection: Connection):
self.connection = connection
def write(self, data:str):
self.connection.send_bytes(data.encode('utf-8'))
def write(self, data: str):
self.connection.send_bytes(data.encode("utf-8"))
def flush(self):
pass
# --------------------------------------------------------
def ask_user_for_prediction_type(model_path: Path,
tui_conn: Connection=None
)->SchedulerPredictionType:
def ask_user_for_prediction_type(model_path: Path, tui_conn: Connection = None) -> SchedulerPredictionType:
if tui_conn:
logger.debug('Waiting for user response...')
return _ask_user_for_pt_tui(model_path, tui_conn)
logger.debug("Waiting for user response...")
return _ask_user_for_pt_tui(model_path, tui_conn)
else:
return _ask_user_for_pt_cmdline(model_path)
def _ask_user_for_pt_cmdline(model_path: Path)->SchedulerPredictionType:
def _ask_user_for_pt_cmdline(model_path: Path) -> SchedulerPredictionType:
choices = [SchedulerPredictionType.Epsilon, SchedulerPredictionType.VPrediction, None]
print(
f"""
f"""
Please select the type of the V2 checkpoint named {model_path.name}:
[1] A model based on Stable Diffusion v2 trained on 512 pixel images (SD-2-base)
[2] A model based on Stable Diffusion v2 trained on 768 pixel images (SD-2-768)
[3] Skip this model and come back later.
"""
)
)
choice = None
ok = False
while not ok:
try:
choice = input('select> ').strip()
choice = choices[int(choice)-1]
choice = input("select> ").strip()
choice = choices[int(choice) - 1]
ok = True
except (ValueError, IndexError):
print(f'{choice} is not a valid choice')
print(f"{choice} is not a valid choice")
except EOFError:
return
return choice
def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection)->SchedulerPredictionType:
def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPredictionType:
try:
tui_conn.send_bytes(f'*need v2 config for:{model_path}'.encode('utf-8'))
tui_conn.send_bytes(f"*need v2 config for:{model_path}".encode("utf-8"))
# note that we don't do any status checking here
response = tui_conn.recv_bytes().decode('utf-8')
response = tui_conn.recv_bytes().decode("utf-8")
if response is None:
return None
elif response == 'epsilon':
elif response == "epsilon":
return SchedulerPredictionType.epsilon
elif response == 'v':
elif response == "v":
return SchedulerPredictionType.VPrediction
elif response == 'abort':
logger.info('Conversion aborted')
elif response == "abort":
logger.info("Conversion aborted")
return None
else:
return response
except:
return None
# --------------------------------------------------------
def process_and_execute(opt: Namespace,
selections: InstallSelections,
conn_out: Connection=None,
):
def process_and_execute(
opt: Namespace,
selections: InstallSelections,
conn_out: Connection = None,
):
# set up so that stderr is sent to conn_out
if conn_out:
translator = StderrToMessage(conn_out)
@@ -632,66 +648,57 @@ def process_and_execute(opt: Namespace,
logger.handlers.clear()
logger.addHandler(logging.StreamHandler(translator))
installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x,conn_out))
installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out))
installer.install(selections)
if conn_out:
conn_out.send_bytes('*done*'.encode('utf-8'))
conn_out.send_bytes("*done*".encode("utf-8"))
conn_out.close()
def do_listings(opt)->bool:
def do_listings(opt) -> bool:
"""List installed models of various sorts, and return
True if any were requested."""
model_manager = ModelManager(config.model_conf_path)
if opt.list_models == 'diffusers':
if opt.list_models == "diffusers":
print("Diffuser models:")
model_manager.print_models()
elif opt.list_models == 'controlnets':
elif opt.list_models == "controlnets":
print("Installed Controlnet Models:")
cnm = model_manager.list_controlnet_models()
print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]),prefix=' '))
elif opt.list_models == 'loras':
print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
elif opt.list_models == "loras":
print("Installed LoRA/LyCORIS Models:")
cnm = model_manager.list_lora_models()
print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]),prefix=' '))
elif opt.list_models == 'tis':
print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
elif opt.list_models == "tis":
print("Installed Textual Inversion Embeddings:")
cnm = model_manager.list_ti_models()
print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]),prefix=' '))
print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
else:
return False
return True
# --------------------------------------------------------
def select_and_download_models(opt: Namespace):
precision = (
"float32"
if opt.full_precision
else choose_precision(torch.device(choose_torch_device()))
)
precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
config.precision = precision
helper = lambda x: ask_user_for_prediction_type(x)
# if do_listings(opt):
# pass
installer = ModelInstall(config, prediction_type_helper=helper)
if opt.list_models:
installer.list_models(opt.list_models)
elif opt.add or opt.delete:
selections = InstallSelections(
install_models = opt.add or [],
remove_models = opt.delete or []
)
selections = InstallSelections(install_models=opt.add or [], remove_models=opt.delete or [])
installer.install(selections)
elif opt.default_only:
selections = InstallSelections(
install_models = installer.default_model()
)
selections = InstallSelections(install_models=installer.default_model())
installer.install(selections)
elif opt.yes_to_all:
selections = InstallSelections(
install_models = installer.recommended_models()
)
selections = InstallSelections(install_models=installer.recommended_models())
installer.install(selections)
# this is where the TUI is called
@@ -706,15 +713,15 @@ def select_and_download_models(opt: Namespace):
try:
installApp.run()
except KeyboardInterrupt as e:
if hasattr(installApp,'main_form'):
if installApp.main_form.subprocess \
and installApp.main_form.subprocess.is_alive():
logger.info('Terminating subprocesses')
if hasattr(installApp, "main_form"):
if installApp.main_form.subprocess and installApp.main_form.subprocess.is_alive():
logger.info("Terminating subprocesses")
installApp.main_form.subprocess.terminate()
installApp.main_form.subprocess = None
raise e
process_and_execute(opt, installApp.install_selections)
# -------------------------------------
def main():
parser = argparse.ArgumentParser(description="InvokeAI model downloader")
@@ -769,19 +776,17 @@ def main():
help="path to root of install directory",
)
opt = parser.parse_args()
invoke_args = []
if opt.root:
invoke_args.extend(['--root',opt.root])
invoke_args.extend(["--root", opt.root])
if opt.full_precision:
invoke_args.extend(['--precision','float32'])
invoke_args.extend(["--precision", "float32"])
config.parse_args(invoke_args)
logger = InvokeAILogger().getLogger(config=config)
if not config.model_conf_path.exists():
logger.info(
"Your InvokeAI root directory is not set up. Calling invokeai-configure."
)
logger.info("Your InvokeAI root directory is not set up. Calling invokeai-configure.")
from invokeai.frontend.install import invokeai_configure
invokeai_configure()
@@ -799,20 +804,18 @@ def main():
logger.info("Goodbye! Come back soon.")
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
logger.error(
"Insufficient vertical space for the interface. Please make your window taller and try again"
)
input('Press any key to continue...')
logger.error("Insufficient vertical space for the interface. Please make your window taller and try again")
input("Press any key to continue...")
except Exception as e:
if str(e).startswith("addwstr"):
logger.error(
"Insufficient horizontal space for the interface. Please make your window wider and try again."
)
else:
print(f'An exception has occurred: {str(e)} Details:')
print(f"An exception has occurred: {str(e)} Details:")
print(traceback.format_exc(), file=sys.stderr)
input('Press any key to continue...')
input("Press any key to continue...")
# -------------------------------------
if __name__ == "__main__":

View File

@@ -14,40 +14,42 @@ import textwrap
import npyscreen.wgmultiline as wgmultiline
from npyscreen import fmPopup
from shutil import get_terminal_size
from curses import BUTTON2_CLICKED,BUTTON3_CLICKED
from curses import BUTTON2_CLICKED, BUTTON3_CLICKED
# minimum size for UIs
MIN_COLS = 136
MIN_LINES = 45
MIN_COLS = 130
MIN_LINES = 38
# -------------------------------------
def set_terminal_size(columns: int, lines: int):
ts = get_terminal_size()
width = max(columns,ts.columns)
height = max(lines,ts.lines)
width = max(columns, ts.columns)
height = max(lines, ts.lines)
OS = platform.uname().system
if OS == "Windows":
pass
# not working reliably - ask user to adjust the window
#_set_terminal_size_powershell(width,height)
# _set_terminal_size_powershell(width,height)
elif OS in ["Darwin", "Linux"]:
_set_terminal_size_unix(width,height)
_set_terminal_size_unix(width, height)
# check whether it worked....
ts = get_terminal_size()
pause = False
if ts.columns < columns:
print('\033[1mThis window is too narrow for the user interface. Please make it wider.\033[0m')
print("\033[1mThis window is too narrow for the user interface.\033[0m")
pause = True
if ts.lines < lines:
print('\033[1mThis window is too short for the user interface. Please make it taller.\033[0m')
print("\033[1mThis window is too short for the user interface.\033[0m")
pause = True
if pause:
input('Press any key to continue..')
input("Maximize the window then press any key to continue..")
def _set_terminal_size_powershell(width: int, height: int):
script=f'''
script = f"""
$pshost = get-host
$pswindow = $pshost.ui.rawui
$newsize = $pswindow.buffersize
@@ -58,8 +60,9 @@ $newsize = $pswindow.windowsize
$newsize.height = {height}
$newsize.width = {width}
$pswindow.windowsize = $newsize
'''
subprocess.run(["powershell","-Command","-"],input=script,text=True)
"""
subprocess.run(["powershell", "-Command", "-"], input=script, text=True)
def _set_terminal_size_unix(width: int, height: int):
import fcntl
@@ -67,15 +70,16 @@ def _set_terminal_size_unix(width: int, height: int):
# These terminals accept the size command and report that the
# size changed, but they lie!!!
for bad_terminal in ['TERMINATOR_UUID', 'ALACRITTY_WINDOW_ID']:
for bad_terminal in ["TERMINATOR_UUID", "ALACRITTY_WINDOW_ID"]:
if os.environ.get(bad_terminal):
return
winsize = struct.pack("HHHH", height, width, 0, 0)
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
sys.stdout.write("\x1b[8;{height};{width}t".format(height=height, width=width))
sys.stdout.flush()
def set_min_terminal_size(min_cols: int, min_lines: int):
# make sure there's enough room for the ui
term_cols, term_lines = get_terminal_size()
@@ -85,6 +89,7 @@ def set_min_terminal_size(min_cols: int, min_lines: int):
lines = max(term_lines, min_lines)
set_terminal_size(cols, lines)
class IntSlider(npyscreen.Slider):
def translate_value(self):
stri = "%2d / %2d" % (self.value, self.out_of)
@@ -92,23 +97,25 @@ class IntSlider(npyscreen.Slider):
stri = stri.rjust(l)
return stri
# -------------------------------------
# fix npyscreen form so that cursor wraps both forward and backward
class CyclingForm(object):
def find_previous_editable(self, *args):
done = False
n = self.editw-1
n = self.editw - 1
while not done:
if self._widgets__[n].editable and not self._widgets__[n].hidden:
self.editw = n
done = True
n -= 1
if n<0:
if n < 0:
if self.cycle_widgets:
n = len(self._widgets__)-1
n = len(self._widgets__) - 1
else:
done = True
# -------------------------------------
class CenteredTitleText(npyscreen.TitleText):
def __init__(self, *args, **keywords):
@@ -159,7 +166,8 @@ class FloatSlider(npyscreen.Slider):
class FloatTitleSlider(npyscreen.TitleText):
_entry_type = FloatSlider
class SelectColumnBase():
class SelectColumnBase:
def make_contained_widgets(self):
self._my_widgets = []
column_width = self.width // self.columns
@@ -217,11 +225,12 @@ class SelectColumnBase():
row_no = rel_y // self._contained_widget_height
self.cursor_line = column_no * column_height + row_no
if bstate & curses.BUTTON1_DOUBLE_CLICKED:
if hasattr(self,'on_mouse_double_click'):
if hasattr(self, "on_mouse_double_click"):
self.on_mouse_double_click(self.cursor_line)
self.display()
class MultiSelectColumns( SelectColumnBase, npyscreen.MultiSelect):
class MultiSelectColumns(SelectColumnBase, npyscreen.MultiSelect):
def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
self.columns = columns
self.value_cnt = len(values)
@@ -231,15 +240,17 @@ class MultiSelectColumns( SelectColumnBase, npyscreen.MultiSelect):
def on_mouse_double_click(self, cursor_line):
self.h_select_toggle(cursor_line)
class SingleSelectWithChanged(npyscreen.SelectOne):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
def h_select(self,ch):
class SingleSelectWithChanged(npyscreen.SelectOne):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def h_select(self, ch):
super().h_select(ch)
if self.on_changed:
self.on_changed(self.value)
class SingleSelectColumns(SelectColumnBase, SingleSelectWithChanged):
def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
self.columns = columns
@@ -254,23 +265,25 @@ class SingleSelectColumns(SelectColumnBase, SingleSelectWithChanged):
def when_cursor_moved(self):
self.h_select(self.cursor_line)
def h_cursor_line_right(self,ch):
self.h_exit_down('bye bye')
def h_cursor_line_right(self, ch):
self.h_exit_down("bye bye")
class TextBoxInner(npyscreen.MultiLineEdit):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.yank = None
self.handlers.update({
"^A": self.h_cursor_to_start,
"^E": self.h_cursor_to_end,
"^K": self.h_kill,
"^F": self.h_cursor_right,
"^B": self.h_cursor_left,
"^Y": self.h_yank,
"^V": self.h_paste,
})
self.handlers.update(
{
"^A": self.h_cursor_to_start,
"^E": self.h_cursor_to_end,
"^K": self.h_kill,
"^F": self.h_cursor_right,
"^B": self.h_cursor_left,
"^Y": self.h_yank,
"^V": self.h_paste,
}
)
def h_cursor_to_start(self, input):
self.cursor_position = 0
@@ -279,27 +292,27 @@ class TextBoxInner(npyscreen.MultiLineEdit):
self.cursor_position = len(self.value)
def h_kill(self, input):
self.yank = self.value[self.cursor_position:]
self.value = self.value[:self.cursor_position]
self.yank = self.value[self.cursor_position :]
self.value = self.value[: self.cursor_position]
def h_yank(self, input):
if self.yank:
self.paste(self.yank)
def paste(self, text: str):
self.value = self.value[:self.cursor_position] + text + self.value[self.cursor_position:]
self.value = self.value[: self.cursor_position] + text + self.value[self.cursor_position :]
self.cursor_position += len(text)
def h_paste(self, input: int=0):
def h_paste(self, input: int = 0):
try:
text = pyperclip.paste()
except ModuleNotFoundError:
text = "To paste with the mouse on Linux, please install the 'xclip' program."
self.paste(text)
def handle_mouse_event(self, mouse_event):
mouse_id, rel_x, rel_y, z, bstate = self.interpret_mouse_event(mouse_event)
if bstate & (BUTTON2_CLICKED|BUTTON3_CLICKED):
if bstate & (BUTTON2_CLICKED | BUTTON3_CLICKED):
self.h_paste()
# def update(self, clear=True):
@@ -320,77 +333,87 @@ class TextBoxInner(npyscreen.MultiLineEdit):
# self.rely, self.relx + WIDTH, curses.ACS_VLINE, HEIGHT
# )
# # draw corners
# self.parent.curses_pad.addch(
# self.rely,
# self.relx,
# curses.ACS_ULCORNER,
# )
# self.parent.curses_pad.addch(
# self.rely,
# self.relx + WIDTH,
# curses.ACS_URCORNER,
# )
# self.parent.curses_pad.addch(
# self.rely + HEIGHT,
# self.relx,
# curses.ACS_LLCORNER,
# )
# self.parent.curses_pad.addch(
# self.rely + HEIGHT,
# self.relx + WIDTH,
# curses.ACS_LRCORNER,
# )
# # draw corners
# self.parent.curses_pad.addch(
# self.rely,
# self.relx,
# curses.ACS_ULCORNER,
# )
# self.parent.curses_pad.addch(
# self.rely,
# self.relx + WIDTH,
# curses.ACS_URCORNER,
# )
# self.parent.curses_pad.addch(
# self.rely + HEIGHT,
# self.relx,
# curses.ACS_LLCORNER,
# )
# self.parent.curses_pad.addch(
# self.rely + HEIGHT,
# self.relx + WIDTH,
# curses.ACS_LRCORNER,
# )
# # fool our superclass into thinking drawing area is smaller - this is really hacky but it seems to work
# (relx, rely, height, width) = (self.relx, self.rely, self.height, self.width)
# self.relx += 1
# self.rely += 1
# self.height -= 1
# self.width -= 1
# super().update(clear=False)
# (self.relx, self.rely, self.height, self.width) = (relx, rely, height, width)
# # fool our superclass into thinking drawing area is smaller - this is really hacky but it seems to work
# (relx, rely, height, width) = (self.relx, self.rely, self.height, self.width)
# self.relx += 1
# self.rely += 1
# self.height -= 1
# self.width -= 1
# super().update(clear=False)
# (self.relx, self.rely, self.height, self.width) = (relx, rely, height, width)
class TextBox(npyscreen.BoxTitle):
_contained_widget = TextBoxInner
class BufferBox(npyscreen.BoxTitle):
_contained_widget = npyscreen.BufferPager
class ConfirmCancelPopup(fmPopup.ActionPopup):
DEFAULT_COLUMNS = 100
def on_ok(self):
self.value = True
def on_cancel(self):
self.value = False
class FileBox(npyscreen.BoxTitle):
_contained_widget = npyscreen.Filename
class PrettyTextBox(npyscreen.BoxTitle):
_contained_widget = TextBox
def _wrap_message_lines(message, line_length):
lines = []
for line in message.split('\n'):
for line in message.split("\n"):
lines.extend(textwrap.wrap(line.rstrip(), line_length))
return lines
def _prepare_message(message):
if isinstance(message, list) or isinstance(message, tuple):
return "\n".join([ s.rstrip() for s in message])
#return "\n".join(message)
return "\n".join([s.rstrip() for s in message])
# return "\n".join(message)
else:
return message
def select_stable_diffusion_config_file(
form_color: str='DANGER',
wrap:bool =True,
model_name:str='Unknown',
form_color: str = "DANGER",
wrap: bool = True,
model_name: str = "Unknown",
):
message = f"Please select the correct base model for the V2 checkpoint named '{model_name}'. Press <CANCEL> to skip installation."
title = "CONFIG FILE SELECTION"
options=[
options = [
"An SD v2.x base model (512 pixels; no 'parameterization:' line in its yaml file)",
"An SD v2.x v-predictive model (768 pixels; 'parameterization: \"v\"' line in its yaml file)",
"Skip installation for now and come back later",
@@ -403,22 +426,22 @@ def select_stable_diffusion_config_file(
lines=16,
)
F.preserve_selected_widget = True
mlw = F.add(
wgmultiline.Pager,
max_height=4,
editable=False,
)
mlw_width = mlw.width-1
mlw_width = mlw.width - 1
if wrap:
message = _wrap_message_lines(message, mlw_width)
mlw.values = message
choice = F.add(
npyscreen.SelectOne,
values = options,
value = [0],
max_height = len(options)+1,
values=options,
value=[0],
max_height=len(options) + 1,
scroll_exit=True,
)
@@ -426,6 +449,6 @@ def select_stable_diffusion_config_file(
F.edit()
if not F.value:
return None
assert choice.value[0] in range(0,3),'invalid choice'
choices = ['epsilon','v','abort']
assert choice.value[0] in range(0, 3), "invalid choice"
choices = ["epsilon", "v", "abort"]
return choices[choice.value[0]]

View File

@@ -2,18 +2,22 @@ import os
import sys
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--web', action='store_true')
opts,_ = parser.parse_known_args()
parser.add_argument("--web", action="store_true")
opts, _ = parser.parse_known_args()
if opts.web:
sys.argv.pop(sys.argv.index('--web'))
sys.argv.pop(sys.argv.index("--web"))
from invokeai.app.api_app import invoke_api
invoke_api()
else:
from invokeai.app.cli_app import invoke_cli
invoke_cli()
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@@ -2,4 +2,3 @@
Initialization file for invokeai.frontend.merge
"""
from .merge_diffusers import main as invokeai_merge_diffusers

View File

@@ -20,13 +20,17 @@ from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import (
ModelMerger, MergeInterpolationMethod,
ModelManager, ModelType, BaseModelType,
ModelMerger,
MergeInterpolationMethod,
ModelManager,
ModelType,
BaseModelType,
)
from invokeai.frontend.install.widgets import FloatTitleSlider, TextBox, SingleSelectColumns
config = InvokeAIAppConfig.get_config()
def _parse_args() -> Namespace:
parser = argparse.ArgumentParser(description="InvokeAI model merging")
parser.add_argument(
@@ -134,14 +138,14 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
self.base_select = self.add_widget_intelligent(
SingleSelectColumns,
values=[
'Models Built on SD-1.x',
'Models Built on SD-2.x',
"Models Built on SD-1.x",
"Models Built on SD-2.x",
],
value=[self.current_base],
columns = 4,
max_height = 2,
columns=4,
max_height=2,
relx=8,
scroll_exit = True,
scroll_exit=True,
)
self.base_select.on_changed = self._populate_models
self.add_widget_intelligent(
@@ -300,15 +304,11 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
def validate_field_values(self) -> bool:
bad_fields = []
model_names = self.model_names
selected_models = set(
(model_names[self.model1.value[0]], model_names[self.model2.value[0]])
)
selected_models = set((model_names[self.model1.value[0]], model_names[self.model2.value[0]]))
if self.model3.value[0] > 0:
selected_models.add(model_names[self.model3.value[0] - 1])
if len(selected_models) < 2:
bad_fields.append(
f"Please select two or three DIFFERENT models to compare. You selected {selected_models}"
)
bad_fields.append(f"Please select two or three DIFFERENT models to compare. You selected {selected_models}")
if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:"
for problem in bad_fields:
@@ -318,7 +318,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
else:
return True
def get_model_names(self, base_model: BaseModelType=None) -> List[str]:
def get_model_names(self, base_model: BaseModelType = None) -> List[str]:
model_names = [
info["name"]
for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model)
@@ -326,20 +326,21 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
]
return sorted(model_names)
def _populate_models(self,value=None):
def _populate_models(self, value=None):
base_model = tuple(BaseModelType)[value[0]]
self.model_names = self.get_model_names(base_model)
models_plus_none = self.model_names.copy()
models_plus_none.insert(0, "None")
self.model1.values = self.model_names
self.model2.values = self.model_names
self.model3.values = models_plus_none
self.display()
class Mergeapp(npyscreen.NPSAppManaged):
def __init__(self, model_manager:ModelManager):
def __init__(self, model_manager: ModelManager):
super().__init__()
self.model_manager = model_manager
@@ -367,9 +368,7 @@ def run_cli(args: Namespace):
if not args.merged_model_name:
args.merged_model_name = "+".join(args.model_names)
logger.info(
f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
)
logger.info(f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"')
model_manager = ModelManager(config.model_conf_path)
assert (
@@ -383,7 +382,7 @@ def run_cli(args: Namespace):
def main():
args = _parse_args()
config.parse_args(['--root',str(args.root_dir)])
config.parse_args(["--root", str(args.root_dir)])
try:
if args.front_end:
@@ -392,13 +391,9 @@ def main():
run_cli(args)
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
logger.error(
"You need to have at least two diffusers models defined in models.yaml in order to merge"
)
logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge")
else:
logger.error(
"Not enough room for the user interface. Try making this window larger."
)
logger.error("Not enough room for the user interface. Try making this window larger.")
sys.exit(-1)
except Exception as e:
logger.error(e)

View File

@@ -23,16 +23,14 @@ from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from ...backend.training import (
do_textual_inversion_training,
parse_args
)
from ...backend.training import do_textual_inversion_training, parse_args
TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
CONF_FILE = "preferences.conf"
config = None
class textualInversionForm(npyscreen.FormMultiPageAction):
resolutions = [512, 768, 1024]
lr_schedulers = [
@@ -111,9 +109,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
npyscreen.TitleSelectOne,
name="Learnable property:",
values=self.learnable_properties,
value=self.learnable_properties.index(
saved_args.get("learnable_property", "object")
),
value=self.learnable_properties.index(saved_args.get("learnable_property", "object")),
max_height=4,
scroll_exit=True,
)
@@ -243,9 +239,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
def initializer_changed(self):
placeholder = self.placeholder_token.value
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
self.train_data_dir.value = str(
config.root_dir / TRAINING_DATA / placeholder
)
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder)
self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()
@@ -254,9 +248,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
self.parentApp.setNextForm(None)
self.editing = False
self.parentApp.ti_arguments = self.marshall_arguments()
npyscreen.notify(
"Launching textual inversion training. This will take a while..."
)
npyscreen.notify("Launching textual inversion training. This will take a while...")
else:
self.editing = True
@@ -266,13 +258,9 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
def validate_field_values(self) -> bool:
bad_fields = []
if self.model.value is None:
bad_fields.append(
"Model Name must correspond to a known model in models.yaml"
)
bad_fields.append("Model Name must correspond to a known model in models.yaml")
if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value):
bad_fields.append(
"Trigger term must only contain alphanumeric characters, the dot and hyphen"
)
bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen")
if self.train_data_dir.value is None:
bad_fields.append("Data Training Directory cannot be empty")
if self.output_dir.value is None:
@@ -288,16 +276,8 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
def get_model_names(self) -> Tuple[List[str], int]:
conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
model_names = [
idx
for idx in sorted(list(conf.keys()))
if conf[idx].get("format", None) == "diffusers"
]
defaults = [
idx
for idx in range(len(model_names))
if "default" in conf[model_names[idx]]
]
model_names = [idx for idx in sorted(list(conf.keys())) if conf[idx].get("format", None) == "diffusers"]
defaults = [idx for idx in range(len(model_names)) if "default" in conf[model_names[idx]]]
default = defaults[0] if len(defaults) > 0 else 0
return (model_names, default)
@@ -310,9 +290,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
resolution=self.resolutions[self.resolution.value[0]],
lr_scheduler=self.lr_schedulers[self.lr_scheduler.value[0]],
mixed_precision=self.precisions[self.mixed_precision.value[0]],
learnable_property=self.learnable_properties[
self.learnable_property.value[0]
],
learnable_property=self.learnable_properties[self.learnable_property.value[0]],
)
# all the strings and booleans
@@ -374,9 +352,7 @@ def copy_to_embeddings_folder(args: dict):
os.makedirs(destination, exist_ok=True)
logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
shutil.copy(source, destination)
if (
input("Delete training logs and intermediate checkpoints? [y] ") or "y"
).startswith(("y", "Y")):
if (input("Delete training logs and intermediate checkpoints? [y] ") or "y").startswith(("y", "Y")):
shutil.rmtree(Path(args["output_dir"]))
else:
logger.info(f'Keeping {args["output_dir"]}')
@@ -423,7 +399,7 @@ def do_front_end(args: Namespace):
save_args(args)
try:
do_textual_inversion_training(InvokeAIAppConfig.get_config(),**args)
do_textual_inversion_training(InvokeAIAppConfig.get_config(), **args)
copy_to_embeddings_folder(args)
except Exception as e:
logger.error("An exception occurred during training. The exception was:")
@@ -434,19 +410,19 @@ def do_front_end(args: Namespace):
def main():
global config
args = parse_args()
config = InvokeAIAppConfig.get_config()
# change root if needed
if args.root_dir:
config.root = args.root_dir
try:
if args.front_end:
do_front_end(args)
else:
do_textual_inversion_training(config,**vars(args))
do_textual_inversion_training(config, **vars(args))
except AssertionError as e:
logger.error(e)
sys.exit(-1)
@@ -454,13 +430,9 @@ def main():
pass
except (widget.NotEnoughSpaceForWidget, Exception) as e:
if str(e).startswith("Height of 1 allocated"):
logger.error(
"You need to have at least one diffusers models defined in models.yaml in order to train"
)
logger.error("You need to have at least one diffusers models defined in models.yaml in order to train")
elif str(e).startswith("addwstr"):
logger.error(
"Not enough window space for the interface. Please make your window larger and try again."
)
logger.error("Not enough window space for the interface. Please make your window larger and try again.")
else:
logger.error(e)
sys.exit(-1)

View File

@@ -1,3 +1,3 @@
'''
"""
Initialization file for invokeai.frontend.web
'''
"""

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -12,7 +12,7 @@
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-e2437518.js"></script>
<script type="module" crossorigin src="./assets/index-bad7ff83.js"></script>
</head>
<body dir="ltr">

View File

@@ -23,6 +23,7 @@
"menu": "Menu"
},
"common": {
"communityLabel": "Community",
"hotkeysLabel": "Hotkeys",
"darkMode": "Dark Mode",
"lightMode": "Light Mode",
@@ -102,8 +103,7 @@
"openInNewTab": "Open in New Tab",
"dontAskMeAgain": "Don't ask me again",
"areYouSure": "Are you sure?",
"imagePrompt": "Image Prompt",
"clearNodes": "Are you sure you want to clear all nodes?"
"imagePrompt": "Image Prompt"
},
"gallery": {
"generations": "Generations",
@@ -340,6 +340,7 @@
"allModels": "All Models",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"loraModels": "LoRAs",
"safetensorModels": "SafeTensors",
"modelAdded": "Model Added",
"modelUpdated": "Model Updated",
@@ -615,6 +616,11 @@
"initialImageNotSetDesc": "Could not load initial image",
"nodesSaved": "Nodes Saved",
"nodesLoaded": "Nodes Loaded",
"nodesNotValidGraph": "Not a valid InvokeAI Node Graph",
"nodesNotValidJSON": "Not a valid JSON",
"nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
"nodesUnrecognizedTypes": "Cannot load. Graph has unrecognized types",
"nodesBrokenConnections": "Cannot load. Some connections are broken.",
"nodesLoadedFailed": "Failed To Load Nodes",
"nodesCleared": "Nodes Cleared"
},
@@ -700,9 +706,10 @@
},
"nodes": {
"reloadSchema": "Reload Schema",
"saveNodes": "Save Nodes",
"loadNodes": "Load Nodes",
"clearNodes": "Clear Nodes",
"saveGraph": "Save Graph",
"loadGraph": "Load Graph (saved from Node Editor) (Do not copy-paste metadata)",
"clearGraph": "Clear Graph",
"clearGraphDesc": "Are you sure you want to clear all nodes?",
"zoomInNodes": "Zoom In",
"zoomOutNodes": "Zoom Out",
"fitViewportNodes": "Fit View",

View File

@@ -53,11 +53,11 @@
]
},
"dependencies": {
"@chakra-ui/anatomy": "^2.1.1",
"@chakra-ui/anatomy": "^2.2.0",
"@chakra-ui/icons": "^2.0.19",
"@chakra-ui/react": "^2.7.1",
"@chakra-ui/react": "^2.8.0",
"@chakra-ui/styled-system": "^2.9.1",
"@chakra-ui/theme-tools": "^2.0.18",
"@chakra-ui/theme-tools": "^2.1.0",
"@dagrejs/graphlib": "^2.1.13",
"@dnd-kit/core": "^6.0.8",
"@dnd-kit/modifiers": "^6.0.1",

View File

@@ -23,6 +23,7 @@
"menu": "Menu"
},
"common": {
"communityLabel": "Community",
"hotkeysLabel": "Hotkeys",
"darkMode": "Dark Mode",
"lightMode": "Light Mode",
@@ -339,6 +340,7 @@
"allModels": "All Models",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"loraModels": "LoRAs",
"safetensorModels": "SafeTensors",
"modelAdded": "Model Added",
"modelUpdated": "Model Updated",

View File

@@ -1,4 +1,8 @@
import { setInfillMethod } from 'features/parameters/store/generationSlice';
import {
shouldUseNSFWCheckerChanged,
shouldUseWatermarkerChanged,
} from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { startAppListening } from '..';
@@ -6,12 +10,24 @@ export const addAppConfigReceivedListener = () => {
startAppListening({
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
const { infill_methods } = action.payload;
const {
infill_methods = [],
nsfw_methods = [],
watermarking_methods = [],
} = action.payload;
const infillMethod = getState().generation.infillMethod;
if (!infill_methods.includes(infillMethod)) {
dispatch(setInfillMethod(infill_methods[0]));
}
if (!nsfw_methods.includes('nsfw_checker')) {
dispatch(shouldUseNSFWCheckerChanged(false));
}
if (!watermarking_methods.includes('invisible_watermark')) {
dispatch(shouldUseWatermarkerChanged(false));
}
},
});
};
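The listener above now trusts the server's capability lists: if `nsfw_checker` or `invisible_watermark` is not advertised, the matching toggle is switched off so the graph builders never add those nodes. A minimal sketch of that gating pattern follows; `ServerConfig`, `applyCapabilityGates`, and `disableFeature` are illustrative stand-ins, not InvokeAI APIs.

// Sketch only: gate client-side features on a server capability list.
type ServerConfig = {
  nsfw_methods?: string[];
  watermarking_methods?: string[];
};

const disableFeature = (name: string): void => {
  console.log(`disabled: ${name}`);
};

export const applyCapabilityGates = (config: ServerConfig): void => {
  const { nsfw_methods = [], watermarking_methods = [] } = config;
  if (!nsfw_methods.includes('nsfw_checker')) {
    disableFeature('nsfwChecker');
  }
  if (!watermarking_methods.includes('invisible_watermark')) {
    disableFeature('watermarker');
  }
};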

View File

@@ -1,13 +1,10 @@
import { logger } from 'app/logging/logger';
import { LIST_TAG } from 'services/api';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { modelsApi } from 'services/api/endpoints/models';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { appSocketConnected, socketConnected } from 'services/events/actions';
import { startAppListening } from '../..';
import {
ALL_BASE_MODELS,
NON_REFINER_BASE_MODELS,
REFINER_BASE_MODELS,
} from 'services/api/constants';
export const addSocketConnectedEventListener = () => {
startAppListening({
@@ -29,15 +26,18 @@ export const addSocketConnectedEventListener = () => {
dispatch(appSocketConnected(action.payload));
// update all server state
dispatch(modelsApi.endpoints.getMainModels.initiate(REFINER_BASE_MODELS));
dispatch(
modelsApi.endpoints.getMainModels.initiate(NON_REFINER_BASE_MODELS)
modelsApi.util.invalidateTags([
{ type: 'MainModel', id: LIST_TAG },
{ type: 'SDXLRefinerModel', id: LIST_TAG },
{ type: 'LoRAModel', id: LIST_TAG },
{ type: 'ControlNetModel', id: LIST_TAG },
{ type: 'VaeModel', id: LIST_TAG },
{ type: 'TextualInversionModel', id: LIST_TAG },
{ type: 'ScannedModels', id: LIST_TAG },
])
);
dispatch(modelsApi.endpoints.getMainModels.initiate(ALL_BASE_MODELS));
dispatch(modelsApi.endpoints.getControlNetModels.initiate());
dispatch(modelsApi.endpoints.getLoRAModels.initiate());
dispatch(modelsApi.endpoints.getTextualInversionModels.initiate());
dispatch(modelsApi.endpoints.getVaeModels.initiate());
dispatch(appInfoApi.util.invalidateTags(['AppConfig', 'AppVersion']));
},
});
};
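The reworked listener leans on RTK Query cache tags: invalidating a tag only refetches queries that currently have subscribers, whereas re-initiating each endpoint by hand refetches unconditionally. A rough sketch of the difference against a generic api slice; the `Thing` endpoint and tag names are illustrative, not the project's.

// Illustrative only: tag invalidation vs. manual re-initiation in RTK Query.
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query';

const api = createApi({
  baseQuery: fetchBaseQuery({ baseUrl: '/api' }),
  tagTypes: ['Thing'],
  endpoints: (build) => ({
    getThings: build.query<string[], void>({
      query: () => 'things',
      providesTags: [{ type: 'Thing', id: 'LIST' }],
    }),
  }),
});

// Manual approach: always refetches, even with nothing mounted.
// store.dispatch(api.endpoints.getThings.initiate());

// Tag-based approach: only active subscriptions refetch.
// store.dispatch(api.util.invalidateTags([{ type: 'Thing', id: 'LIST' }]));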

View File

@@ -114,6 +114,11 @@ const IAISlider = (props: IAIFullSliderProps) => {
setLocalInputValue(value);
}, [value]);
const numberInputMin = useMemo(
() => (sliderNumberInputProps?.min ? sliderNumberInputProps.min : min),
[min, sliderNumberInputProps?.min]
);
const numberInputMax = useMemo(
() => (sliderNumberInputProps?.max ? sliderNumberInputProps.max : max),
[max, sliderNumberInputProps?.max]
@@ -129,24 +134,23 @@ const IAISlider = (props: IAIFullSliderProps) => {
const handleInputBlur = useCallback(
(e: FocusEvent<HTMLInputElement>) => {
if (e.target.value === '') {
e.target.value = String(min);
e.target.value = String(numberInputMin);
}
const clamped = clamp(
isInteger
? Math.floor(Number(e.target.value))
: Number(localInputValue),
min,
numberInputMin,
numberInputMax
);
const quantized = roundDownToMultiple(clamped, step);
onChange(quantized);
setLocalInputValue(quantized);
},
[isInteger, localInputValue, min, numberInputMax, onChange, step]
[isInteger, localInputValue, numberInputMin, numberInputMax, onChange, step]
);
const handleInputChange = useCallback((v: number | string) => {
console.log('input');
setLocalInputValue(v);
}, []);
@@ -310,7 +314,7 @@ const IAISlider = (props: IAIFullSliderProps) => {
{withInput && (
<NumberInput
min={min}
min={numberInputMin}
max={numberInputMax}
step={step}
value={localInputValue}
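The new `numberInputMin`/`numberInputMax` memos let the typed value range exceed the slider track, falling back to the slider's own `min`/`max` when no override is passed. A small sketch of the clamping rule (this sketch uses nullish coalescing where the component uses a truthiness check), with the LoRA weight case from later in this diff as the example:

// Sketch: the number input uses its own min/max when provided, else the slider's.
const clamp = (v: number, lo: number, hi: number): number =>
  Math.min(Math.max(v, lo), hi);

const resolveRange = (
  sliderMin: number,
  sliderMax: number,
  inputMin?: number,
  inputMax?: number
) => ({
  min: inputMin ?? sliderMin,
  max: inputMax ?? sliderMax,
});

// e.g. a LoRA weight slider marked -1..2 but with typed values allowed in -50..50:
const { min, max } = resolveRange(-1, 2, -50, 50);
clamp(100, min, max); // 50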

View File

@@ -5,8 +5,8 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
// import { validateSeedWeights } from 'common/util/seedWeightPairs';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { forEach } from 'lodash-es';
import { NON_REFINER_BASE_MODELS } from 'services/api/constants';
import { modelsApi } from '../../services/api/endpoints/models';
import { ALL_BASE_MODELS } from 'services/api/constants';
const readinessSelector = createSelector(
[stateSelector, activeTabNameSelector],
@@ -25,7 +25,7 @@ const readinessSelector = createSelector(
}
const { isSuccess: mainModelsSuccessfullyLoaded } =
modelsApi.endpoints.getMainModels.select(ALL_BASE_MODELS)(state);
modelsApi.endpoints.getMainModels.select(NON_REFINER_BASE_MODELS)(state);
if (!mainModelsSuccessfullyLoaded) {
isReady = false;
reasonsWhyNotReady.push('Models are not loaded');

View File

@@ -57,6 +57,11 @@ const ParamEmbeddingPopover = (props: Props) => {
});
});
// Sort Alphabetically
data.sort((a, b) =>
a.label && b.label ? (a.label?.localeCompare(b.label) ? -1 : 1) : -1
);
return data.sort((a, b) => (a.disabled && !b.disabled ? 1 : -1));
}, [embeddingQueryData, currentMainModel?.base_model]);
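Note that the comparator above uses the numeric result of `localeCompare` as a boolean, so any pair of differing labels takes the same branch regardless of which string sorts first. For contrast only, and not the project's code, a conventional enabled-first, alphabetical-within-group comparator would look roughly like this:

// Illustrative alternative: enabled items first, alphabetical within each group.
type SelectItem = { label?: string; disabled?: boolean };

const byDisabledThenLabel = (a: SelectItem, b: SelectItem): number => {
  const disabledOrder = Number(a.disabled ?? false) - Number(b.disabled ?? false);
  if (disabledOrder !== 0) return disabledOrder;
  return (a.label ?? '').localeCompare(b.label ?? '');
};

// usage: data.sort(byDisabledThenLabel);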

View File

@@ -1,5 +1,6 @@
import { Link, MenuItem } from '@chakra-ui/react';
import { MenuItem } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { skipToken } from '@reduxjs/toolkit/dist/query';
import { useAppToaster } from 'app/components/Toaster';
import { stateSelector } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
@@ -33,10 +34,9 @@ import {
useRemoveImageFromBoardMutation,
} from 'services/api/endpoints/images';
import { ImageDTO } from 'services/api/types';
import { useDebounce } from 'use-debounce';
import { AddImageToBoardContext } from '../../../../app/contexts/AddImageToBoardContext';
import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions';
import { useDebounce } from 'use-debounce';
import { skipToken } from '@reduxjs/toolkit/dist/query';
type SingleSelectionMenuItemsProps = {
imageDTO: ImageDTO;
@@ -154,21 +154,29 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
return (
<>
<Link href={imageDTO.image_url} target="_blank">
<MenuItem icon={<FaExternalLinkAlt />}>
{t('common.openInNewTab')}
</MenuItem>
</Link>
<MenuItem
as="a"
href={imageDTO.image_url}
target="_blank"
icon={<FaExternalLinkAlt />}
>
{t('common.openInNewTab')}
</MenuItem>
{isClipboardAPIAvailable && (
<MenuItem icon={<FaCopy />} onClickCapture={handleCopyImage}>
{t('parameters.copyImage')}
</MenuItem>
)}
<Link download={true} href={imageDTO.image_url} target="_blank">
<MenuItem icon={<FaDownload />} w="100%">
{t('parameters.downloadImage')}
</MenuItem>
</Link>
<MenuItem
as="a"
download={true}
href={imageDTO.image_url}
target="_blank"
icon={<FaDownload />}
w="100%"
>
{t('parameters.downloadImage')}
</MenuItem>
<MenuItem
icon={<FaQuoteRight />}
onClickCapture={handleRecallPrompt}

View File

@@ -48,6 +48,7 @@ const ParamLora = (props: Props) => {
handleReset={handleReset}
withSliderMarks
sliderMarks={[-1, 0, 1, 2]}
sliderNumberInputProps={{ min: -50, max: 50 }}
/>
<IAIIconButton
size="sm"

View File

@@ -54,7 +54,12 @@ const ParamLoRASelect = () => {
});
});
return data.sort((a, b) => (a.disabled && !b.disabled ? 1 : -1));
// Sort Alphabetically
data.sort((a, b) =>
a.label && b.label ? (a.label?.localeCompare(b.label) ? 1 : -1) : -1
);
return data.sort((a, b) => (a.disabled && !b.disabled ? -1 : 1));
}, [loras, loraModels, currentMainModel?.base_model]);
const handleChange = useCallback(

View File

@@ -0,0 +1,70 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import {
ImageNSFWBlurInvocation,
LatentsToImageInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import {
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NSFW_CHECKER,
} from './constants';
export const addNSFWCheckerToGraph = (
state: RootState,
graph: NonNullableGraph,
nodeIdToAddTo = LATENTS_TO_IMAGE
): void => {
const activeTabName = activeTabNameSelector(state);
const is_intermediate =
activeTabName === 'unifiedCanvas' ? !state.canvas.shouldAutoSave : false;
const nodeToAddTo = graph.nodes[nodeIdToAddTo] as
| LatentsToImageInvocation
| undefined;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (!nodeToAddTo) {
// something has gone terribly awry
return;
}
nodeToAddTo.is_intermediate = true;
const nsfwCheckerNode: ImageNSFWBlurInvocation = {
id: NSFW_CHECKER,
type: 'img_nsfw',
is_intermediate,
};
graph.nodes[NSFW_CHECKER] = nsfwCheckerNode;
graph.edges.push({
source: {
node_id: nodeIdToAddTo,
field: 'image',
},
destination: {
node_id: NSFW_CHECKER,
field: 'image',
},
});
if (metadataAccumulator) {
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: NSFW_CHECKER,
field: 'metadata',
},
});
}
};
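In graph terms, the helper above marks the original `l2i` output as intermediate and feeds its `image` into a new `img_nsfw` node, which becomes the user-visible result. Assuming the default non-canvas case, the relevant slice of the mutated graph reduces to a sketch like this (node IDs as defined in `constants.ts`):

// Rough shape of the graph after addNSFWCheckerToGraph runs with the default target.
const sketch = {
  nodes: {
    latents_to_image: { type: 'l2i', is_intermediate: true },
    nsfw_checker: { type: 'img_nsfw', is_intermediate: false }, // final image in this case
  },
  edges: [
    {
      source: { node_id: 'latents_to_image', field: 'image' },
      destination: { node_id: 'nsfw_checker', field: 'image' },
    },
  ],
};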

View File

@@ -0,0 +1,95 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import {
ImageNSFWBlurInvocation,
ImageWatermarkInvocation,
LatentsToImageInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import {
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NSFW_CHECKER,
WATERMARKER,
} from './constants';
export const addWatermarkerToGraph = (
state: RootState,
graph: NonNullableGraph,
nodeIdToAddTo = LATENTS_TO_IMAGE
): void => {
const activeTabName = activeTabNameSelector(state);
const is_intermediate =
activeTabName === 'unifiedCanvas' ? !state.canvas.shouldAutoSave : false;
const nodeToAddTo = graph.nodes[nodeIdToAddTo] as
| LatentsToImageInvocation
| undefined;
const nsfwCheckerNode = graph.nodes[NSFW_CHECKER] as
| ImageNSFWBlurInvocation
| undefined;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (!nodeToAddTo) {
// something has gone terribly awry
return;
}
const watermarkerNode: ImageWatermarkInvocation = {
id: WATERMARKER,
type: 'img_watermark',
is_intermediate,
};
graph.nodes[WATERMARKER] = watermarkerNode;
// no matter the situation, we want the l2i node to be intermediate
nodeToAddTo.is_intermediate = true;
if (nsfwCheckerNode) {
// if we are using the NSFW checker, we need to "disable" its output by marking it intermediate,
// then connect it to the watermark node
nsfwCheckerNode.is_intermediate = true;
graph.edges.push({
source: {
node_id: NSFW_CHECKER,
field: 'image',
},
destination: {
node_id: WATERMARKER,
field: 'image',
},
});
} else {
// otherwise we just connect to the watermark node
graph.edges.push({
source: {
node_id: nodeIdToAddTo,
field: 'image',
},
destination: {
node_id: WATERMARKER,
field: 'image',
},
});
}
if (metadataAccumulator) {
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: WATERMARKER,
field: 'metadata',
},
});
}
};
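When both post-processing steps are on, the chain becomes `l2i → img_nsfw → img_watermark`, with everything before the watermarker kept intermediate so only the watermarked image surfaces; without the NSFW checker, the watermarker takes its image directly from `l2i`. A compact sketch of the edge chain in the both-enabled case:

// Edge chain with both the NSFW checker and the watermarker enabled (sketch).
const chain = [
  {
    source: { node_id: 'latents_to_image', field: 'image' },
    destination: { node_id: 'nsfw_checker', field: 'image' },
  },
  {
    source: { node_id: 'nsfw_checker', field: 'image' },
    destination: { node_id: 'invisible_watermark', field: 'image' },
  },
];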

View File

@@ -10,7 +10,9 @@ import {
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CLIP_SKIP,
IMAGE_TO_IMAGE_GRAPH,
@@ -103,11 +105,6 @@ export const buildCanvasImageToImageGraph = (
is_intermediate: true,
skipped_layers: clipSkip,
},
[LATENTS_TO_IMAGE]: {
is_intermediate: !shouldAutoSave,
type: 'l2i',
id: LATENTS_TO_IMAGE,
},
[LATENTS_TO_LATENTS]: {
type: 'l2l',
id: LATENTS_TO_LATENTS,
@@ -126,6 +123,11 @@ export const buildCanvasImageToImageGraph = (
// image_name: initialImage.image_name,
// },
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate: !shouldAutoSave,
},
},
edges: [
{
@@ -333,5 +335,16 @@ export const buildCanvasImageToImageGraph = (
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph);
}
return graph;
};

View File

@@ -8,7 +8,9 @@ import {
RangeOfSizeInvocation,
} from 'services/api/types';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CLIP_SKIP,
INPAINT,
@@ -249,5 +251,16 @@ export const buildCanvasInpaintGraph = (
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, INPAINT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, INPAINT);
}
return graph;
};

View File

@@ -5,7 +5,9 @@ import { initialGenerationState } from 'features/parameters/store/generationSlic
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CLIP_SKIP,
LATENTS_TO_IMAGE,
@@ -238,5 +240,16 @@ export const buildCanvasTextToImageGraph = (
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, TEXT_TO_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph);
}
return graph;
};

View File

@@ -9,7 +9,9 @@ import {
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CLIP_SKIP,
IMAGE_TO_IMAGE_GRAPH,
@@ -297,42 +299,6 @@ export const buildLinearImageToImageGraph = (
});
}
// TODO: add batch functionality
// if (isBatchEnabled && asInitialImage && batchImageNames.length > 0) {
// // we are going to connect an iterate up to the init image
// delete (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image;
// const imageCollection: ImageCollectionInvocation = {
// id: IMAGE_COLLECTION,
// type: 'image_collection',
// images: batchImageNames.map((image_name) => ({ image_name })),
// };
// const imageCollectionIterate: IterateInvocation = {
// id: IMAGE_COLLECTION_ITERATE,
// type: 'iterate',
// };
// graph.nodes[IMAGE_COLLECTION] = imageCollection;
// graph.nodes[IMAGE_COLLECTION_ITERATE] = imageCollectionIterate;
// graph.edges.push({
// source: { node_id: IMAGE_COLLECTION, field: 'collection' },
// destination: {
// node_id: IMAGE_COLLECTION_ITERATE,
// field: 'collection',
// },
// });
// graph.edges.push({
// source: { node_id: IMAGE_COLLECTION_ITERATE, field: 'item' },
// destination: {
// node_id: IMAGE_TO_LATENTS,
// field: 'image',
// },
// });
// }
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
@@ -379,5 +345,16 @@ export const buildLinearImageToImageGraph = (
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph);
}
return graph;
};

View File

@@ -7,7 +7,9 @@ import {
ImageToLatentsInvocation,
} from 'services/api/types';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
IMAGE_TO_LATENTS,
LATENTS_TO_IMAGE,
@@ -48,6 +50,7 @@ export const buildLinearSDXLImageToImageGraph = (
const {
positiveStylePrompt,
negativeStylePrompt,
shouldConcatSDXLStylePrompt,
shouldUseSDXLRefiner,
refinerStart,
sdxlImg2ImgDenoisingStrength: strength,
@@ -89,13 +92,17 @@ export const buildLinearSDXLImageToImageGraph = (
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: positiveStylePrompt,
style: shouldConcatSDXLStylePrompt
? `${positivePrompt} ${positiveStylePrompt}`
: positiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: negativeStylePrompt,
style: shouldConcatSDXLStylePrompt
? `${negativePrompt} ${negativeStylePrompt}`
: negativeStylePrompt,
},
[NOISE]: {
type: 'noise',
@@ -365,5 +372,16 @@ export const buildLinearSDXLImageToImageGraph = (
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph);
}
return graph;
};
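With `shouldConcatSDXLStylePrompt` enabled (the new default in `sdxlSlice`), the style sent to each `sdxl_compel_prompt` node is the main prompt followed by the style prompt, so an empty style box effectively reuses the main prompt. A tiny worked example of the ternary used above, with made-up prompt text:

// Worked example of the style-concatenation rule.
const buildStyle = (
  prompt: string,
  stylePrompt: string,
  shouldConcat: boolean
): string => (shouldConcat ? `${prompt} ${stylePrompt}` : stylePrompt);

buildStyle('a lighthouse at dusk', 'oil painting, warm light', true);
// -> 'a lighthouse at dusk oil painting, warm light'
buildStyle('a lighthouse at dusk', 'oil painting, warm light', false);
// -> 'oil painting, warm light'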

View File

@@ -3,7 +3,9 @@ import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
@@ -37,6 +39,7 @@ export const buildLinearSDXLTextToImageGraph = (
const {
positiveStylePrompt,
negativeStylePrompt,
shouldConcatSDXLStylePrompt,
shouldUseSDXLRefiner,
refinerStart,
} = state.sdxl;
@@ -72,13 +75,17 @@ export const buildLinearSDXLTextToImageGraph = (
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: positiveStylePrompt,
style: shouldConcatSDXLStylePrompt
? `${positivePrompt} ${positiveStylePrompt}`
: positiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: negativeStylePrompt,
style: shouldConcatSDXLStylePrompt
? `${negativePrompt} ${negativeStylePrompt}`
: negativeStylePrompt,
},
[NOISE]: {
type: 'noise',
@@ -247,5 +254,16 @@ export const buildLinearSDXLTextToImageGraph = (
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph);
}
return graph;
};

View File

@@ -5,7 +5,9 @@ import { initialGenerationState } from 'features/parameters/store/generationSlic
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CLIP_SKIP,
LATENTS_TO_IMAGE,
@@ -227,5 +229,16 @@ export const buildLinearTextToImageGraph = (
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, TEXT_TO_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph);
}
return graph;
};

View File

@@ -3,6 +3,8 @@ export const POSITIVE_CONDITIONING = 'positive_conditioning';
export const NEGATIVE_CONDITIONING = 'negative_conditioning';
export const TEXT_TO_LATENTS = 'text_to_latents';
export const LATENTS_TO_IMAGE = 'latents_to_image';
export const NSFW_CHECKER = 'nsfw_checker';
export const WATERMARKER = 'invisible_watermark';
export const NOISE = 'noise';
export const RANDOM_INT = 'rand_int';
export const RANGE_OF_SIZE = 'range_of_size';

View File

@@ -7,9 +7,9 @@ import { activeTabNameSelector } from '../../../../ui/store/uiSelectors';
const aspectRatios = [
{ name: 'Free', value: null },
{ name: 'Portrait', value: 0.67 / 1 },
{ name: 'Wide', value: 16 / 9 },
{ name: 'Square', value: 1 / 1 },
{ name: '2:3', value: 2 / 3 },
{ name: '16:9', value: 16 / 9 },
{ name: '1:1', value: 1 / 1 },
];
export default function ParamAspectRatio() {

View File

@@ -148,7 +148,7 @@ const ParamPositiveConditioning = () => {
<Box
sx={{
position: 'absolute',
top: shouldPinParametersPanel ? 6 : 0,
top: shouldPinParametersPanel ? 5 : 0,
insetInlineEnd: 0,
}}
>

View File

@@ -0,0 +1,17 @@
import { Flex } from '@chakra-ui/react';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
export default function ParamPromptArea() {
return (
<Flex
sx={{
flexDirection: 'column',
gap: 2,
}}
>
<ParamPositiveConditioning />
<ParamNegativeConditioning />
</Flex>
);
}

View File

@@ -1,3 +1,5 @@
import { components } from 'services/api/schema';
export const MODEL_TYPE_MAP = {
'sd-1': 'Stable Diffusion 1.x',
'sd-2': 'Stable Diffusion 2.x',
@@ -5,6 +7,13 @@ export const MODEL_TYPE_MAP = {
'sdxl-refiner': 'Stable Diffusion XL Refiner',
};
export const MODEL_TYPE_SHORT_MAP = {
'sd-1': 'SD1',
'sd-2': 'SD2',
sdxl: 'SDXL',
'sdxl-refiner': 'SDXLR',
};
export const clipSkipMap = {
'sd-1': {
maxClip: 12,
@@ -23,3 +32,12 @@ export const clipSkipMap = {
markers: [0, 1, 2, 3, 5, 10, 15, 20, 24],
},
};
type LoRAModelFormatMap = {
[key in components['schemas']['LoRAModelFormat']]: string;
};
export const LORA_MODEL_FORMAT_MAP: LoRAModelFormatMap = {
lycoris: 'LyCORIS',
diffusers: 'Diffusers',
};
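Keying the map by the generated `LoRAModelFormat` schema type means the compiler will flag this table whenever the backend adds a format that has no label yet. The same mapped-type pattern, sketched with an inline union standing in for the generated schema:

// Same mapped-type pattern with an inline union in place of the generated schema type.
type LoRAFormat = 'lycoris' | 'diffusers';

type FormatLabelMap = {
  [key in LoRAFormat]: string;
};

const labels: FormatLabelMap = {
  lycoris: 'LyCORIS',
  diffusers: 'Diffusers',
  // Adding e.g. 'safetensors' to LoRAFormat would make this object a compile error
  // until a matching label is added here.
};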

View File

@@ -0,0 +1,43 @@
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import { FaLink } from 'react-icons/fa';
import { setShouldConcatSDXLStylePrompt } from '../store/sdxlSlice';
export default function ParamSDXLConcatButton() {
const shouldConcatSDXLStylePrompt = useAppSelector(
(state: RootState) => state.sdxl.shouldConcatSDXLStylePrompt
);
const shouldPinParametersPanel = useAppSelector(
(state: RootState) => state.ui.shouldPinParametersPanel
);
const dispatch = useAppDispatch();
const handleShouldConcatPromptChange = () => {
dispatch(setShouldConcatSDXLStylePrompt(!shouldConcatSDXLStylePrompt));
};
return (
<IAIIconButton
aria-label="Concat"
tooltip="Concatenates Basic Prompt with Style (Recommended)"
variant="outline"
isChecked={shouldConcatSDXLStylePrompt}
onClick={handleShouldConcatPromptChange}
icon={<FaLink />}
size="xs"
sx={{
position: 'absolute',
insetInlineEnd: 1,
top: shouldPinParametersPanel ? 12 : 20,
border: 'none',
color: shouldConcatSDXLStylePrompt ? 'accent.500' : 'base.500',
_hover: {
bg: 'none',
},
}}
></IAIIconButton>
);
}

View File

@@ -13,15 +13,20 @@ import { useIsReadyToInvoke } from 'common/hooks/useIsReadyToInvoke';
import AddEmbeddingButton from 'features/embedding/components/AddEmbeddingButton';
import ParamEmbeddingPopover from 'features/embedding/components/ParamEmbeddingPopover';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { AnimatePresence } from 'framer-motion';
import { isEqual } from 'lodash-es';
import { flushSync } from 'react-dom';
import { setNegativeStylePromptSDXL } from '../store/sdxlSlice';
import SDXLConcatLink from './SDXLConcatLink';
const promptInputSelector = createSelector(
[stateSelector, activeTabNameSelector],
({ sdxl }, activeTabName) => {
const { negativeStylePrompt, shouldConcatSDXLStylePrompt } = sdxl;
return {
prompt: sdxl.negativeStylePrompt,
prompt: negativeStylePrompt,
shouldConcatSDXLStylePrompt,
activeTabName,
};
},
@@ -37,11 +42,13 @@ const promptInputSelector = createSelector(
*/
const ParamSDXLNegativeStyleConditioning = () => {
const dispatch = useAppDispatch();
const { prompt, activeTabName } = useAppSelector(promptInputSelector);
const isReady = useIsReadyToInvoke();
const promptRef = useRef<HTMLTextAreaElement>(null);
const { isOpen, onClose, onOpen } = useDisclosure();
const { prompt, activeTabName, shouldConcatSDXLStylePrompt } =
useAppSelector(promptInputSelector);
const handleChangePrompt = useCallback(
(e: ChangeEvent<HTMLTextAreaElement>) => {
dispatch(setNegativeStylePromptSDXL(e.target.value));
@@ -111,6 +118,20 @@ const ParamSDXLNegativeStyleConditioning = () => {
return (
<Box position="relative">
<AnimatePresence>
{shouldConcatSDXLStylePrompt && (
<Box
sx={{
position: 'absolute',
left: '3',
w: '94%',
top: '-17px',
}}
>
<SDXLConcatLink />
</Box>
)}
</AnimatePresence>
<FormControl>
<ParamEmbeddingPopover
isOpen={isOpen}

View File

@@ -13,15 +13,20 @@ import { useIsReadyToInvoke } from 'common/hooks/useIsReadyToInvoke';
import AddEmbeddingButton from 'features/embedding/components/AddEmbeddingButton';
import ParamEmbeddingPopover from 'features/embedding/components/ParamEmbeddingPopover';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { AnimatePresence } from 'framer-motion';
import { isEqual } from 'lodash-es';
import { flushSync } from 'react-dom';
import { setPositiveStylePromptSDXL } from '../store/sdxlSlice';
import SDXLConcatLink from './SDXLConcatLink';
const promptInputSelector = createSelector(
[stateSelector, activeTabNameSelector],
({ sdxl }, activeTabName) => {
const { positiveStylePrompt, shouldConcatSDXLStylePrompt } = sdxl;
return {
prompt: sdxl.positiveStylePrompt,
prompt: positiveStylePrompt,
shouldConcatSDXLStylePrompt,
activeTabName,
};
},
@@ -37,11 +42,13 @@ const promptInputSelector = createSelector(
*/
const ParamSDXLPositiveStyleConditioning = () => {
const dispatch = useAppDispatch();
const { prompt, activeTabName } = useAppSelector(promptInputSelector);
const isReady = useIsReadyToInvoke();
const promptRef = useRef<HTMLTextAreaElement>(null);
const { isOpen, onClose, onOpen } = useDisclosure();
const { prompt, activeTabName, shouldConcatSDXLStylePrompt } =
useAppSelector(promptInputSelector);
const handleChangePrompt = useCallback(
(e: ChangeEvent<HTMLTextAreaElement>) => {
dispatch(setPositiveStylePromptSDXL(e.target.value));
@@ -111,6 +118,20 @@ const ParamSDXLPositiveStyleConditioning = () => {
return (
<Box position="relative">
<AnimatePresence>
{shouldConcatSDXLStylePrompt && (
<Box
sx={{
position: 'absolute',
left: '3',
w: '94%',
top: '-17px',
}}
>
<SDXLConcatLink />
</Box>
)}
</AnimatePresence>
<FormControl>
<ParamEmbeddingPopover
isOpen={isOpen}

View File

@@ -0,0 +1,23 @@
import { Flex } from '@chakra-ui/react';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamSDXLConcatButton from './ParamSDXLConcatButton';
import ParamSDXLNegativeStyleConditioning from './ParamSDXLNegativeStyleConditioning';
import ParamSDXLPositiveStyleConditioning from './ParamSDXLPositiveStyleConditioning';
export default function ParamSDXLPromptArea() {
return (
<Flex
sx={{
flexDirection: 'column',
gap: 2,
}}
>
<ParamPositiveConditioning />
<ParamSDXLConcatButton />
<ParamSDXLPositiveStyleConditioning />
<ParamNegativeConditioning />
<ParamSDXLNegativeStyleConditioning />
</Flex>
);
}

View File

@@ -0,0 +1,101 @@
import { Box, Flex } from '@chakra-ui/react';
import { CSSObject } from '@emotion/react';
import { motion } from 'framer-motion';
import { FaLink } from 'react-icons/fa';
const sharedConcatLinkStyle: CSSObject = {
position: 'absolute',
bg: 'none',
w: 'full',
minH: 2,
borderRadius: 0,
borderLeft: 'none',
borderRight: 'none',
zIndex: 2,
maskImage:
'radial-gradient(circle at center, black, black 65%, black 30%, black 15%, transparent)',
};
export default function SDXLConcatLink() {
return (
<Flex>
<Box
as={motion.div}
initial={{
scaleX: 0,
borderWidth: 0,
display: 'none',
}}
animate={{
display: ['block', 'block', 'block', 'none'],
scaleX: [0, 0.25, 0.5, 1],
borderWidth: [0, 3, 3, 0],
transition: { duration: 0.37, times: [0, 0.25, 0.5, 1] },
}}
sx={{
top: '1px',
borderTop: 'none',
borderColor: 'base.400',
...sharedConcatLinkStyle,
_dark: {
borderColor: 'accent.500',
},
}}
/>
<Box
as={motion.div}
initial={{
opacity: 0,
scale: 0,
}}
animate={{
opacity: [0, 1, 1, 1],
scale: [0, 0.75, 1.5, 1],
transition: { duration: 0.42, times: [0, 0.25, 0.5, 1] },
}}
exit={{
opacity: 0,
scale: 0,
}}
sx={{
zIndex: 3,
position: 'absolute',
left: '48%',
top: '3px',
p: 1,
borderRadius: 4,
bg: 'accent.400',
color: 'base.50',
_dark: {
bg: 'accent.500',
},
}}
>
<FaLink size={12} />
</Box>
<Box
as={motion.div}
initial={{
scaleX: 0,
borderWidth: 0,
display: 'none',
}}
animate={{
display: ['block', 'block', 'block', 'none'],
scaleX: [0, 0.25, 0.5, 1],
borderWidth: [0, 3, 3, 0],
transition: { duration: 0.37, times: [0, 0.25, 0.5, 1] },
}}
sx={{
top: '17px',
borderBottom: 'none',
borderColor: 'base.400',
...sharedConcatLinkStyle,
_dark: {
borderColor: 'accent.500',
},
}}
/>
</Flex>
);
}

View File

@@ -1,21 +1,14 @@
import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/ParamDynamicPromptsCollapse';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
// import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse';
import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
import ParamSDXLNegativeStyleConditioning from './ParamSDXLNegativeStyleConditioning';
import ParamSDXLPositiveStyleConditioning from './ParamSDXLPositiveStyleConditioning';
import ParamSDXLPromptArea from './ParamSDXLPromptArea';
import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse';
import SDXLImageToImageTabCoreParameters from './SDXLImageToImageTabCoreParameters';
const SDXLImageToImageTabParameters = () => {
return (
<>
<ParamPositiveConditioning />
<ParamSDXLPositiveStyleConditioning />
<ParamNegativeConditioning />
<ParamSDXLNegativeStyleConditioning />
<ParamSDXLPromptArea />
<ProcessButtons />
<SDXLImageToImageTabCoreParameters />
<ParamSDXLRefinerCollapse />

View File

@@ -1,20 +1,14 @@
import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/ParamDynamicPromptsCollapse';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
import TextToImageTabCoreParameters from 'features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters';
import ParamSDXLNegativeStyleConditioning from './ParamSDXLNegativeStyleConditioning';
import ParamSDXLPositiveStyleConditioning from './ParamSDXLPositiveStyleConditioning';
import ParamSDXLPromptArea from './ParamSDXLPromptArea';
import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse';
const SDXLTextToImageTabParameters = () => {
return (
<>
<ParamPositiveConditioning />
<ParamSDXLPositiveStyleConditioning />
<ParamNegativeConditioning />
<ParamSDXLNegativeStyleConditioning />
<ParamSDXLPromptArea />
<ProcessButtons />
<TextToImageTabCoreParameters />
<ParamSDXLRefinerCollapse />

View File

@@ -10,6 +10,7 @@ import { MainModelField } from 'services/api/types';
type SDXLInitialState = {
positiveStylePrompt: PositiveStylePromptSDXLParam;
negativeStylePrompt: NegativeStylePromptSDXLParam;
shouldConcatSDXLStylePrompt: boolean;
shouldUseSDXLRefiner: boolean;
sdxlImg2ImgDenoisingStrength: number;
refinerModel: MainModelField | null;
@@ -23,6 +24,7 @@ type SDXLInitialState = {
const sdxlInitialState: SDXLInitialState = {
positiveStylePrompt: '',
negativeStylePrompt: '',
shouldConcatSDXLStylePrompt: true,
shouldUseSDXLRefiner: false,
sdxlImg2ImgDenoisingStrength: 0.7,
refinerModel: null,
@@ -43,6 +45,9 @@ const sdxlSlice = createSlice({
setNegativeStylePromptSDXL: (state, action: PayloadAction<string>) => {
state.negativeStylePrompt = action.payload;
},
setShouldConcatSDXLStylePrompt: (state, action: PayloadAction<boolean>) => {
state.shouldConcatSDXLStylePrompt = action.payload;
},
setShouldUseSDXLRefiner: (state, action: PayloadAction<boolean>) => {
state.shouldUseSDXLRefiner = action.payload;
},
@@ -76,6 +81,7 @@ const sdxlSlice = createSlice({
export const {
setPositiveStylePromptSDXL,
setNegativeStylePromptSDXL,
setShouldConcatSDXLStylePrompt,
setShouldUseSDXLRefiner,
setSDXLImg2ImgDenoisingStrength,
refinerModelChanged,

View File

@@ -1,32 +0,0 @@
import { useColorMode } from '@chakra-ui/react';
import IAIIconButton from 'common/components/IAIIconButton';
import { useTranslation } from 'react-i18next';
import { FaMoon, FaSun } from 'react-icons/fa';
const ColorModeButton = () => {
const { colorMode, toggleColorMode } = useColorMode();
const { t } = useTranslation();
return (
<IAIIconButton
aria-label={
colorMode === 'dark' ? t('common.lightMode') : t('common.darkMode')
}
tooltip={
colorMode === 'dark' ? t('common.lightMode') : t('common.darkMode')
}
size="sm"
icon={
colorMode === 'dark' ? (
<FaSun fontSize={19} />
) : (
<FaMoon fontSize={18} />
)
}
onClick={toggleColorMode}
variant="link"
/>
);
};
export default ColorModeButton;

View File

@@ -1,50 +0,0 @@
import {
IconButton,
Menu,
MenuButton,
MenuItemOption,
MenuList,
MenuOptionGroup,
Tooltip,
} from '@chakra-ui/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { map } from 'lodash-es';
import { useTranslation } from 'react-i18next';
import { IoLanguage } from 'react-icons/io5';
import { LANGUAGES } from '../store/constants';
import { languageSelector } from '../store/systemSelectors';
import { languageChanged } from '../store/systemSlice';
export default function LanguagePicker() {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const language = useAppSelector(languageSelector);
return (
<Menu closeOnSelect={false}>
<Tooltip label={t('common.languagePickerLabel')} hasArrow>
<MenuButton
as={IconButton}
icon={<IoLanguage />}
variant="link"
aria-label={t('common.languagePickerLabel')}
fontSize={22}
minWidth={8}
/>
</Tooltip>
<MenuList>
<MenuOptionGroup value={language}>
{map(LANGUAGES, (languageName, l: keyof typeof LANGUAGES) => (
<MenuItemOption
key={l}
value={l}
onClick={() => dispatch(languageChanged(l))}
>
{languageName}
</MenuItemOption>
))}
</MenuOptionGroup>
</MenuList>
</Menu>
);
}

View File

@@ -10,6 +10,7 @@ import {
ModalOverlay,
Text,
useDisclosure,
useColorMode,
} from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { VALID_LOG_LEVELS } from 'app/logging/logger';
@@ -26,6 +27,8 @@ import {
setShouldConfirmOnDelete,
shouldAntialiasProgressImageChanged,
shouldLogToConsoleChanged,
shouldUseNSFWCheckerChanged,
shouldUseWatermarkerChanged,
} from 'features/system/store/systemSlice';
import {
setShouldShowProgressInViewer,
@@ -42,10 +45,15 @@ import {
} from 'react';
import { useTranslation } from 'react-i18next';
import { LogLevelName } from 'roarr';
import { useGetAppConfigQuery } from 'services/api/endpoints/appInfo';
import SettingSwitch from './SettingSwitch';
import SettingsClearIntermediates from './SettingsClearIntermediates';
import SettingsSchedulers from './SettingsSchedulers';
import StyledFlex from './StyledFlex';
import { useFeatureStatus } from '../../hooks/useFeatureStatus';
import { LANGUAGES } from '../../store/constants';
import { languageChanged } from '../../store/systemSlice';
import { languageSelector } from '../../store/systemSelectors';
const selector = createSelector(
[stateSelector],
@@ -57,6 +65,8 @@ const selector = createSelector(
shouldLogToConsole,
shouldAntialiasProgressImage,
isNodesEnabled,
shouldUseNSFWChecker,
shouldUseWatermarker,
} = system;
const {
@@ -78,6 +88,8 @@ const selector = createSelector(
shouldAntialiasProgressImage,
shouldShowAdvancedOptions,
isNodesEnabled,
shouldUseNSFWChecker,
shouldUseWatermarker,
};
},
{
@@ -92,6 +104,7 @@ type ConfigOptions = {
shouldShowAdvancedOptionsSettings: boolean;
shouldShowClearIntermediates: boolean;
shouldShowNodesToggle: boolean;
shouldShowLocalizationToggle: boolean;
};
type SettingsModalProps = {
@@ -113,6 +126,8 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
const shouldShowClearIntermediates =
config?.shouldShowClearIntermediates ?? true;
const shouldShowNodesToggle = config?.shouldShowNodesToggle ?? true;
const shouldShowLocalizationToggle =
config?.shouldShowLocalizationToggle ?? true;
useEffect(() => {
if (!shouldShowDeveloperSettings) {
@@ -120,6 +135,16 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
}
}, [shouldShowDeveloperSettings, dispatch]);
const { isNSFWCheckerAvailable, isWatermarkerAvailable } =
useGetAppConfigQuery(undefined, {
selectFromResult: ({ data }) => ({
isNSFWCheckerAvailable:
data?.nsfw_methods.includes('nsfw_checker') ?? false,
isWatermarkerAvailable:
data?.watermarking_methods.includes('invisible_watermark') ?? false,
}),
});
const {
isOpen: isSettingsModalOpen,
onOpen: onSettingsModalOpen,
@@ -143,6 +168,8 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
shouldAntialiasProgressImage,
shouldShowAdvancedOptions,
isNodesEnabled,
shouldUseNSFWChecker,
shouldUseWatermarker,
} = useAppSelector(selector);
const handleClickResetWebUI = useCallback(() => {
@@ -166,6 +193,13 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
[dispatch]
);
const handleLanguageChanged = useCallback(
(l: string) => {
dispatch(languageChanged(l as keyof typeof LANGUAGES));
},
[dispatch]
);
const handleLogToConsoleChanged = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
dispatch(shouldLogToConsoleChanged(e.target.checked));
@@ -180,6 +214,12 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
[dispatch]
);
const { colorMode, toggleColorMode } = useColorMode();
const isLocalizationEnabled =
useFeatureStatus('localization').isFeatureEnabled;
const language = useAppSelector(languageSelector);
return (
<>
{cloneElement(children, {
@@ -221,10 +261,31 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
<StyledFlex>
<Heading size="sm">{t('settings.generation')}</Heading>
<SettingsSchedulers />
<SettingSwitch
label="Enable NSFW Checker"
isDisabled={!isNSFWCheckerAvailable}
isChecked={shouldUseNSFWChecker}
onChange={(e: ChangeEvent<HTMLInputElement>) =>
dispatch(shouldUseNSFWCheckerChanged(e.target.checked))
}
/>
<SettingSwitch
label="Enable Invisible Watermark"
isDisabled={!isWatermarkerAvailable}
isChecked={shouldUseWatermarker}
onChange={(e: ChangeEvent<HTMLInputElement>) =>
dispatch(shouldUseWatermarkerChanged(e.target.checked))
}
/>
</StyledFlex>
<StyledFlex>
<Heading size="sm">{t('settings.ui')}</Heading>
<SettingSwitch
label={t('common.darkMode')}
isChecked={colorMode === 'dark'}
onChange={toggleColorMode}
/>
<SettingSwitch
label={t('settings.useSlidersForAll')}
isChecked={shouldUseSliders}
@@ -267,6 +328,18 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
onChange={handleToggleNodes}
/>
)}
{shouldShowLocalizationToggle && (
<IAIMantineSelect
disabled={!isLocalizationEnabled}
label={t('common.languagePickerLabel')}
value={language}
data={Object.entries(LANGUAGES).map(([value, label]) => ({
value,
label,
}))}
onChange={handleLanguageChanged}
/>
)}
</StyledFlex>
{shouldShowDeveloperSettings && (

View File

@@ -10,9 +10,9 @@ import { map } from 'lodash-es';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const data = map(SCHEDULER_LABEL_MAP, (value, label) => ({
value,
label,
const data = map(SCHEDULER_LABEL_MAP, (label, name) => ({
value: name,
label: label,
})).sort((a, b) => a.label.localeCompare(b.label));
export default function SettingsSchedulers() {
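The fix above matters because lodash's `map` over a plain object passes `(value, key)` to the iteratee; the previous code had the two swapped, so scheduler names and display labels came out reversed in the dropdown. A minimal illustration, assuming a small name-to-label map:

// lodash map over an object calls the iteratee as (value, key).
import { map } from 'lodash-es';

const LABEL_MAP = { ddim: 'DDIM', euler: 'Euler' };

const options = map(LABEL_MAP, (label, name) => ({
  value: name,
  label,
})).sort((a, b) => a.label.localeCompare(b.label));
// -> [{ value: 'ddim', label: 'DDIM' }, { value: 'euler', label: 'Euler' }]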

View File

@@ -1,28 +1,40 @@
import { Flex, Spacer } from '@chakra-ui/react';
import { memo } from 'react';
import StatusIndicator from './StatusIndicator';
import { Link } from '@chakra-ui/react';
import {
Flex,
Menu,
MenuButton,
MenuGroup,
MenuItem,
MenuList,
Spacer,
} from '@chakra-ui/react';
import IAIIconButton from 'common/components/IAIIconButton';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { FaBug, FaDiscord, FaGithub, FaKeyboard } from 'react-icons/fa';
import { MdSettings } from 'react-icons/md';
import {
FaBars,
FaBug,
FaCog,
FaDiscord,
FaGithub,
FaKeyboard,
} from 'react-icons/fa';
import { menuListMotionProps } from 'theme/components/menu';
import { useFeatureStatus } from '../hooks/useFeatureStatus';
import ColorModeButton from './ColorModeButton';
import HotkeysModal from './HotkeysModal/HotkeysModal';
import InvokeAILogoComponent from './InvokeAILogoComponent';
import LanguagePicker from './LanguagePicker';
import SettingsModal from './SettingsModal/SettingsModal';
import StatusIndicator from './StatusIndicator';
const SiteHeader = () => {
const { t } = useTranslation();
const isLocalizationEnabled =
useFeatureStatus('localization').isFeatureEnabled;
const isBugLinkEnabled = useFeatureStatus('bugLink').isFeatureEnabled;
const isDiscordLinkEnabled = useFeatureStatus('discordLink').isFeatureEnabled;
const isGithubLinkEnabled = useFeatureStatus('githubLink').isFeatureEnabled;
const githubLink = 'http://github.com/invoke-ai/InvokeAI';
const discordLink = 'https://discord.gg/ZmtBAhwWhy';
return (
<Flex
sx={{
@@ -34,87 +46,61 @@ const SiteHeader = () => {
<Spacer />
<StatusIndicator />
<HotkeysModal>
<IAIIconButton
aria-label={t('common.hotkeysLabel')}
tooltip={t('common.hotkeysLabel')}
size="sm"
<Menu>
<MenuButton
as={IAIIconButton}
variant="link"
data-variant="link"
fontSize={20}
icon={<FaKeyboard />}
aria-label={t('accessibility.menu')}
icon={<FaBars />}
sx={{ boxSize: 8 }}
/>
</HotkeysModal>
{isLocalizationEnabled && <LanguagePicker />}
{isBugLinkEnabled && (
<Link
isExternal
href="http://github.com/invoke-ai/InvokeAI/issues"
marginBottom="-0.25rem"
>
<IAIIconButton
aria-label={t('common.reportBugLabel')}
tooltip={t('common.reportBugLabel')}
variant="link"
data-variant="link"
fontSize={20}
size="sm"
icon={<FaBug />}
/>
</Link>
)}
{isGithubLinkEnabled && (
<Link
isExternal
href="http://github.com/invoke-ai/InvokeAI"
marginBottom="-0.25rem"
>
<IAIIconButton
aria-label={t('common.githubLabel')}
tooltip={t('common.githubLabel')}
variant="link"
data-variant="link"
fontSize={20}
size="sm"
icon={<FaGithub />}
/>
</Link>
)}
{isDiscordLinkEnabled && (
<Link
isExternal
href="https://discord.gg/ZmtBAhwWhy"
marginBottom="-0.25rem"
>
<IAIIconButton
aria-label={t('common.discordLabel')}
tooltip={t('common.discordLabel')}
variant="link"
data-variant="link"
fontSize={20}
size="sm"
icon={<FaDiscord />}
/>
</Link>
)}
<ColorModeButton />
<SettingsModal>
<IAIIconButton
aria-label={t('common.settingsLabel')}
tooltip={t('common.settingsLabel')}
variant="link"
data-variant="link"
fontSize={22}
size="sm"
icon={<MdSettings />}
/>
</SettingsModal>
<MenuList motionProps={menuListMotionProps}>
<MenuGroup title={t('common.communityLabel')}>
{isGithubLinkEnabled && (
<MenuItem
as="a"
href={githubLink}
target="_blank"
icon={<FaGithub />}
>
{t('common.githubLabel')}
</MenuItem>
)}
{isBugLinkEnabled && (
<MenuItem
as="a"
href={`${githubLink}/issues`}
target="_blank"
icon={<FaBug />}
>
{t('common.reportBugLabel')}
</MenuItem>
)}
{isDiscordLinkEnabled && (
<MenuItem
as="a"
href={discordLink}
target="_blank"
icon={<FaDiscord />}
>
{t('common.discordLabel')}
</MenuItem>
)}
</MenuGroup>
<MenuGroup title={t('common.settingsLabel')}>
<HotkeysModal>
<MenuItem as="button" icon={<FaKeyboard />}>
{t('common.hotkeysLabel')}
</MenuItem>
</HotkeysModal>
<SettingsModal>
<MenuItem as="button" icon={<FaCog />}>
{t('common.settingsLabel')}
</MenuItem>
</SettingsModal>
</MenuGroup>
</MenuList>
</Menu>
</Flex>
);
};

View File

@@ -1,111 +0,0 @@
import { Flex, Link } from '@chakra-ui/react';
import { useTranslation } from 'react-i18next';
import { FaBug, FaDiscord, FaGithub, FaKeyboard } from 'react-icons/fa';
import { MdSettings } from 'react-icons/md';
import HotkeysModal from './HotkeysModal/HotkeysModal';
import LanguagePicker from './LanguagePicker';
import SettingsModal from './SettingsModal/SettingsModal';
import IAIIconButton from 'common/components/IAIIconButton';
import { useFeatureStatus } from '../hooks/useFeatureStatus';
const SiteHeaderMenu = () => {
const { t } = useTranslation();
const isLocalizationEnabled =
useFeatureStatus('localization').isFeatureEnabled;
const isBugLinkEnabled = useFeatureStatus('bugLink').isFeatureEnabled;
const isDiscordLinkEnabled = useFeatureStatus('discordLink').isFeatureEnabled;
const isGithubLinkEnabled = useFeatureStatus('githubLink').isFeatureEnabled;
return (
<Flex
alignItems="center"
flexDirection={{ base: 'column', xl: 'row' }}
gap={{ base: 4, xl: 1 }}
>
<HotkeysModal>
<IAIIconButton
aria-label={t('common.hotkeysLabel')}
tooltip={t('common.hotkeysLabel')}
size="sm"
variant="link"
data-variant="link"
fontSize={20}
icon={<FaKeyboard />}
/>
</HotkeysModal>
{isLocalizationEnabled && <LanguagePicker />}
{isBugLinkEnabled && (
<Link
isExternal
href="http://github.com/invoke-ai/InvokeAI/issues"
marginBottom="-0.25rem"
>
<IAIIconButton
aria-label={t('common.reportBugLabel')}
tooltip={t('common.reportBugLabel')}
variant="link"
data-variant="link"
fontSize={20}
size="sm"
icon={<FaBug />}
/>
</Link>
)}
{isGithubLinkEnabled && (
<Link
isExternal
href="http://github.com/invoke-ai/InvokeAI"
marginBottom="-0.25rem"
>
<IAIIconButton
aria-label={t('common.githubLabel')}
tooltip={t('common.githubLabel')}
variant="link"
data-variant="link"
fontSize={20}
size="sm"
icon={<FaGithub />}
/>
</Link>
)}
{isDiscordLinkEnabled && (
<Link
isExternal
href="https://discord.gg/ZmtBAhwWhy"
marginBottom="-0.25rem"
>
<IAIIconButton
aria-label={t('common.discordLabel')}
tooltip={t('common.discordLabel')}
variant="link"
data-variant="link"
fontSize={20}
size="sm"
icon={<FaDiscord />}
/>
</Link>
)}
<SettingsModal>
<IAIIconButton
aria-label={t('common.settingsLabel')}
tooltip={t('common.settingsLabel')}
variant="link"
data-variant="link"
fontSize={22}
size="sm"
icon={<MdSettings />}
/>
</SettingsModal>
</Flex>
);
};
SiteHeaderMenu.displayName = 'SiteHeaderMenu';
export default SiteHeaderMenu;

View File

@@ -87,6 +87,8 @@ export interface SystemState {
language: keyof typeof LANGUAGES;
isUploading: boolean;
isNodesEnabled: boolean;
shouldUseNSFWChecker: boolean;
shouldUseWatermarker: boolean;
}
export const initialSystemState: SystemState = {
@@ -119,6 +121,8 @@ export const initialSystemState: SystemState = {
language: 'en',
isUploading: false,
isNodesEnabled: false,
shouldUseNSFWChecker: false,
shouldUseWatermarker: false,
};
export const systemSlice = createSlice({
@@ -194,6 +198,12 @@ export const systemSlice = createSlice({
setIsNodesEnabled(state, action: PayloadAction<boolean>) {
state.isNodesEnabled = action.payload;
},
shouldUseNSFWCheckerChanged(state, action: PayloadAction<boolean>) {
state.shouldUseNSFWChecker = action.payload;
},
shouldUseWatermarkerChanged(state, action: PayloadAction<boolean>) {
state.shouldUseWatermarker = action.payload;
},
},
extraReducers(builder) {
/**
@@ -409,6 +419,8 @@ export const {
languageChanged,
progressImageSet,
setIsNodesEnabled,
shouldUseNSFWCheckerChanged,
shouldUseWatermarkerChanged,
} = systemSlice.actions;
export default systemSlice.reducer;

View File

@@ -227,7 +227,8 @@ const InvokeTabs = () => {
id="gallery"
order={3}
defaultSize={
galleryMinSizePct > DEFAULT_GALLERY_PCT
galleryMinSizePct > DEFAULT_GALLERY_PCT &&
galleryMinSizePct < 100 // prevent this error https://github.com/bvaughn/react-resizable-panels/blob/main/packages/react-resizable-panels/src/Panel.ts#L96
? galleryMinSizePct
: DEFAULT_GALLERY_PCT
}

View File

@@ -1,7 +1,10 @@
import { Flex } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import SDXLImageToImageTabParameters from 'features/sdxl/components/SDXLImageToImageTabParameters';
import SDXLTextToImageTabParameters from 'features/sdxl/components/SDXLTextToImageTabParameters';
import InvokeAILogoComponent from 'features/system/components/InvokeAILogoComponent';
import {
activeTabNameSelector,
@@ -39,13 +42,23 @@ const ParametersDrawer = () => {
dispatch(setShouldShowParametersPanel(false));
};
const model = useAppSelector((state: RootState) => state.generation.model);
const drawerContent = useMemo(() => {
if (activeTabName === 'txt2img') {
return <TextToImageTabParameters />;
return model && model.base_model === 'sdxl' ? (
<SDXLTextToImageTabParameters />
) : (
<TextToImageTabParameters />
);
}
if (activeTabName === 'img2img') {
return <ImageToImageTabParameters />;
return model && model.base_model === 'sdxl' ? (
<SDXLImageToImageTabParameters />
) : (
<ImageToImageTabParameters />
);
}
if (activeTabName === 'unifiedCanvas') {
@@ -53,7 +66,7 @@ const ParametersDrawer = () => {
}
return null;
}, [activeTabName]);
}, [activeTabName, model]);
if (shouldPinParametersPanel) {
return null;

View File

@@ -2,20 +2,18 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para
import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse';
import ParamAdvancedCollapse from 'features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse';
import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse';
// import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse';
import ParamPromptArea from 'features/parameters/components/Parameters/Prompt/ParamPromptArea';
import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
import ImageToImageTabCoreParameters from './ImageToImageTabCoreParameters';
const ImageToImageTabParameters = () => {
return (
<>
<ParamPositiveConditioning />
<ParamNegativeConditioning />
<ParamPromptArea />
<ProcessButtons />
<ImageToImageTabCoreParameters />
<ParamControlNetCollapse />

View File

@@ -3,20 +3,31 @@ import { Flex, Text } from '@chakra-ui/react';
import { useState } from 'react';
import {
MainModelConfigEntity,
DiffusersModelConfigEntity,
LoRAModelConfigEntity,
useGetMainModelsQuery,
useGetLoRAModelsQuery,
} from 'services/api/endpoints/models';
import CheckpointModelEdit from './ModelManagerPanel/CheckpointModelEdit';
import DiffusersModelEdit from './ModelManagerPanel/DiffusersModelEdit';
import LoRAModelEdit from './ModelManagerPanel/LoRAModelEdit';
import ModelList from './ModelManagerPanel/ModelList';
import { ALL_BASE_MODELS } from 'services/api/constants';
export default function ModelManagerPanel() {
const [selectedModelId, setSelectedModelId] = useState<string>();
const { model } = useGetMainModelsQuery(ALL_BASE_MODELS, {
const { mainModel } = useGetMainModelsQuery(ALL_BASE_MODELS, {
selectFromResult: ({ data }) => ({
model: selectedModelId ? data?.entities[selectedModelId] : undefined,
mainModel: selectedModelId ? data?.entities[selectedModelId] : undefined,
}),
});
const { loraModel } = useGetLoRAModelsQuery(undefined, {
selectFromResult: ({ data }) => ({
loraModel: selectedModelId ? data?.entities[selectedModelId] : undefined,
}),
});
const model = mainModel ? mainModel : loraModel;
return (
<Flex sx={{ gap: 8, w: 'full', h: 'full' }}>
@@ -30,7 +41,7 @@ export default function ModelManagerPanel() {
}
type ModelEditProps = {
model: MainModelConfigEntity | undefined;
model: MainModelConfigEntity | LoRAModelConfigEntity | undefined;
};
const ModelEdit = (props: ModelEditProps) => {
@@ -41,7 +52,16 @@ const ModelEdit = (props: ModelEditProps) => {
}
if (model?.model_format === 'diffusers') {
return <DiffusersModelEdit key={model.id} model={model} />;
return (
<DiffusersModelEdit
key={model.id}
model={model as DiffusersModelConfigEntity}
/>
);
}
if (model?.model_type === 'lora') {
return <LoRAModelEdit key={model.id} model={model} />;
}
return (

View File

@@ -1,6 +1,5 @@
import { Badge, Divider, Flex, Text } from '@chakra-ui/react';
import { useForm } from '@mantine/form';
import { makeToast } from 'features/system/util/makeToast';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIButton from 'common/components/IAIButton';
import IAIMantineTextInput from 'common/components/IAIMantineInput';
@@ -8,6 +7,7 @@ import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox';
import { MODEL_TYPE_MAP } from 'features/parameters/types/constants';
import { selectIsBusy } from 'features/system/store/systemSelectors';
import { addToast } from 'features/system/store/systemSlice';
import { makeToast } from 'features/system/util/makeToast';
import { useCallback, useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import {
@@ -115,7 +115,7 @@ export default function CheckpointModelEdit(props: CheckpointModelEditProps) {
{MODEL_TYPE_MAP[model.base_model]} Model
</Text>
</Flex>
{!['sdxl', 'sdxl-refiner'].includes(model.base_model) ? (
{![''].includes(model.base_model) ? (
<ModelConvert model={model} />
) : (
<Badge

View File

@@ -0,0 +1,137 @@
import { Divider, Flex, Text } from '@chakra-ui/react';
import { useForm } from '@mantine/form';
import { makeToast } from 'features/system/util/makeToast';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIButton from 'common/components/IAIButton';
import IAIMantineTextInput from 'common/components/IAIMantineInput';
import { selectIsBusy } from 'features/system/store/systemSelectors';
import { addToast } from 'features/system/store/systemSlice';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import {
LORA_MODEL_FORMAT_MAP,
MODEL_TYPE_MAP,
} from 'features/parameters/types/constants';
import {
LoRAModelConfigEntity,
useUpdateLoRAModelsMutation,
} from 'services/api/endpoints/models';
import { LoRAModelConfig } from 'services/api/types';
import BaseModelSelect from '../shared/BaseModelSelect';
type LoRAModelEditProps = {
model: LoRAModelConfigEntity;
};
export default function LoRAModelEdit(props: LoRAModelEditProps) {
const isBusy = useAppSelector(selectIsBusy);
const { model } = props;
const [updateLoRAModel, { isLoading }] = useUpdateLoRAModelsMutation();
const dispatch = useAppDispatch();
const { t } = useTranslation();
const loraEditForm = useForm<LoRAModelConfig>({
initialValues: {
model_name: model.model_name ? model.model_name : '',
base_model: model.base_model,
model_type: 'lora',
path: model.path ? model.path : '',
description: model.description ? model.description : '',
model_format: model.model_format,
},
validate: {
path: (value) =>
value.trim().length === 0 ? 'Must provide a path' : null,
},
});
const editModelFormSubmitHandler = useCallback(
(values: LoRAModelConfig) => {
const responseBody = {
base_model: model.base_model,
model_name: model.model_name,
body: values,
};
updateLoRAModel(responseBody)
.unwrap()
.then((payload) => {
loraEditForm.setValues(payload as LoRAModelConfig);
dispatch(
addToast(
makeToast({
title: t('modelManager.modelUpdated'),
status: 'success',
})
)
);
})
.catch((_) => {
loraEditForm.reset();
dispatch(
addToast(
makeToast({
title: t('modelManager.modelUpdateFailed'),
status: 'error',
})
)
);
});
},
[
dispatch,
loraEditForm,
model.base_model,
model.model_name,
t,
updateLoRAModel,
]
);
return (
<Flex flexDirection="column" rowGap={4} width="100%">
<Flex flexDirection="column">
<Text fontSize="lg" fontWeight="bold">
{model.model_name}
</Text>
<Text fontSize="sm" color="base.400">
{MODEL_TYPE_MAP[model.base_model]} Model ⋅{' '}
{LORA_MODEL_FORMAT_MAP[model.model_format]} format
</Text>
</Flex>
<Divider />
<form
onSubmit={loraEditForm.onSubmit((values) =>
editModelFormSubmitHandler(values)
)}
>
<Flex flexDirection="column" overflowY="scroll" gap={4}>
<IAIMantineTextInput
label={t('modelManager.name')}
{...loraEditForm.getInputProps('model_name')}
/>
<IAIMantineTextInput
label={t('modelManager.description')}
{...loraEditForm.getInputProps('description')}
/>
<BaseModelSelect {...loraEditForm.getInputProps('base_model')} />
<IAIMantineTextInput
label={t('modelManager.modelLocation')}
{...loraEditForm.getInputProps('path')}
/>
<IAIButton
type="submit"
isDisabled={isBusy || isLoading}
isLoading={isLoading}
>
{t('modelManager.updateModel')}
</IAIButton>
</Flex>
</form>
</Flex>
);
}
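This form is driven by the useUpdateLoRAModelsMutation hook added to services/api/endpoints/models later in this diff. As a rough usage sketch outside the form (the concrete model values below are illustrative, the argument shape follows UpdateLoRAModelArg / DeleteLoRAModelArg defined in that file):

    import {
      useDeleteLoRAModelsMutation,
      useUpdateLoRAModelsMutation,
    } from 'services/api/endpoints/models';

    const useLoRAModelActions = () => {
      const [updateLoRAModel] = useUpdateLoRAModelsMutation();
      const [deleteLoRAModel] = useDeleteLoRAModelsMutation();

      // PATCH models/{base_model}/lora/{model_name} with the full config as body
      const rename = () =>
        updateLoRAModel({
          base_model: 'sd-1',
          model_name: 'my-lora',
          body: {
            model_name: 'my-lora',
            base_model: 'sd-1',
            model_type: 'lora',
            path: '/models/loras/my-lora.safetensors',
            description: 'updated description',
            model_format: 'lycoris',
          },
        }).unwrap();

      // DELETE models/{base_model}/lora/{model_name}
      const remove = () =>
        deleteLoRAModel({ base_model: 'sd-1', model_name: 'my-lora' }).unwrap();

      return { rename, remove };
    };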

View File

@@ -9,6 +9,8 @@ import { useTranslation } from 'react-i18next';
import {
MainModelConfigEntity,
useGetMainModelsQuery,
useGetLoRAModelsQuery,
LoRAModelConfigEntity,
} from 'services/api/endpoints/models';
import ModelListItem from './ModelListItem';
import { ALL_BASE_MODELS } from 'services/api/constants';
@@ -20,22 +22,42 @@ type ModelListProps = {
type ModelFormat = 'images' | 'checkpoint' | 'diffusers';
type ModelType = 'main' | 'lora';
type CombinedModelFormat = ModelFormat | 'lora';
const ModelList = (props: ModelListProps) => {
const { selectedModelId, setSelectedModelId } = props;
const { t } = useTranslation();
const [nameFilter, setNameFilter] = useState<string>('');
const [modelFormatFilter, setModelFormatFilter] =
useState<ModelFormat>('images');
useState<CombinedModelFormat>('images');
const { filteredDiffusersModels } = useGetMainModelsQuery(ALL_BASE_MODELS, {
selectFromResult: ({ data }) => ({
filteredDiffusersModels: modelsFilter(data, 'diffusers', nameFilter),
filteredDiffusersModels: modelsFilter(
data,
'main',
'diffusers',
nameFilter
),
}),
});
const { filteredCheckpointModels } = useGetMainModelsQuery(ALL_BASE_MODELS, {
selectFromResult: ({ data }) => ({
filteredCheckpointModels: modelsFilter(data, 'checkpoint', nameFilter),
filteredCheckpointModels: modelsFilter(
data,
'main',
'checkpoint',
nameFilter
),
}),
});
const { filteredLoraModels } = useGetLoRAModelsQuery(undefined, {
selectFromResult: ({ data }) => ({
filteredLoraModels: modelsFilter(data, 'lora', undefined, nameFilter),
}),
});
@@ -68,6 +90,13 @@ const ModelList = (props: ModelListProps) => {
>
{t('modelManager.checkpointModels')}
</IAIButton>
<IAIButton
size="sm"
onClick={() => setModelFormatFilter('lora')}
isChecked={modelFormatFilter === 'lora'}
>
{t('modelManager.loraModels')}
</IAIButton>
</ButtonGroup>
<IAIInput
@@ -118,6 +147,24 @@ const ModelList = (props: ModelListProps) => {
</Flex>
</StyledModelContainer>
)}
{['images', 'lora'].includes(modelFormatFilter) &&
filteredLoraModels.length > 0 && (
<StyledModelContainer>
<Flex sx={{ gap: 2, flexDir: 'column' }}>
<Text variant="subtext" fontSize="sm">
LoRAs
</Text>
{filteredLoraModels.map((model) => (
<ModelListItem
key={model.id}
model={model}
isSelected={selectedModelId === model.id}
setSelectedModelId={setSelectedModelId}
/>
))}
</Flex>
</StyledModelContainer>
)}
</Flex>
</Flex>
</Flex>
@@ -126,12 +173,13 @@ const ModelList = (props: ModelListProps) => {
export default ModelList;
const modelsFilter = (
data: EntityState<MainModelConfigEntity> | undefined,
model_format: ModelFormat,
const modelsFilter = <T extends MainModelConfigEntity | LoRAModelConfigEntity>(
data: EntityState<T> | undefined,
model_type: ModelType,
model_format: ModelFormat | undefined,
nameFilter: string
) => {
const filteredModels: MainModelConfigEntity[] = [];
const filteredModels: T[] = [];
forEach(data?.entities, (model) => {
if (!model) {
return;
@@ -141,9 +189,11 @@ const modelsFilter = (
.toLowerCase()
.includes(nameFilter.toLowerCase());
const matchesFormat = model.model_format === model_format;
const matchesFormat =
model_format === undefined || model.model_format === model_format;
const matchesType = model.model_type === model_type;
if (matchesFilter && matchesFormat) {
if (matchesFilter && matchesFormat && matchesType) {
filteredModels.push(model);
}
});
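Condensed, the generalized filter above is called once per bucket: main models constrain both model_type and model_format, while LoRAs pass undefined for the format so only the type and name checks apply. The data / loraData / nameFilter names below stand in for the values already in scope in this component:

    // main models, split by format
    const filteredDiffusersModels = modelsFilter(data, 'main', 'diffusers', nameFilter);
    const filteredCheckpointModels = modelsFilter(data, 'main', 'checkpoint', nameFilter);
    // LoRA models: no format constraint
    const filteredLoraModels = modelsFilter(loraData, 'lora', undefined, nameFilter);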

View File

@@ -9,29 +9,26 @@ import { selectIsBusy } from 'features/system/store/systemSelectors';
import { addToast } from 'features/system/store/systemSlice';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
import {
MainModelConfigEntity,
LoRAModelConfigEntity,
useDeleteMainModelsMutation,
useDeleteLoRAModelsMutation,
} from 'services/api/endpoints/models';
type ModelListItemProps = {
model: MainModelConfigEntity;
model: MainModelConfigEntity | LoRAModelConfigEntity;
isSelected: boolean;
setSelectedModelId: (v: string | undefined) => void;
};
const modelBaseTypeMap = {
'sd-1': 'SD1',
'sd-2': 'SD2',
sdxl: 'SDXL',
'sdxl-refiner': 'SDXLR',
};
export default function ModelListItem(props: ModelListItemProps) {
const isBusy = useAppSelector(selectIsBusy);
const { t } = useTranslation();
const dispatch = useAppDispatch();
const [deleteMainModel] = useDeleteMainModelsMutation();
const [deleteLoRAModel] = useDeleteLoRAModelsMutation();
const { model, isSelected, setSelectedModelId } = props;
@@ -40,7 +37,10 @@ export default function ModelListItem(props: ModelListItemProps) {
}, [model.id, setSelectedModelId]);
const handleModelDelete = useCallback(() => {
deleteMainModel(model)
const method = { main: deleteMainModel, lora: deleteLoRAModel }[
model.model_type
];
method(model)
.unwrap()
.then((_) => {
dispatch(
@@ -60,14 +60,21 @@ export default function ModelListItem(props: ModelListItemProps) {
title: `${t('modelManager.modelDeleteFailed')}: ${
model.model_name
}`,
status: 'success',
status: 'error',
})
)
);
}
});
setSelectedModelId(undefined);
}, [deleteMainModel, model, setSelectedModelId, dispatch, t]);
}, [
deleteMainModel,
deleteLoRAModel,
model,
setSelectedModelId,
dispatch,
t,
]);
return (
<Flex sx={{ gap: 2, alignItems: 'center', w: 'full' }}>
@@ -100,8 +107,8 @@ export default function ModelListItem(props: ModelListItemProps) {
<Flex gap={4} alignItems="center">
<Badge minWidth={14} p={0.5} fontSize="sm" variant="solid">
{
modelBaseTypeMap[
model.base_model as keyof typeof modelBaseTypeMap
MODEL_TYPE_SHORT_MAP[
model.base_model as keyof typeof MODEL_TYPE_SHORT_MAP
]
}
</Badge>
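The delete handler in this file now dispatches on model_type through a lookup object instead of calling deleteMainModel unconditionally. In isolation the pattern looks like this (the surrounding component provides the two mutation triggers):

    const handleModelDelete = (model: MainModelConfigEntity | LoRAModelConfigEntity) => {
      // model_type is 'main' | 'lora', so one of the two triggers is always selected
      const method = { main: deleteMainModel, lora: deleteLoRAModel }[model.model_type];
      // both endpoints only need base_model and model_name, which every config carries
      return method(model).unwrap();
    };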

View File

@@ -1,7 +1,7 @@
import { Flex } from '@chakra-ui/react';
import { RootState } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
import TextToImageSDXLTabParameters from 'features/sdxl/components/SDXLTextToImageTabParameters';
import SDXLTextToImageTabParameters from 'features/sdxl/components/SDXLTextToImageTabParameters';
import { memo } from 'react';
import ParametersPinnedWrapper from '../../ParametersPinnedWrapper';
import TextToImageTabMain from './TextToImageTabMain';
@@ -13,7 +13,7 @@ const TextToImageTab = () => {
<Flex sx={{ gap: 4, w: 'full', h: 'full' }}>
<ParametersPinnedWrapper>
{model && model.base_model === 'sdxl' ? (
<TextToImageSDXLTabParameters />
<SDXLTextToImageTabParameters />
) : (
<TextToImageTabParameters />
)}

View File

@@ -2,20 +2,18 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para
import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse';
import ParamAdvancedCollapse from 'features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse';
import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse';
// import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse';
import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
import ParamPromptArea from '../../../../parameters/components/Parameters/Prompt/ParamPromptArea';
import TextToImageTabCoreParameters from './TextToImageTabCoreParameters';
const TextToImageTabParameters = () => {
return (
<>
<ParamPositiveConditioning />
<ParamNegativeConditioning />
<ParamPromptArea />
<ProcessButtons />
<TextToImageTabCoreParameters />
<ParamControlNetCollapse />

View File

@@ -4,18 +4,16 @@ import ParamAdvancedCollapse from 'features/parameters/components/Parameters/Adv
import ParamInfillAndScalingCollapse from 'features/parameters/components/Parameters/Canvas/InfillAndScaling/ParamInfillAndScalingCollapse';
import ParamSeamCorrectionCollapse from 'features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamCorrectionCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse';
// import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse';
import ParamPromptArea from 'features/parameters/components/Parameters/Prompt/ParamPromptArea';
import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
import UnifiedCanvasCoreParameters from './UnifiedCanvasCoreParameters';
const UnifiedCanvasParameters = () => {
return (
<>
<ParamPositiveConditioning />
<ParamNegativeConditioning />
<ParamPromptArea />
<ProcessButtons />
<UnifiedCanvasCoreParameters />
<ParamControlNetCollapse />

View File

@@ -3,7 +3,6 @@ export type { PartialAppConfig } from './app/types/invokeai';
export { default as IAIIconButton } from './common/components/IAIIconButton';
export { default as IAIPopover } from './common/components/IAIPopover';
export { default as ParamMainModelSelect } from './features/parameters/components/Parameters/MainModel/ParamMainModelSelect';
export { default as ColorModeButton } from './features/system/components/ColorModeButton';
export { default as InvokeAiLogoComponent } from './features/system/components/InvokeAILogoComponent';
export { default as SettingsModal } from './features/system/components/SettingsModal/SettingsModal';
export { default as StatusIndicator } from './features/system/components/StatusIndicator';

View File

@@ -8,6 +8,7 @@ export const appInfoApi = api.injectEndpoints({
url: `app/version`,
method: 'GET',
}),
providesTags: ['AppVersion'],
keepUnusedDataFor: 86400000, // 1 day
}),
getAppConfig: build.query<AppConfig, void>({
@@ -15,6 +16,7 @@ export const appInfoApi = api.injectEndpoints({
url: `app/config`,
method: 'GET',
}),
providesTags: ['AppConfig'],
keepUnusedDataFor: 86400000, // 1 day
}),
}),
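Tagging these queries with 'AppVersion' and 'AppConfig' makes their cached data invalidatable from elsewhere in the API slice. For illustration only (this mutation and its route do not exist in the codebase), an endpoint that changed server-side configuration could force a refetch like so:

    // hypothetical mutation, shown only to illustrate tag-based invalidation
    setAppSetting: build.mutation<void, { key: string; value: string }>({
      query: (body) => ({
        url: `app/settings`, // hypothetical route
        method: 'POST',
        body,
      }),
      // any active getAppConfig subscription refetches after this resolves
      invalidatesTags: ['AppConfig'],
    }),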

View File

@@ -52,9 +52,17 @@ type UpdateMainModelArg = {
body: MainModelConfig;
};
type UpdateLoRAModelArg = {
base_model: BaseModelType;
model_name: string;
body: LoRAModelConfig;
};
type UpdateMainModelResponse =
paths['/api/v1/models/{base_model}/{model_type}/{model_name}']['patch']['responses']['200']['content']['application/json'];
type UpdateLoRAModelResponse = UpdateMainModelResponse;
type DeleteMainModelArg = {
base_model: BaseModelType;
model_name: string;
@@ -62,6 +70,10 @@ type DeleteMainModelArg = {
type DeleteMainModelResponse = void;
type DeleteLoRAModelArg = DeleteMainModelArg;
type DeleteLoRAModelResponse = void;
type ConvertMainModelArg = {
base_model: BaseModelType;
model_name: string;
@@ -320,6 +332,31 @@ export const modelsApi = api.injectEndpoints({
);
},
}),
updateLoRAModels: build.mutation<
UpdateLoRAModelResponse,
UpdateLoRAModelArg
>({
query: ({ base_model, model_name, body }) => {
return {
url: `models/${base_model}/lora/${model_name}`,
method: 'PATCH',
body: body,
};
},
invalidatesTags: [{ type: 'LoRAModel', id: LIST_TAG }],
}),
deleteLoRAModels: build.mutation<
DeleteLoRAModelResponse,
DeleteLoRAModelArg
>({
query: ({ base_model, model_name }) => {
return {
url: `models/${base_model}/lora/${model_name}`,
method: 'DELETE',
};
},
invalidatesTags: [{ type: 'LoRAModel', id: LIST_TAG }],
}),
getControlNetModels: build.query<
EntityState<ControlNetModelConfigEntity>,
void
@@ -467,6 +504,8 @@ export const {
useAddMainModelsMutation,
useConvertMainModelsMutation,
useMergeMainModelsMutation,
useDeleteLoRAModelsMutation,
useUpdateLoRAModelsMutation,
useSyncModelsMutation,
useGetModelsInFolderQuery,
useGetCheckpointConfigsQuery,

View File

@@ -318,6 +318,21 @@ export type components = {
* @description List of available infill methods
*/
infill_methods: (string)[];
/**
* Upscaling Methods
* @description List of upscaling methods
*/
upscaling_methods: (components["schemas"]["Upscaler"])[];
/**
* Nsfw Methods
* @description List of NSFW checking methods
*/
nsfw_methods: (string)[];
/**
* Watermarking Methods
* @description List of invisible watermark methods
*/
watermarking_methods: (string)[];
};
/**
* AppVersion
@@ -886,8 +901,8 @@ export type components = {
*/
resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
};
/** ControlNetModelConfig */
ControlNetModelConfig: {
/** ControlNetModelCheckpointConfig */
ControlNetModelCheckpointConfig: {
/** Model Name */
model_name: string;
base_model: components["schemas"]["BaseModelType"];
@@ -900,7 +915,34 @@ export type components = {
path: string;
/** Description */
description?: string;
model_format: components["schemas"]["ControlNetModelFormat"];
/**
* Model Format
* @enum {string}
*/
model_format: "checkpoint";
error?: components["schemas"]["ModelError"];
/** Config */
config: string;
};
/** ControlNetModelDiffusersConfig */
ControlNetModelDiffusersConfig: {
/** Model Name */
model_name: string;
base_model: components["schemas"]["BaseModelType"];
/**
* Model Type
* @enum {string}
*/
model_type: "controlnet";
/** Path */
path: string;
/** Description */
description?: string;
/**
* Model Format
* @enum {string}
*/
model_format: "diffusers";
error?: components["schemas"]["ModelError"];
};
/**
@@ -916,12 +958,6 @@ export type components = {
/** @description Base model */
base_model: components["schemas"]["BaseModelType"];
};
/**
* ControlNetModelFormat
* @description An enumeration.
* @enum {string}
*/
ControlNetModelFormat: "checkpoint" | "diffusers";
/**
* ControlOutput
* @description node output for ControlNet info
@@ -1345,7 +1381,7 @@ export type components = {
* @description The nodes in this graph
*/
nodes?: {
[key: string]: (components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | 
components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined;
[key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | 
components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined;
};
/**
* Edges
@@ -1388,7 +1424,7 @@ export type components = {
* @description The results of node executions
*/
results: {
[key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined;
[key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined;
};
/**
* Errors
@@ -1935,6 +1971,39 @@ export type components = {
*/
image2?: components["schemas"]["ImageField"];
};
/**
* ImageNSFWBlurInvocation
* @description Add blur to NSFW-flagged images
*/
ImageNSFWBlurInvocation: {
/**
* Id
* @description The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Is Intermediate
* @description Whether or not this node is an intermediate node.
* @default false
*/
is_intermediate?: boolean;
/**
* Type
* @default img_nsfw
* @enum {string}
*/
type?: "img_nsfw";
/**
* Image
* @description The image to check
*/
image?: components["schemas"]["ImageField"];
/**
* Metadata
* @description Optional core metadata to be written to the image
*/
metadata?: components["schemas"]["CoreMetadata"];
};
/**
* ImageOutput
* @description Base class for invocations that output an image
@@ -2215,6 +2284,45 @@ export type components = {
*/
thumbnail_url: string;
};
/**
* ImageWatermarkInvocation
* @description Add an invisible watermark to an image
*/
ImageWatermarkInvocation: {
/**
* Id
* @description The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Is Intermediate
* @description Whether or not this node is an intermediate node.
* @default false
*/
is_intermediate?: boolean;
/**
* Type
* @default img_watermark
* @enum {string}
*/
type?: "img_watermark";
/**
* Image
* @description The image to check
*/
image?: components["schemas"]["ImageField"];
/**
* Text
* @description Watermark text
* @default InvokeAI
*/
text?: string;
/**
* Metadata
* @description Optional core metadata to be written to the image
*/
metadata?: components["schemas"]["CoreMetadata"];
};
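Per the two schema entries above, the new safety nodes are addressed by their type literals when a graph is assembled on the client. A small sketch using the ImageNSFWBlurInvocation and ImageWatermarkInvocation aliases that services/api/types re-exports later in this diff (the ids are arbitrary, and the image inputs would normally arrive via graph edges rather than being set inline):

    import {
      ImageNSFWBlurInvocation,
      ImageWatermarkInvocation,
    } from 'services/api/types';

    const nsfwNode: ImageNSFWBlurInvocation = {
      id: 'nsfw_checker',
      type: 'img_nsfw',
      is_intermediate: true,
    };

    const watermarkNode: ImageWatermarkInvocation = {
      id: 'invisible_watermark',
      type: 'img_watermark',
      text: 'InvokeAI', // the schema default
      is_intermediate: false,
    };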
/**
* InfillColorInvocation
* @description Infills transparent areas of an image with a solid color
@@ -2644,7 +2752,7 @@ export type components = {
vae?: components["schemas"]["VaeField"];
/**
* Tiled
* @description Decode latents by overlaping tiles(less memory consumption)
* @description Decode latents by overlapping tiles(less memory consumption)
* @default false
*/
tiled?: boolean;
@@ -3520,7 +3628,7 @@ export type components = {
/** ModelsList */
ModelsList: {
/** Models */
models: (components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"])[];
models: (components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"])[];
};
/**
* MultiplyInvocation
@@ -5309,6 +5417,19 @@ export type components = {
*/
loras: (components["schemas"]["LoraInfo"])[];
};
/** Upscaler */
Upscaler: {
/**
* Upscaling Method
* @description Name of upscaling method
*/
upscaling_method: string;
/**
* Upscaling Models
* @description List of upscaling models for this method
*/
upscaling_models: (string)[];
};
/**
* VAEModelField
* @description Vae model field
@@ -5435,12 +5556,6 @@ export type components = {
*/
image?: components["schemas"]["ImageField"];
};
/**
* StableDiffusion1ModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusion2ModelFormat
* @description An enumeration.
@@ -5453,6 +5568,18 @@ export type components = {
* @enum {string}
*/
StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
/**
* ControlNetModelFormat
* @description An enumeration.
* @enum {string}
*/
ControlNetModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusion1ModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
};
responses: never;
parameters: never;
@@ -5563,7 +5690,7 @@ export type operations = {
};
requestBody: {
content: {
"application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | 
components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
"application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | 
components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
};
};
responses: {
@@ -5600,7 +5727,7 @@ export type operations = {
};
requestBody: {
content: {
"application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | 
components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
"application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | 
components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
};
};
responses: {
@@ -5864,14 +5991,14 @@ export type operations = {
};
requestBody: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
responses: {
/** @description The model was updated successfully */
200: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
/** @description Bad request */
@@ -5902,7 +6029,7 @@ export type operations = {
/** @description The model imported successfully */
201: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
/** @description The model could not be found */
@@ -5928,14 +6055,14 @@ export type operations = {
add_model: {
requestBody: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
responses: {
/** @description The model added successfully */
201: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
/** @description The model could not be found */
@@ -5975,7 +6102,7 @@ export type operations = {
/** @description Model converted successfully */
200: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
/** @description Bad request */
@@ -6064,7 +6191,7 @@ export type operations = {
/** @description Model converted successfully */
200: {
content: {
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
"application/json": components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"];
};
};
/** @description Incompatible models */
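The hunks above widen the generated request/response unions: the single ControlNetModelConfig member is replaced by separate checkpoint and diffusers variants. A minimal sketch of what that means for a caller follows; it assumes the generated configs carry a model_format discriminant ('checkpoint' | 'diffusers') and that the generated module is importable as 'services/api/schema' — both the field name and the import path are inferred for illustration, not confirmed by this diff.

import type { components } from 'services/api/schema'; // path is illustrative

// Local alias over the two ControlNet config schemas; code that previously
// handled a single ControlNetModelConfig member now has to cover both variants.
type AnyControlNetConfig =
  | components['schemas']['ControlNetModelCheckpointConfig']
  | components['schemas']['ControlNetModelDiffusersConfig'];

const describeControlNet = (config: AnyControlNetConfig): string =>
  config.model_format === 'diffusers' // assumed discriminant field
    ? 'diffusers-format ControlNet'
    : 'checkpoint-format ControlNet';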

View File

@@ -42,8 +42,13 @@ export type ControlField = components['schemas']['ControlField'];
// Model Configs
export type LoRAModelConfig = components['schemas']['LoRAModelConfig'];
export type VaeModelConfig = components['schemas']['VaeModelConfig'];
export type ControlNetModelCheckpointConfig =
components['schemas']['ControlNetModelCheckpointConfig'];
export type ControlNetModelDiffusersConfig =
components['schemas']['ControlNetModelDiffusersConfig'];
export type ControlNetModelConfig =
components['schemas']['ControlNetModelConfig'];
| ControlNetModelCheckpointConfig
| ControlNetModelDiffusersConfig;
export type TextualInversionModelConfig =
components['schemas']['TextualInversionModelConfig'];
export type DiffusersModelConfig =
@@ -134,6 +139,12 @@ export type ESRGANInvocation = TypeReq<
export type DivideInvocation = TypeReq<
components['schemas']['DivideInvocation']
>;
export type ImageNSFWBlurInvocation = TypeReq<
components['schemas']['ImageNSFWBlurInvocation']
>;
export type ImageWatermarkInvocation = TypeReq<
components['schemas']['ImageWatermarkInvocation']
>;
// ControlNet Nodes
export type ControlNetInvocation = TypeReq<
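The new ImageNSFWBlurInvocation and ImageWatermarkInvocation aliases follow the same TypeReq pattern as the existing invocation types. Below is a minimal sketch of what such a helper accomplishes; the TypeReq definition and the stand-in schema are assumed re-implementations for illustration only, not the project's actual code, and the 'img_nsfw' tag string is a guess.

// One possible shape for a TypeReq-style helper: make the optional `type`
// field of a generated invocation schema required.
type TypeReq<T extends { type?: unknown }> = Omit<T, 'type'> &
  Required<Pick<T, 'type'>>;

// Hypothetical stand-in for a generated invocation schema in which `type` is optional.
type GeneratedBlurInvocation = {
  id: string;
  type?: string;
  is_intermediate?: boolean;
};

// After TypeReq, `type` must be supplied when the node is constructed.
type BlurNode = TypeReq<GeneratedBlurInvocation>;

const blurNode: BlurNode = {
  id: 'nsfw_blur_1',
  type: 'img_nsfw', // tag string not confirmed by this diff
};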

View File

@@ -33,12 +33,16 @@ const invokeAI = definePartsStyle((props) => ({
bg: mode('base.200', 'base.800')(props),
_hover: {
bg: mode('base.300', 'base.700')(props),
svg: {
opacity: 1,
},
},
_focus: {
bg: mode('base.400', 'base.600')(props),
},
svg: {
opacity: 0.5,
opacity: 0.7,
fontSize: 14,
},
},
}));
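For context on the pattern used above: definePartsStyle builds per-part styles for a multipart Chakra component, and mode(light, dark)(props) resolves a token from the active color mode. A minimal sketch follows; the part names and component registration are illustrative assumptions, not the file this diff touches.

import { createMultiStyleConfigHelpers } from '@chakra-ui/react';
import { mode } from '@chakra-ui/theme-tools';

const { definePartsStyle, defineMultiStyleConfig } = createMultiStyleConfigHelpers([
  'button',
  'icon',
]);

const invokeAI = definePartsStyle((props) => ({
  button: {
    bg: mode('base.200', 'base.800')(props), // light-mode vs dark-mode token
    _hover: {
      bg: mode('base.300', 'base.700')(props),
      svg: { opacity: 1 }, // icons become fully opaque on hover
    },
    svg: { opacity: 0.7, fontSize: 14 }, // resting opacity, as in the updated style
  },
}));

export const exampleMultiStyle = defineMultiStyleConfig({ variants: { invokeAI } });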

View File

@@ -13,6 +13,15 @@ const invokeAI = defineStyle((props) => ({
var(--invokeai-colors-base-200) 70%,
var(--invokeai-colors-base-200) 100%)`,
},
_disabled: {
'::-webkit-resizer': {
backgroundImage: `linear-gradient(135deg,
var(--invokeai-colors-base-50) 0%,
var(--invokeai-colors-base-50) 70%,
var(--invokeai-colors-base-200) 70%,
var(--invokeai-colors-base-200) 100%)`,
},
},
_dark: {
'::-webkit-resizer': {
backgroundImage: `linear-gradient(135deg,
@@ -21,6 +30,15 @@ const invokeAI = defineStyle((props) => ({
var(--invokeai-colors-base-800) 70%,
var(--invokeai-colors-base-800) 100%)`,
},
_disabled: {
'::-webkit-resizer': {
backgroundImage: `linear-gradient(135deg,
var(--invokeai-colors-base-900) 0%,
var(--invokeai-colors-base-900) 70%,
var(--invokeai-colors-base-800) 70%,
var(--invokeai-colors-base-800) 100%)`,
},
},
},
}));
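A minimal sketch of wiring a defineStyle variant like the one above into the theme, including the nested disabled-resizer selector; the component key (Textarea), variant name, and theme wiring are assumptions for illustration, not confirmed by this diff.

import { defineStyle, defineStyleConfig, extendTheme } from '@chakra-ui/react';

const invokeAI = defineStyle(() => ({
  // Nested pseudo-element selector, as in the hunks above: the resize grip gets a
  // dimmer gradient while the control is disabled.
  _disabled: {
    '::-webkit-resizer': {
      backgroundImage: `linear-gradient(135deg,
        var(--invokeai-colors-base-50) 0%,
        var(--invokeai-colors-base-50) 70%,
        var(--invokeai-colors-base-200) 70%,
        var(--invokeai-colors-base-200) 100%)`,
    },
  },
}));

export const textareaTheme = defineStyleConfig({
  variants: { invokeAI },
  defaultProps: { variant: 'invokeAI' },
});

// Registering on the theme (component key assumed):
export const theme = extendTheme({ components: { Textarea: textareaTheme } });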

File diff suppressed because it is too large