diff --git a/installer/lib/installer.py b/installer/lib/installer.py
index aaf5779801..811a9d7b16 100644
--- a/installer/lib/installer.py
+++ b/installer/lib/installer.py
@@ -332,6 +332,7 @@ class InvokeAiInstance:
Configure the InvokeAI runtime directory
"""
+ auto_install = False
# set sys.argv to a consistent state
new_argv = [sys.argv[0]]
for i in range(1, len(sys.argv)):
@@ -340,13 +341,17 @@ class InvokeAiInstance:
new_argv.append(el)
new_argv.append(sys.argv[i + 1])
elif el in ["-y", "--yes", "--yes-to-all"]:
- new_argv.append(el)
+ auto_install = True
sys.argv = new_argv
import requests # to catch download exceptions
- from messages import introduction
+ import messages
- introduction()
+ auto_install = auto_install or messages.user_wants_auto_configuration()
+ if auto_install:
+ sys.argv.append('--yes')
+ else:
+ messages.introduction()
from invokeai.frontend.install.invokeai_configure import invokeai_configure
diff --git a/installer/lib/messages.py b/installer/lib/messages.py
index c5a39dc91c..4d6a06d2e0 100644
--- a/installer/lib/messages.py
+++ b/installer/lib/messages.py
@@ -7,7 +7,7 @@ import os
import platform
from pathlib import Path
-from prompt_toolkit import prompt
+from prompt_toolkit import prompt, HTML
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.validation import Validator
from rich import box, print
@@ -65,17 +65,46 @@ def confirm_install(dest: Path) -> bool:
if dest.exists():
print(f":exclamation: Directory {dest} already exists :exclamation:")
dest_confirmed = Confirm.ask(
- ":stop_sign: Are you sure you want to (re)install in this location?",
+ ":stop_sign: (re)install in this location?",
default=False,
)
else:
print(f"InvokeAI will be installed in {dest}")
- dest_confirmed = not Confirm.ask("Would you like to pick a different location?", default=False)
+ dest_confirmed = Confirm.ask("Use this location?", default=True)
console.line()
return dest_confirmed
+def user_wants_auto_configuration() -> bool:
+ """Prompt the user to choose between manual and auto configuration."""
+ console.rule("InvokeAI Configuration Section")
+ console.print(
+ Panel(
+ Group(
+ "\n".join(
+ [
+ "Libraries are installed and InvokeAI will now set up its root directory and configuration. Choose between:",
+ "",
+ " * AUTOMATIC configuration: install reasonable defaults and a minimal set of starter models.",
+ " * MANUAL configuration: manually inspect and adjust configuration options and pick from a larger set of starter models.",
+ "",
+ "Later you can fine tune your configuration by selecting option [6] 'Change InvokeAI startup options' from the invoke.bat/invoke.sh launcher script.",
+ ]
+ ),
+ ),
+ box=box.MINIMAL,
+ padding=(1, 1),
+ )
+ )
+    choice = prompt(HTML("Choose &lt;a&gt;utomatic or &lt;m&gt;anual configuration [a/m] (a): "),
+ validator=Validator.from_callable(
+ lambda n: n=='' or n.startswith(('a', 'A', 'm', 'M')),
+ error_message="Please select 'a' or 'm'"
+ ),
+ ) or 'a'
+ return choice.lower().startswith('a')
+
def dest_path(dest=None) -> Path:
"""
Prompt the user for the destination path and create the path
@@ -180,7 +209,7 @@ def graphical_accelerator():
"cpu",
)
idk = (
- "I'm not sure what to choose",
+ "I'm not sure what to choose",
"idk",
)
diff --git a/invokeai/app/services/config/invokeai_config.py b/invokeai/app/services/config/invokeai_config.py
index 51ccf45704..8ea703f39a 100644
--- a/invokeai/app/services/config/invokeai_config.py
+++ b/invokeai/app/services/config/invokeai_config.py
@@ -241,7 +241,7 @@ class InvokeAIAppConfig(InvokeAISettings):
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
# CACHE
- ram : Union[float, Literal["auto"]] = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
+ ram : Union[float, Literal["auto"]] = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
vram : Union[float, Literal["auto"]] = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", )
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", )