From ddb3f4b02b4101c4882c5e2cd46ee53db49fbc99 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sat, 17 Jun 2023 19:26:35 -0400
Subject: [PATCH] make configure script work properly on empty rootdir

---
 .../backend/install/invokeai_configure.py    |  8 ++++----
 .../backend/install/model_install_backend.py |  2 ++
 invokeai/frontend/install/model_install.py   | 19 +++++++++++--------
 3 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index 582b24cbfa..f2487efcfb 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -671,7 +671,8 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
             }
         )
     )
-
+    with open(root / 'invokeai.yaml','w') as f:
+        f.write('#empty invokeai.yaml initialization file')
 
 # -------------------------------------
 def run_console_ui(
@@ -827,8 +828,6 @@ def main():
     errors = set()
 
     try:
-        models_to_download = default_user_selections(opt)
-
         # We check for to see if the runtime directory is correctly initialized.
         old_init_file = config.root_path / 'invokeai.init'
         new_init_file = config.root_path / 'invokeai.yaml'
@@ -841,6 +840,7 @@ def main():
 
         if not config.model_conf_path.exists():
             initialize_rootdir(config.root_path, opt.yes_to_all)
+        models_to_download = default_user_selections(opt)
         if opt.yes_to_all:
             write_default_options(opt, new_init_file)
             init_options = Namespace(
@@ -855,7 +855,7 @@ def main():
                 '\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n'
             )
             sys.exit(0)
-
+
         if opt.skip_support_models:
             logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST")
         else:
diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index 54e5cdc1d8..ced5e99cdc 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -95,6 +95,8 @@ class ModelInstall(object):
                  prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None,
                  access_token:str = None):
         self.config = config
+        with open('log.txt','w') as file:
+            print(config.model_conf_path,file=file)
         self.mgr = ModelManager(config.model_conf_path)
         self.datasets = OmegaConf.load(Dataset_path)
         self.prediction_helper = prediction_type_helper
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index 80ddebca84..a99251e78c 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -173,13 +173,14 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
                 rely=-3,
                 when_pressed_function=self.on_back,
             )
-        self.ok_button = self.add_widget_intelligent(
-            npyscreen.ButtonPress,
-            name=done_label,
-            relx=(window_width - len(done_label)) // 2,
-            rely=-3,
-            when_pressed_function=self.on_execute
-        )
+        else:
+            self.ok_button = self.add_widget_intelligent(
+                npyscreen.ButtonPress,
+                name=done_label,
+                relx=(window_width - len(done_label)) // 2,
+                rely=-3,
+                when_pressed_function=self.on_execute
+            )
 
         label = "APPLY CHANGES & EXIT"
         self.done = self.add_widget_intelligent(
@@ -529,6 +530,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         ui_sections = [self.starter_pipelines, self.pipeline_models,
                        self.controlnet_models, self.lora_models, self.ti_models]
         for section in ui_sections:
+            if not 'models_selected' in section:
+                continue
             selected = set([section['models'][x] for x in section['models_selected'].value])
             models_to_install = [x for x in selected if not self.all_models[x].installed]
             models_to_remove = [x for x in section['models'] if x not in selected and self.all_models[x].installed]
@@ -540,7 +543,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         for section in ui_sections:
             if downloads := section.get('download_ids'):
                 selections.install_models.extend(downloads.value.split())
-
+
         # load directory and whether to scan on startup
         selections.scan_directory = self.pipeline_models['autoload_directory'].value
         selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value