From 90ae8ce26a3f82c0924aef5df4a031e3b1c328ce Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Wed, 5 Jul 2023 15:38:07 -0400
Subject: [PATCH 1/4] prevent model install crash "torch needs to be restarted with spawn"

---
 invokeai/backend/model_management/model_probe.py | 3 +--
 invokeai/frontend/install/model_install.py       | 5 ++---
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index 2828cc7ab1..938868e714 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -78,7 +78,6 @@ class ModelProbe(object):
             format_type = 'diffusers' if model_path.is_dir() else 'checkpoint'
         else:
             format_type = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint'
-        model_info = None
 
         try:
             model_type = cls.get_model_type_from_folder(model_path, model) \
@@ -105,7 +104,7 @@ class ModelProbe(object):
                                     ) else 512,
             )
         except Exception:
-            return None
+            raise
 
         return model_info
 
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index 33ef114912..f3ebcb22be 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -678,9 +678,8 @@ def select_and_download_models(opt: Namespace):
 
     # this is where the TUI is called
     else:
-        # needed because the torch library is loaded, even though we don't use it
-        # currently commented out because it has started generating errors (?)
-        # torch.multiprocessing.set_start_method("spawn")
+        # needed to support the probe() method running under a subprocess
+        torch.multiprocessing.set_start_method("spawn")
 
         # the third argument is needed in the Windows 11 environment in
         # order to launch and resize a console window running this program
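The crash this patch addresses comes from forking a worker process after torch has already been imported (and CUDA possibly initialized) in the parent; the child then fails with the "torch needs to be restarted with spawn" error. Below is a minimal, standalone sketch of the spawn-based pattern the fix relies on; the probe_worker function and the model path are illustrative stand-ins and are not code from this patch.

    import torch
    import torch.multiprocessing as mp

    def probe_worker(model_path, results):
        # A spawned child starts from a fresh interpreter, so it initializes
        # torch/CUDA itself rather than inheriting half-set-up state from the parent.
        results.put((model_path, torch.cuda.is_available()))

    if __name__ == "__main__":
        # With the default "fork" start method on Linux, a parent that has already
        # initialized CUDA can produce children that crash on their first CUDA call.
        mp.set_start_method("spawn")
        queue = mp.Queue()
        proc = mp.Process(target=probe_worker, args=("/models/some-model.safetensors", queue))
        proc.start()
        print(queue.get())
        proc.join()
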
From 863336acbb34e7b6aaefa6bdeeca78a4e5358138 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Wed, 5 Jul 2023 16:18:53 -0400
Subject: [PATCH 2/4] Recognize and load diffusers-style LoRAs (.bin)

Prevent double-reporting of autoimported models
- closes #3636

Allow autoimport of diffusers-style LoRA models
- closes #3637

---
 invokeai/backend/install/model_install_backend.py  | 5 ++++-
 invokeai/backend/model_management/model_manager.py | 5 +++--
 invokeai/backend/model_management/model_probe.py   | 6 ++++--
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index 00646e70e3..86a922c05a 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -193,7 +193,10 @@ class ModelInstall(object):
                 models_installed.update(self._install_path(path))
 
             # folders style or similar
-            elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+            elif path.is_dir() and any([(path/x).exists() for x in \
+                                        {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}
+                                        ]
+                                       ):
                 models_installed.update(self._install_path(path))
 
             # recursive scan
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index f15dcfac3c..db8a691d29 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -785,7 +785,7 @@ class ModelManager(object):
                     if path in known_paths or path.parent in scanned_dirs:
                         scanned_dirs.add(path)
                         continue
-                    if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+                    if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]):
                         new_models_found.update(installer.heuristic_import(path))
                         scanned_dirs.add(path)
 
@@ -794,7 +794,8 @@ class ModelManager(object):
                     if path in known_paths or path.parent in scanned_dirs:
                         continue
                     if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
-                        new_models_found.update(installer.heuristic_import(path))
+                        import_result = installer.heuristic_import(path)
+                        new_models_found.update(import_result)
 
         self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
         installed.update(new_models_found)
diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index 938868e714..eef3292d6d 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -126,6 +126,8 @@ class ModelProbe(object):
             return ModelType.Vae
         elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}):
             return ModelType.Lora
+        elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}):
+            return ModelType.Lora
         elif any(key.startswith(v) for v in {"control_model", "input_blocks"}):
             return ModelType.ControlNet
         elif key in {"emb_params", "string_to_param"}:
@@ -136,7 +138,7 @@ class ModelProbe(object):
         if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
             return ModelType.TextualInversion
 
-        raise ValueError("Unable to determine model type")
+        raise ValueError(f"Unable to determine model type for {model_path}")
 
     @classmethod
     def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin)->ModelType:
@@ -166,7 +168,7 @@ class ModelProbe(object):
                 return type
 
         # give up
-        raise ValueError("Unable to determine model type")
+        raise ValueError(f"Unable to determine model type for {folder_path}")
 
     @classmethod
     def _scan_and_load_checkpoint(cls,model_path: Path)->dict:

From a7cbcae1768e7158edb88bf41d61bc8982b741ba Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Wed, 5 Jul 2023 20:59:14 -0400
Subject: [PATCH 3/4] expose max_cache_size to invokeai-configure interface

---
 invokeai/backend/install/invokeai_configure.py | 17 +++++++----------
 .../backend/model_management/model_cache.py    | 14 ++------------
 2 files changed, 9 insertions(+), 22 deletions(-)

diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index a0104bef25..0952a15cf7 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -430,13 +430,13 @@ to allow InvokeAI to download restricted styles & subjects from the "Concept Lib
             max_height=len(PRECISION_CHOICES) + 1,
             scroll_exit=True,
         )
-        self.max_loaded_models = self.add_widget_intelligent(
+        self.max_cache_size = self.add_widget_intelligent(
             IntTitleSlider,
-            name="Number of models to cache in CPU memory (each will use 2-4 GB!)",
-            value=old_opts.max_loaded_models,
-            out_of=10,
-            lowest=1,
-            begin_entry_at=4,
+            name="Size of the RAM cache used for fast model switching (GB)",
+            value=old_opts.max_cache_size,
+            out_of=20,
+            lowest=3,
+            begin_entry_at=6,
             scroll_exit=True,
         )
         self.nextrely += 1
@@ -539,7 +539,7 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
             "outdir",
             "nsfw_checker",
             "free_gpu_mem",
-            "max_loaded_models",
+            "max_cache_size",
             "xformers_enabled",
             "always_use_cpu",
         ]:
@@ -555,9 +555,6 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
         new_opts.license_acceptance = self.license_acceptance.value
         new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
 
-        # widget library workaround to make max_loaded_models an int rather than a float
-        new_opts.max_loaded_models = int(new_opts.max_loaded_models)
-
         return new_opts
 
 
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index df5a2f9272..4155edb686 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -8,7 +8,7 @@ The cache returns context manager generators designed to load the
 model into the GPU within the context, and unload outside the
 context. Use like this:
 
-   cache = ModelCache(max_models_cached=6)
+   cache = ModelCache(max_cache_size=7.5)
    with cache.get_model('runwayml/stable-diffusion-1-5') as SD1,
         cache.get_model('stabilityai/stable-diffusion-2') as SD2:
        do_something_in_GPU(SD1,SD2)
@@ -91,7 +91,7 @@ class ModelCache(object):
         logger: types.ModuleType = logger
     ):
         '''
-        :param max_models: Maximum number of models to cache in CPU RAM [4]
+        :param max_cache_size: Maximum size of the RAM cache [6.0 GB]
         :param execution_device: Torch device to load active model into [torch.device('cuda')]
         :param storage_device: Torch device to save inactive model in [torch.device('cpu')]
         :param precision: Precision for loaded models [torch.float16]
@@ -126,16 +126,6 @@ class ModelCache(object):
             key += f":{submodel_type}"
         return key
 
-    #def get_model(
-    #    self,
-    #    repo_id_or_path: Union[str, Path],
-    #    model_type: ModelType = ModelType.Diffusers,
-    #    subfolder: Path = None,
-    #    submodel: ModelType = None,
-    #    revision: str = None,
-    #    attach_model_part: Tuple[ModelType, str] = (None, None),
-    #    gpu_load: bool = True,
-    #) -> ModelLocker:  # ?? what does it return
     def _get_model_info(
         self,
         model_path: str,
From d5f90b1a02891879f9359451d1983d20799c014b Mon Sep 17 00:00:00 2001
From: Mary Hipp Rogers
Date: Thu, 6 Jul 2023 10:48:42 -0400
Subject: [PATCH 4/4] Improved loading for UI (#3667)

* load images on gallery render

* wait for models to be loaded before you can invoke

---------

Co-authored-by: Mary Hipp
---
 .../listeners/socketio/socketConnected.ts      | 12 +-----------
 .../web/src/common/hooks/useIsReadyToInvoke.ts | 14 +++++++++++++-
 .../gallery/components/ImageGalleryContent.tsx |  9 +++++++++
 3 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts
index cab4738373..fe4bce682b 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts
@@ -1,6 +1,5 @@
 import { log } from 'app/logging/useLogger';
 import { appSocketConnected, socketConnected } from 'services/events/actions';
-import { receivedPageOfImages } from 'services/api/thunks/image';
 import { receivedOpenAPISchema } from 'services/api/thunks/schema';
 
 import { startAppListening } from '../..';
@@ -14,19 +13,10 @@ export const addSocketConnectedEventListener = () => {
 
       moduleLog.debug({ timestamp }, 'Connected');
 
-      const { nodes, config, gallery } = getState();
+      const { nodes, config } = getState();
 
       const { disabledTabs } = config;
 
-      if (!gallery.ids.length) {
-        dispatch(
-          receivedPageOfImages({
-            categories: ['general'],
-            is_intermediate: false,
-          })
-        );
-      }
-
       if (!nodes.schema && !disabledTabs.includes('nodes')) {
         dispatch(receivedOpenAPISchema());
       }
diff --git a/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts b/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts
index c75041eb6c..605aa8b162 100644
--- a/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts
+++ b/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts
@@ -6,10 +6,15 @@ import { validateSeedWeights } from 'common/util/seedWeightPairs';
 import { generationSelector } from 'features/parameters/store/generationSelectors';
 import { systemSelector } from 'features/system/store/systemSelectors';
 import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
+import {
+  modelsApi,
+  useGetMainModelsQuery,
+} from '../../services/api/endpoints/models';
 
 const readinessSelector = createSelector(
   [stateSelector, activeTabNameSelector],
-  ({ generation, system, batch }, activeTabName) => {
+  (state, activeTabName) => {
+    const { generation, system, batch } = state;
     const { shouldGenerateVariations, seedWeights, initialImage, seed } =
       generation;
 
@@ -32,6 +37,13 @@ const readinessSelector = createSelector(
       reasonsWhyNotReady.push('No initial image selected');
     }
 
+    const { isSuccess: mainModelsSuccessfullyLoaded } =
+      modelsApi.endpoints.getMainModels.select()(state);
+    if (!mainModelsSuccessfullyLoaded) {
+      isReady = false;
+      reasonsWhyNotReady.push('Models are not loaded');
+    }
+
     // TODO: job queue
     // Cannot generate if already processing an image
     if (isProcessing) {
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx
index 33edb303e3..a5fc653913 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx
@@ -182,6 +182,15 @@ const ImageGalleryContent = () => {
     return () => osInstance()?.destroy();
   }, [scroller, initialize, osInstance]);
 
+  useEffect(() => {
+    dispatch(
+      receivedPageOfImages({
+        categories: ['general'],
+        is_intermediate: false,
+      })
+    );
+  }, [dispatch]);
+
   const handleClickImagesCategory = useCallback(() => {
     dispatch(imageCategoriesChanged(IMAGE_CATEGORIES));
     dispatch(setGalleryView('images'));
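
For context on the LoRA detection added in patch 2 above: kohya-style LoRA checkpoints carry keys prefixed with "lora_te_" or "lora_unet_", while diffusers-style .bin LoRAs are recognizable by key suffixes such as "to_k_lora.up.weight". The standalone sketch below shows that key-based classification; the helper name and the file path are illustrative and this is not the ModelProbe code itself.

    import torch

    def looks_like_lora(state_dict: dict) -> bool:
        for key in state_dict.keys():
            # kohya-style LoRA: keys prefixed with lora_te_ / lora_unet_
            if key.startswith(("lora_te_", "lora_unet_")):
                return True
            # diffusers-style LoRA: keys ending in to_k_lora.up.weight, etc.
            if key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight")):
                return True
        return False

    weights = torch.load("pytorch_lora_weights.bin", map_location="cpu")
    print("LoRA detected" if looks_like_lora(weights) else "not a LoRA")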
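
Similarly, for the cache change in patch 3 above: the configure screen now asks for a RAM budget in gigabytes (max_cache_size) rather than a model count (max_loaded_models). The sketch below illustrates size-budgeted, least-recently-used accounting in general terms; it is an assumption about the idea, not the actual ModelCache implementation.

    from collections import OrderedDict

    class SizeBudgetedCache:
        """Toy LRU cache that evicts by total size in GB rather than by entry count."""

        def __init__(self, max_cache_size: float = 6.0):  # GB, matching the new default
            self.max_cache_size = max_cache_size
            self._sizes = OrderedDict()  # model key -> approximate size in GB

        def put(self, key: str, size_gb: float) -> None:
            self._sizes[key] = size_gb
            self._sizes.move_to_end(key)  # mark as most recently used
            # Evict least recently used entries until the budget is satisfied again.
            while sum(self._sizes.values()) > self.max_cache_size and len(self._sizes) > 1:
                self._sizes.popitem(last=False)

    cache = SizeBudgetedCache(max_cache_size=7.5)
    cache.put("runwayml/stable-diffusion-1-5", 2.1)
    cache.put("stabilityai/stable-diffusion-2", 2.6)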