import os
import sys
import types
from argparse import Namespace
from pathlib import Path

import torch

import invokeai.version
from invokeai.backend import Args

from .model_manager_service import ModelManagerService
from ...backend import Globals
from ...backend.util import choose_precision, choose_torch_device


# temporary function - should call ModelManagerService() directly
def get_model_manager(config: Args, logger: types.ModuleType) -> ModelManagerService:
    if not config.conf:
        config_file = os.path.join(Globals.root, "configs", "models.yaml")
        if not os.path.exists(config_file):
            report_model_error(
                config,
                FileNotFoundError(f"The file {config_file} could not be found."),
                logger,
            )

    logger.info(f"{invokeai.version.__app_name__}, version {invokeai.version.__version__}")
    logger.info(f'InvokeAI runtime directory is "{Globals.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers  # type: ignore

    transformers.logging.set_verbosity_error()

    import diffusers

    diffusers.logging.set_verbosity_error()

    # normalize the config path relative to the runtime root
    if not os.path.isabs(config.conf):
        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))

    # normalize the embeddings path the same way, if embeddings are enabled
    if config.embeddings:
        if not os.path.isabs(config.embedding_path):
            embedding_path = os.path.normpath(
                os.path.join(Globals.root, config.embedding_path)
            )
        else:
            embedding_path = config.embedding_path
    else:
        embedding_path = None

    # create the model manager
    try:
        device = torch.device(choose_torch_device())
        precision = config.precision
        if precision == "auto":
            precision = choose_precision(device)
        dtype = torch.float32 if precision == "float32" else torch.float16

        # fall back to a cache size derived from max_loaded_models when the
        # config object predates the max_cache_size setting
        max_cache_size = (
            config.max_cache_size
            if hasattr(config, "max_cache_size")
            else config.max_loaded_models * 2.5
        )

        model_manager = ModelManagerService(
            config.conf,
            precision=dtype,
            device_type=device,
            max_cache_size=max_cache_size,
            # temporarily disabled until model manager stabilizes
            # embedding_path=Path(embedding_path),
            logger=logger,
        )
    except (FileNotFoundError, TypeError, AssertionError) as e:
        report_model_error(config, e, logger)
    except (IOError, KeyError) as e:
        logger.error(f"{e}. Aborting.")
        sys.exit(-1)

    # autoconvert and import any new .ckpt files
    if path := config.autoconvert:
        model_manager.autoconvert_weights(
            conf_path=config.conf,
            weights_directory=path,
        )

    logger.info("Model manager initialized")
    return model_manager


def report_model_error(opt: Namespace, e: Exception, logger: types.ModuleType):
    logger.error(f'An error occurred while attempting to initialize the model: "{str(e)}"')
    logger.error(
        "This can be caused by a missing or corrupted models file, "
        "and can sometimes be fixed by (re)installing the models."
    )
    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
    if yes_to_all:
        logger.warning(
            "Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
        )
    else:
        response = input(
            "Do you want to run the invokeai-configure script to select and/or reinstall models?\n[y] "
        )
        if response.startswith(("n", "N")):
            return

    logger.info("invokeai-configure is launching....\n")

    # Match the arguments that were set on the CLI;
    # only the arguments accepted by the configuration script are forwarded
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    sys.argv = ["invokeai-configure"]
    sys.argv.extend(root_dir)
    sys.argv.extend(config)
    if yes_to_all is not None:
        for arg in yes_to_all.split():
            sys.argv.append(arg)

    from invokeai.frontend.install import invokeai_configure

    invokeai_configure()
    # TODO: Figure out how to restart
    # print('** InvokeAI will now restart')
    # sys.argv = previous_args
    # main()  # would rather do an os.exec(), but that doesn't exist?
    # sys.exit(0)