mirror of https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-16 12:38:01 -05:00

Compare commits: invokeai-b ... v2.3.4.pos

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | d81584c8fd | |
| | 1183bf96ed | |
@@ -17,8 +17,6 @@ if sys.platform == "darwin":
import pyparsing # type: ignore
print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
import ldm.invoke
from ..generate import Generate

@@ -33,21 +31,13 @@ from .pngwriter import PngWriter, retrieve_metadata, write_metadata
from .readline import Completer, get_completer
from ..util import url_attachment_name
print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
# global used in multiple functions (fix)
infile = None
def main():
"""Initialize command-line parsers and the diffusion model"""
global infile
print('DEBUG: [3] Entered main()', file=sys.stderr)
print('DEBUG: INVOKEAI ENVIRONMENT:')
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
opt = Args()
args = opt.parse_args()
if not args:
@@ -76,13 +66,9 @@ def main():
Globals.sequential_guidance = args.sequential_guidance
Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
# run any post-install patches needed
run_patches()
print(f'DEBUG: [5] Patches run', file=sys.stderr)
print(f">> Internet connectivity is {Globals.internet_available}")
if not args.conf:

@@ -98,9 +84,8 @@ def main():
# loading here to avoid long delays on startup
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
import transformers # type: ignore
from ldm.generate import Generate
transformers.logging.set_verbosity_error()

@@ -108,7 +93,6 @@ def main():
diffusers.logging.set_verbosity_error()
print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
# Loading Face Restoration and ESRGAN Modules
gfpgan, codeformer, esrgan = load_face_restoration(opt)
@@ -130,7 +114,6 @@ def main():
Globals.lora_models_dir = opt.lora_path
# migrate legacy models
print(f'DEBUG: [8] migrating models', file=sys.stderr)
ModelManager.migrate_models()
# load the infile as a list of lines

@@ -148,7 +131,6 @@ def main():
model = opt.model or retrieve_last_used_model()
print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
# creating a Generate object:
try:
gen = Generate(

@@ -175,7 +157,6 @@ def main():
print(">> changed to seamless tiling mode")
# preload the model
print(f'DEBUG: [10] Loading default model', file=sys.stderr)
try:
gen.load_model()
except KeyError:

@@ -223,7 +204,6 @@ def main():
# TODO: main_loop() has gotten busy. Needs to be refactored.
def main_loop(gen, opt, completer):
"""prompt/read/execute loop"""
print(f'DEBUG: [11] In main loop', file=sys.stderr)
global infile
done = False
doneAfterInFile = infile is not None
@@ -1342,16 +1322,15 @@ def install_missing_config_files():
install ckpt configuration files that may have been added to the
distro after original root directory configuration
"""
pass
# import invokeai.configs as conf
# from shutil import copyfile
import invokeai.configs as conf
from shutil import copyfile
# root_configs = Path(global_config_dir(), 'stable-diffusion')
# repo_configs = Path(conf.__path__[0], 'stable-diffusion')
# for src in repo_configs.iterdir():
# dest = root_configs / src.name
# if not dest.exists():
# copyfile(src,dest)
root_configs = Path(global_config_dir(), 'stable-diffusion')
repo_configs = Path(conf.__path__[0], 'stable-diffusion')
for src in repo_configs.iterdir():
dest = root_configs / src.name
if not dest.exists():
copyfile(src,dest)

def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
"""
@@ -1 +1 @@
__version__='2.3.4'
__version__='2.3.4.post1'
@@ -32,8 +32,7 @@ def expand_prompts(
template_file: Path,
run_invoke: bool = False,
invoke_model: str = None,
invoke_outdir: str = None,
invoke_root: str = None,
invoke_outdir: Path = None,
processes_per_gpu: int = 1,
):
"""

@@ -62,8 +61,6 @@ def expand_prompts(
invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
if invoke_model:
invokeai_args.extend(("--model", invoke_model))
if invoke_root:
invokeai_args.extend(("--root", invoke_root))
if invoke_outdir:
outdir = os.path.expanduser(invoke_outdir)
invokeai_args.extend(("--outdir", outdir))

@@ -82,11 +79,6 @@ def expand_prompts(
)
import ldm.invoke.CLI
print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
parent_conn, child_conn = Pipe()
children = set()
for i in range(processes_to_launch):
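The hunk above drops the batch runner's environment dump but keeps the parent/worker wiring built on multiprocessing.Pipe: the parent feeds prompt lines to each worker, which reads them as if they were stdin (see MessageToStdin in the _run_invoke hunk further down). A minimal, self-contained sketch of that pattern with hypothetical names, not the repository's code:

```python
# Sketch only: a parent process sends prompt lines through a Pipe and a
# worker consumes them, roughly the shape expand_prompts appears to use.
from multiprocessing import Pipe, Process

def worker(conn_in):
    while True:
        line = conn_in.recv()
        if line is None:          # sentinel: no more prompts
            break
        print(f"worker got prompt: {line}")

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    child = Process(target=worker, args=(child_conn,))
    child.start()
    for prompt in ["a cat", "a dog"]:
        parent_conn.send(prompt)
    parent_conn.send(None)
    child.join()
```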
@@ -119,13 +111,6 @@ def expand_prompts(
for p in children:
p.terminate()
def _dummy_cli_main():
counter = 0
while line := sys.stdin.readline():
print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
counter += 1
time.sleep(1)
def _get_fn_format(directory:str, sequence:int)->str:
"""
Get a filename that doesn't exceed filename length restrictions
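_get_fn_format's docstring promises a filename that stays within filesystem name-length limits; its body is not part of this hunk. A purely hypothetical sketch of that idea, assuming a 255-character limit and a .png extension:

```python
# Hypothetical sketch, not the repository's implementation: truncate the
# prompt-derived stem so the final filename stays under the length limit.
def safe_filename(sequence: int, prompt: str, limit: int = 255) -> str:
    ext = ".png"
    stem = f"{sequence:06d}.{prompt}"
    return stem[: limit - len(ext)] + ext
```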
@@ -194,9 +179,9 @@ def _run_invoke(
os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
sys.argv = args
sys.stdin = MessageToStdin(conn_in)
# sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
# with open(logfile, "w") as stderr, redirect_stderr(stderr):
entry_point()
sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
with open(logfile, "w") as stderr, redirect_stderr(stderr):
entry_point()
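This hunk re-enables the previously commented-out stdout filtering and per-process stderr logging around the worker's call into the CLI. A minimal sketch of the contextlib.redirect_stderr part of that pattern, with an illustrative entry point and log file name (not the repository's code):

```python
# Sketch only: everything entry_point() writes to stderr while the context
# manager is active lands in the per-process log file.
import sys
from contextlib import redirect_stderr

def entry_point():
    print("progress and warnings go to the log", file=sys.stderr)

logfile = "worker-0.log"
with open(logfile, "w") as stderr, redirect_stderr(stderr):
    entry_point()
```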
def _filter_output(stream: TextIOBase):
@@ -253,10 +238,6 @@ def main():
default=1,
help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
)
parser.add_argument(
'--root_dir',
default=None,
help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
opt = parser.parse_args()
if opt.example:

@@ -280,7 +261,6 @@ def main():
run_invoke=opt.invoke,
invoke_model=opt.model,
invoke_outdir=opt.outdir,
invoke_root=opt.root,
processes_per_gpu=opt.processes_per_gpu,
)
@@ -339,12 +339,14 @@ class KohyaLoraManager:
return lora

def apply_lora_model(self, name, mult: float = 1.0):
path_file = None
for suffix in ["ckpt", "safetensors", "pt"]:
path_file = Path(self.lora_path, f"{name}.{suffix}")
if path_file.exists():
path_files = [x for x in Path(self.lora_path).glob(f"**/{name}.{suffix}")]
if len(path_files):
path_file = path_files[0]
print(f" | Loading lora {path_file.name} with weight {mult}")
break
if not path_file.exists():
if not path_file:
print(f" ** Unable to find lora: {name}")
return
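The apply_lora_model hunk swaps a flat path check for a recursive glob, so a LoRA file stored in a subdirectory of the lora models directory is still found. A standalone sketch of that lookup, where find_lora_file is a hypothetical helper rather than repository code:

```python
# Sketch of the recursive lookup the new code performs: try each known
# suffix and search subdirectories, returning the first match.
from pathlib import Path
from typing import Optional

def find_lora_file(lora_path: Path, name: str) -> Optional[Path]:
    for suffix in ["ckpt", "safetensors", "pt"]:
        matches = list(lora_path.glob(f"**/{name}.{suffix}"))
        if matches:
            return matches[0]
    return None
```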
@@ -34,7 +34,7 @@ dependencies = [
"clip_anytorch",
"compel~=1.1.0",
"datasets",
"diffusers[torch]~=0.14",
"diffusers[torch]==0.14",
"dnspython==2.2.1",
"einops",
"eventlet",
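The final hunk changes the diffusers requirement from a compatible-release specifier to an exact pin. As an illustrative check with the packaging library (not part of the diff), the practical difference is that "~=0.14" still admits newer 0.x releases while "==0.14" does not:

```python
from packaging.specifiers import SpecifierSet

print(SpecifierSet("~=0.14").contains("0.15.0"))  # True: compatible-release spec
print(SpecifierSet("==0.14").contains("0.15.0"))  # False: exact pin
```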