mirror of https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-23 08:47:59 -05:00

Resolving merge conflicts for flake8
committed by psychedelicious
parent f6db9da06c
commit 537ae2f901
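The hunks below are the same handful of flake8 cleanups applied across several scripts: comma-separated imports split one per line with unused names dropped (E401, F401), "not x in y" rewritten as "x not in y" (E713), bare "except:" narrowed to "except Exception:" (E722), a lambda assignment converted to a def (E731), and unused locals such as tic/toc removed (F841). A minimal before/after sketch of those patterns, in hypothetical code rather than lines taken from the repository:

# Before: the constructs flake8 flags in these files (hypothetical example).
import argparse, os, sys              # E401: multiple imports on one line; sys is unused (F401)

rescale = lambda x: (x + 1.0) / 2.0   # E731: lambda assigned to a name

def lookup(key, table):
    if not key in table:              # E713: prefer "not in"
        return None
    try:
        return table[key]
    except:                           # E722: bare except
        return None

# After: the same code rewritten the way this commit does.
import argparse
import os

def rescale(x: float) -> float:
    return (x + 1.0) / 2.0

def lookup(key, table):
    if key not in table:
        return None
    try:
        return table[key]
    except Exception:
        return None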
@@ -1,6 +1,7 @@
 """make variations of input image"""

-import argparse, os, sys, glob
+import argparse
+import os
 import PIL
 import torch
 import numpy as np
@@ -12,7 +13,6 @@ from einops import rearrange, repeat
 from torchvision.utils import make_grid
-from torch import autocast
 from contextlib import nullcontext
 import time
 from pytorch_lightning import seed_everything

 from ldm.util import instantiate_from_config
@@ -234,7 +234,6 @@ def main():
     with torch.no_grad():
         with precision_scope(device.type):
             with model.ema_scope():
-                tic = time.time()
                 all_samples = list()
                 for n in trange(opt.n_iter, desc="Sampling"):
                     for prompts in tqdm(data, desc="data"):
@@ -279,8 +278,6 @@ def main():
                 Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
                 grid_count += 1

-                toc = time.time()
-
     print(f"Your samples are ready and waiting for you here: \n{outpath} \n" f" \nEnjoy.")
@@ -1,4 +1,6 @@
-import argparse, os, sys, glob
+import argparse
+import glob
+import os
 from omegaconf import OmegaConf
 from PIL import Image
 from tqdm import tqdm
@@ -1,13 +1,13 @@
-import argparse, os, sys, glob
-import clip
+import argparse
+import glob
+import os
 import torch
 import torch.nn as nn
 import numpy as np
 from omegaconf import OmegaConf
 from PIL import Image
 from tqdm import tqdm, trange
 from itertools import islice
-from einops import rearrange, repeat
+from einops import rearrange
 from torchvision.utils import make_grid
 import scann
-import time
@@ -390,8 +390,8 @@ if __name__ == "__main__":
            grid = make_grid(grid, nrow=n_rows)

            # to image
-           grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
-           Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
+           grid_np = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
+           Image.fromarray(grid_np.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
            grid_count += 1

    print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")
@@ -1,24 +1,24 @@
-import argparse, os, sys, datetime, glob, importlib, csv
+import argparse
+import datetime
+import glob
+import os
+import sys
+
 import numpy as np
 import time
 import torch
+
 import torchvision
 import pytorch_lightning as pl

 from packaging import version
 from omegaconf import OmegaConf
-from torch.utils.data import random_split, DataLoader, Dataset, Subset
+from torch.utils.data import DataLoader, Dataset
 from functools import partial
 from PIL import Image

 from pytorch_lightning import seed_everything
 from pytorch_lightning.trainer import Trainer
-from pytorch_lightning.callbacks import (
-    ModelCheckpoint,
-    Callback,
-    LearningRateMonitor,
-)
+from pytorch_lightning.callbacks import Callback
 from pytorch_lightning.utilities.distributed import rank_zero_only
 from pytorch_lightning.utilities import rank_zero_info
@@ -651,7 +651,7 @@ if __name__ == "__main__":
        trainer_config["accelerator"] = "auto"
        for k in nondefault_trainer_args(opt):
            trainer_config[k] = getattr(opt, k)
-       if not "gpus" in trainer_config:
+       if "gpus" not in trainer_config:
            del trainer_config["accelerator"]
            cpu = True
        else:
@@ -803,7 +803,7 @@ if __name__ == "__main__":
        trainer_opt.detect_anomaly = False

        trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
-       trainer.logdir = logdir ###
+       trainer.logdir = logdir

        # data
        config.data.params.train.params.data_root = opt.data_root
@@ -2,7 +2,7 @@ from ldm.modules.encoders.modules import FrozenCLIPEmbedder, BERTEmbedder
 from ldm.modules.embedding_manager import EmbeddingManager
 from ldm.invoke.globals import Globals

-import argparse, os
+import argparse
 from functools import partial

 import torch
@@ -108,7 +108,7 @@ if __name__ == "__main__":
    manager.load(manager_ckpt)

    for placeholder_string in manager.string_to_token_dict:
-       if not placeholder_string in string_to_token_dict:
+       if placeholder_string not in string_to_token_dict:
            string_to_token_dict[placeholder_string] = manager.string_to_token_dict[placeholder_string]
            string_to_param_dict[placeholder_string] = manager.string_to_param_dict[placeholder_string]
@@ -1,6 +1,12 @@
-import argparse, os, sys, glob, datetime, yaml
-import torch
-import time
+import argparse
+import datetime
+import glob
+import os
+import sys
+import time
+import yaml
+
+import torch
 import numpy as np
 from tqdm import trange
@@ -10,7 +16,9 @@ from PIL import Image
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.util import instantiate_from_config

-rescale = lambda x: (x + 1.0) / 2.0
+
+def rescale(x: float) -> float:
+    return (x + 1.0) / 2.0


 def custom_to_pil(x):
@@ -45,7 +53,7 @@ def logs2pil(logs, keys=["sample"]):
            else:
                print(f"Unknown format for key {k}. ")
                img = None
-       except:
+       except Exception:
            img = None
        imgs[k] = img
    return imgs
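The "except:" to "except Exception:" change in the hunk above is the E722 fix. A bare except also traps KeyboardInterrupt and SystemExit, which derive from BaseException rather than Exception, so narrowing the clause keeps Ctrl-C and normal interpreter shutdown working while still catching ordinary errors. A small hypothetical illustration, not taken from the repository:

# "except Exception" still catches ordinary errors such as ValueError,
# but no longer swallows KeyboardInterrupt (Ctrl-C).
try:
    value = int("not a number")
except Exception:
    value = None
print(value)  # prints: None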
@@ -1,4 +1,5 @@
-import os, sys
+import os
+import sys
 import numpy as np
 import scann
 import argparse
@@ -1,4 +1,5 @@
-import argparse, os, sys, glob
+import argparse
+import os
 import torch
 import numpy as np
 from omegaconf import OmegaConf
@@ -7,10 +8,9 @@ from tqdm import tqdm, trange
 from itertools import islice
 from einops import rearrange
 from torchvision.utils import make_grid
 import time
 from pytorch_lightning import seed_everything
-from torch import autocast
-from contextlib import contextmanager, nullcontext
+from contextlib import nullcontext

 import k_diffusion as K
 import torch.nn as nn
@@ -251,7 +251,6 @@ def main():
     with torch.no_grad():
         with precision_scope(device.type):
             with model.ema_scope():
-                tic = time.time()
                 all_samples = list()
                 for n in trange(opt.n_iter, desc="Sampling"):
                     for prompts in tqdm(data, desc="data"):
@@ -310,8 +309,6 @@ def main():
                 Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
                 grid_count += 1

-                toc = time.time()
-
     print(f"Your samples are ready and waiting for you here: \n{outpath} \n" f" \nEnjoy.")
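To re-check a cleanup like this locally, a typical invocation is shown below, assuming flake8 is installed. The selected codes simply mirror the patterns fixed in this commit, and the scripts/ path is illustrative rather than the project's actual configured rule set:

python -m flake8 --select=E401,E713,E722,E731,F401,F841 scripts/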