Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-16 13:48:07 -05:00)

Compare commits: release_1. ... release-1. (37 commits)
| SHA1 |
|---|
| ddf0ef3af1 |
| aa2729d868 |
| 5f352aec87 |
| c4c4974b39 |
| 194f43f00b |
| 325bc5280e |
| 11cc8e545b |
| 9adac56f4e |
| 5d5307dcb4 |
| 3c74dd41c4 |
| f5450bad61 |
| 2ace56313c |
| 78aba5b770 |
| 49f0d31fac |
| bb91ca0462 |
| d340afc9e5 |
| 7085d1910b |
| a997e09c48 |
| 503f962f68 |
| 41f0afbcb6 |
| 6650b98e7c |
| 1ca3dc553c |
| 09afcc321c |
| 7b2335068c |
| d3eff4d827 |
| 0d23a0f899 |
| 985948c8b9 |
| 6ae09f6e46 |
| ae821ce0e6 |
| ce5b94bf40 |
| b5d9981125 |
| 9a237015da |
| 5eff5d4cd2 |
| 4527ef15f9 |
| 0cea751476 |
| a5fb8469ed |
| 9eaef0c5a8 |
README.md (200 lines changed)
@@ -17,10 +17,15 @@ initialization only happens once. After that image generation
from the command-line interface is very fast.

The script uses the readline library to allow for in-line editing,
command history (up and down arrows), autocompletion, and more.
command history (up and down arrows), autocompletion, and more. To help
keep track of which prompts generated which images, the script writes a
log file of image names and prompts to the selected output directory.
In addition, as of version 1.02, it also writes the prompt into the PNG
file's metadata where it can be retrieved using scripts/images2prompt.py

Note that this has only been tested in the Linux environment. Testing
and tweaking for Windows is in progress.
The script is confirmed to work on Linux and Windows systems. It should
work on MacOSX as well, but this is not confirmed. Note that this script
runs from the command-line (CMD or Terminal window), and does not have a GUI.

~~~~
(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py
@@ -37,17 +42,25 @@ setting sampler to plms
* Initialization done! Awaiting your command...
dream> ashley judd riding a camel -n2 -s150
Outputs:
outputs/txt2img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
outputs/txt2img-samples/00010.png: "ashley judd riding a camel" -n2 -s150 -S 1362479620
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
outputs/img-samples/00010.png: "ashley judd riding a camel" -n2 -s150 -S 1362479620

dream> "there's a fly in my soup" -n6 -g
outputs/txt2img-samples/00041.png: "there's a fly in my soup" -n6 -g -S 2685670268
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
dream> q

# this shows how to retrieve the prompt stored in the saved image's metadata
(ldm) ~/stable-diffusion$ python3 ./scripts/images2prompt.py outputs/img_samples/*.png
00009.png: "ashley judd riding a camel" -s150 -S 416354203
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
~~~~

The dream> prompt's arguments are pretty-much
The dream> prompt's arguments are pretty much
identical to those used in the Discord bot, except you don't need to
type "!dream". A significant change is that creation of individual images is the default
type "!dream" (it doesn't hurt if you do). A significant change is that creation of individual images
is now the default
unless --grid (-g) is given. For backward compatibility, the -i switch is recognized.
For command-line help type -h (or --help) at the dream> prompt.

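As an aside on how such a dream> line can be handled: the version of scripts/dream.py shown later in this diff imports shlex and defines argparse switches like -n, -s, and -g, so a command of this form can be tokenized and parsed roughly as sketched below. The parsing code inside main_loop is not visible in this diff, so treat this as an illustration of the idea rather than the script's actual implementation.

```python
# Illustrative only: mirrors the argparse switches defined in scripts/dream.py
# later in this diff; the real main_loop parsing code is not shown here.
import argparse
import shlex

parser = argparse.ArgumentParser()
parser.add_argument('prompt')
parser.add_argument('-n', '--iterations', type=int, default=1)
parser.add_argument('-s', '--steps', type=int)
parser.add_argument('-g', '--grid', action='store_true')

command = '"there\'s a fly in my soup" -n6 -g'
opt = parser.parse_args(shlex.split(command))
print(opt.prompt, opt.iterations, opt.grid)   # there's a fly in my soup 6 True
```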
@@ -71,31 +84,158 @@ The --init_img (-I) option gives the path to the seed picture. --strength (-f) controls how much
the original will be modified, ranging from 0.0 (keep the original intact), to 1.0 (ignore the original
completely). The default is 0.75, and ranges from 0.25-0.75 give interesting results.
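A quick way to see what --strength does in practice: later in this diff, ldm/simplet2i.py computes t_enc = int(strength * steps), i.e. only that fraction of the sampling steps is re-run on top of the init image. The numbers below are just a worked illustration.

```python
# Worked illustration of the strength/steps relationship used by img2img
# (ldm/simplet2i.py in this diff computes t_enc = int(strength * steps)).
steps = 50
for strength in (0.25, 0.5, 0.75, 1.0):
    t_enc = int(strength * steps)
    print(f'-f{strength}: {t_enc} of {steps} steps applied on top of the init image')
```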
## Changes

* v1.02 (21 August 2022)
    * A copy of the prompt and all of its switches and options is now stored in the corresponding
      image in a tEXt metadata field named "Dream". You can read the prompt using scripts/images2prompt.py,
      or an image editor that allows you to explore the full metadata.
      **Please run "conda env update -f environment.yaml" to load the k_lms dependencies!!**

* v1.01 (21 August 2022)
    * added k_lms sampling.
      **Please run "conda env update -f environment.yaml" to load the k_lms dependencies!!**
    * use half precision arithmetic by default, resulting in faster execution and lower memory requirements
      Pass argument --full_precision to dream.py to get slower but more accurate image generation

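For readers who want to poke at the new "Dream" metadata directly, here is a minimal sketch using the same Pillow calls that scripts/dream.py and scripts/images2prompt.py use later in this diff; the file name and prompt are made-up examples.

```python
# Minimal sketch: write and read the "Dream" tEXt chunk with Pillow.
# Follows the PngImagePlugin/Image calls shown later in this diff;
# the path and prompt below are hypothetical.
from PIL import Image, PngImagePlugin

path = 'outputs/img-samples/00009.png'
prompt = '"ashley judd riding a camel" -n2 -s150 -S416354203'

info = PngImagePlugin.PngInfo()
info.add_text('Dream', prompt)          # store the prompt in the PNG metadata
im = Image.open(path)
im.save(path, 'PNG', pnginfo=info)

print(Image.open(path).text['Dream'])   # read it back, as images2prompt.py does
```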
## Installation

For installation, follow the instructions from the original CompViz/stable-diffusion
README which is appended to this README for your convenience. A few things to be aware of:
### Linux/Mac

1. You will need the stable-diffusion model weights, which have to be downloaded separately as described
in the CompViz instructions. They are expected to be released in the latter half of August.
1. You will need to install the following prerequisites if they are not already available. Use your
operating system's preferred installer.
   * Python (version 3.8.5 recommended; higher may work)
   * git

2. If you do not have the weights and want to play with low-quality image generation, then you can use
the public LAION400m weights, which can be installed like this:
2. Install the Python Anaconda environment manager using pip3.
```
~$ pip3 install anaconda
```
After installing anaconda, you should log out of your system and log back in. If the installation
worked, your command prompt will be prefixed by the name of the current anaconda environment, "(base)".

~~~~
mkdir -p models/ldm/text2img-large/
wget -O models/ldm/text2img-large/model.ckpt https://ommer-lab.com/files/latent-diffusion/nitro/txt2img-f8-large/model.ckpt
~~~~
3. Copy the stable-diffusion source code from GitHub:
```
(base) ~$ git clone https://github.com/lstein/stable-diffusion.git
```
This will create a stable-diffusion folder where you will follow the rest of the steps.

You will then have to invoke dream.py with the --laion400m (or -l for short) flag:
~~~~
(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py -l
~~~~
4. Enter the newly-created stable-diffusion folder. From this step forward make sure that you are working in the stable-diffusion directory!
```
(base) ~$ cd stable-diffusion
(base) ~/stable-diffusion$
```
5. Use anaconda to copy necessary python packages, create a new python environment named "ldm",
and activate the environment.
```
(base) ~/stable-diffusion$ conda env create -f environment.yaml
(base) ~/stable-diffusion$ conda activate ldm
(ldm) ~/stable-diffusion$
```
After these steps, your command prompt will be prefixed by "(ldm)" as shown above.

3. To get around issues that arise when running the stable diffusion model on a machine without internet
connectivity, I wrote a script that pre-downloads internet dependencies. Whether or not your GPU machine
has connectivity, you will need to run this preloading script before the first run of dream.py. See
"Workaround for machines with limited internet connectivity" below for the walkthrough.
6. Load a couple of small machine-learning models required by stable diffusion:
```
(ldm) ~/stable-diffusion$ python3 scripts/preload_models.py
```

7. Now you need to install the weights for the stable diffusion model.

For testing prior to the release of the real weights, you can use an older weight file that produces low-quality images. Create a directory within stable-diffusion named "models/ldm/text2img-large", and use the wget URL downloader tool to copy the weight file into it:
```
(ldm) ~/stable-diffusion$ mkdir -p models/ldm/text2img-large
(ldm) ~/stable-diffusion$ wget -O models/ldm/text2img-large/model.ckpt https://ommer-lab.com/files/latent-diffusion/nitro/txt2img-f8-large/model.ckpt
```
For testing with the released weights, you will do something similar, but with a directory named "models/ldm/stable-diffusion-v1":
```
(ldm) ~/stable-diffusion$ mkdir -p models/ldm/stable-diffusion-v1
(ldm) ~/stable-diffusion$ wget -O models/ldm/stable-diffusion-v1/model.ckpt <ENTER URL HERE>
```
These weight files are ~5 GB in size, so downloading may take a while.

8. Start generating images!
```
# for the pre-release weights use the -l or --laion400m switch
(ldm) ~/stable-diffusion$ python3 scripts/dream.py -l

# for the post-release weights do not use the switch
(ldm) ~/stable-diffusion$ python3 scripts/dream.py

# for additional configuration switches and arguments, use -h or --help
(ldm) ~/stable-diffusion$ python3 scripts/dream.py -h
```
9. Subsequently, to relaunch the script, be sure to run "conda activate ldm" (step 5, second command), enter the "stable-diffusion"
directory, and then launch the dream script (step 8). If you forget to activate the ldm environment, the script will fail with multiple ModuleNotFound errors.

### Updating to newer versions of the script

This distribution is changing rapidly. If you used the "git clone" method (step 3) to download the stable-diffusion directory, then to update to the latest and greatest version, launch the Anaconda window, enter "stable-diffusion", and type:
```
(ldm) ~/stable-diffusion$ git pull
```
This will bring your local copy into sync with the remote one.

### Windows

1. Install Python version 3.8.5 from here: https://www.python.org/downloads/windows/
(note that several users have reported that later versions do not work properly)

2. Install Anaconda3 (miniconda3 version) from here: https://docs.anaconda.com/anaconda/install/windows/

3. Install Git from here: https://git-scm.com/download/win

4. Launch Anaconda from the Windows Start menu. This will bring up a command window. Type all the remaining commands in this window.

5. Run the command:
```
git clone https://github.com/lstein/stable-diffusion.git
```
This will create a stable-diffusion folder where you will follow the rest of the steps.

6. Enter the newly-created stable-diffusion folder. From this step forward make sure that you are working in the stable-diffusion directory!
```
cd stable-diffusion
```

7. Run the following two commands:
```
conda env create -f environment.yaml    (step 7a)
conda activate ldm                      (step 7b)
```
This will install all python requirements and activate the "ldm" environment, which sets PATH and other environment variables properly.

8. Run the command:
```
python scripts\preload_models.py
```
This installs two machine learning models that stable diffusion requires.

9. Now you need to install the weights for the big stable diffusion model.

For testing prior to the release of the real weights, create a directory within stable-diffusion named "models\ldm\text2img-large".

For testing with the released weights, create a directory within stable-diffusion named "models\ldm\stable-diffusion-v1".

Then use a web browser to copy model.ckpt into the appropriate directory. For the text2img-large (pre-release) model, the weights are at https://ommer-lab.com/files/latent-diffusion/nitro/txt2img-f8-large/model.ckpt. Check back here later for the release URL.

10. Start generating images!
```
# for the pre-release weights
python scripts\dream.py -l

# for the post-release weights
python scripts\dream.py
```
11. Subsequently, to relaunch the script, first activate the Anaconda command window (step 4), run "conda activate ldm" (step 7b), and then launch the dream script (step 10).

### Updating to newer versions of the script

This distribution is changing rapidly. If you used the "git clone" method (step 5) to download the stable-diffusion directory, then to update to the latest and greatest version, launch the Anaconda window, enter "stable-diffusion", and type:
```
git pull
```
This will bring your local copy into sync with the remote one.

## Simplified API for text to image generation
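The body of this section is unchanged and therefore not shown in the diff, but based on the T2I class in ldm/simplet2i.py further down (and on how scripts/dream.py calls it), usage is roughly as sketched below. The exact txt2img signature and the (filename, seed) result format are inferred, not quoted.

```python
# Hedged sketch of the simplified API, inferred from ldm/simplet2i.py and
# scripts/dream.py in this diff; treat the argument names as assumptions.
from ldm.simplet2i import T2I

t2i = T2I(sampler_name='klms')   # 'klms' is the new default sampler in this change
results = t2i.txt2img(prompt='a watercolor painting of a lighthouse',
                      iterations=2, steps=50)
for filename, seed in results:
    print(f'{filename}: -S{seed}')
```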
@@ -154,7 +294,9 @@ For support,
please use this repository's GitHub Issues tracking service. Feel free
to send me an email if you use and like the script.

*Author:* Lincoln D. Stein <lincoln.stein@gmail.com>
*Original Author:* Lincoln D. Stein <lincoln.stein@gmail.com>

*Contributions by:* [Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison), [xraxra](https://github.com/xraxra), and [bmaltais](https://github.com/bmaltais)

# Original README from CompViz/stable-diffusion
*Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.ai/) and [Runway](https://runwayml.com/) and builds upon our previous work:*
@@ -181,6 +323,7 @@ See [this section](#stable-diffusion-v1) below and the [model card](https://hugg


## Requirements

A suitable [conda](https://conda.io/) environment named `ldm` can be created
and activated with:

@@ -195,8 +338,7 @@ You can also update an existing [latent diffusion](https://github.com/CompVis/la
conda install pytorch torchvision -c pytorch
pip install transformers==4.19.2
pip install -e .
```

```

## Stable Diffusion v1

@@ -24,6 +24,8 @@ dependencies:
    - transformers==4.19.2
    - torchmetrics==0.6.0
    - kornia==0.6
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - accelerate==0.12.0
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
    - -e .
ldm/models/diffusion/ksampler.py (new file, 74 lines)
@@ -0,0 +1,74 @@
'''wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers'''
import k_diffusion as K
import torch
import torch.nn as nn
import accelerate

class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, x, sigma, uncond, cond, cond_scale):
        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigma] * 2)
        cond_in = torch.cat([uncond, cond])
        uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
        return uncond + (cond - uncond) * cond_scale

class KSampler(object):
    def __init__(self, model, schedule="lms", **kwargs):
        super().__init__()
        self.model = K.external.CompVisDenoiser(model)
        self.accelerator = accelerate.Accelerator()
        self.device = self.accelerator.device
        self.schedule = schedule

    def forward(self, x, sigma, uncond, cond, cond_scale):
        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigma] * 2)
        cond_in = torch.cat([uncond, cond])
        uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
        return uncond + (cond - uncond) * cond_scale


    # most of these arguments are ignored and are only present for compatibility with
    # other samplers
    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):

        sigmas = self.model.get_sigmas(S)
        if x_T:
            x = x_T
        else:
            x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] # for GPU draw
        model_wrap_cfg = CFGDenoiser(self.model)
        extra_args = {'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}
        return (K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not self.accelerator.is_main_process),
                None)

    def gather(samples_ddim):
        return self.accelerator.gather(samples_ddim)
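The guidance blend in CFGDenoiser.forward is the standard classifier-free guidance formula: run the denoiser on the empty prompt and on the real prompt, then move the result away from the unconditional prediction by cond_scale. A toy numeric example (made-up tensors, same arithmetic):

```python
# Toy illustration of uncond + (cond - uncond) * cond_scale from CFGDenoiser.forward.
# The tensors are made up; only the arithmetic matches the code above.
import torch

uncond = torch.tensor([0.10, 0.20, 0.30])   # denoiser output for the empty prompt
cond   = torch.tensor([0.40, 0.10, 0.50])   # denoiser output for the real prompt
cond_scale = 7.5                            # plays the same role as cfg_scale / -C

guided = uncond + (cond - uncond) * cond_scale
print(guided)   # tensor([ 2.3500, -0.5500,  1.8000])
```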
@@ -11,7 +11,7 @@ t2i = T2I(outdir = <path>        // outputs/txt2img-samples
          batch_size = <integer>     // how many images to generate per sampling (1)
          steps = <integer>          // 50
          seed = <integer>           // current system time
          sampler = ['ddim','plms']  // ddim
          sampler_name = ['ddim','plms','klms']  // klms
          grid = <boolean>           // false
          width = <integer>          // image width, multiple of 64 (512)
          height = <integer>         // image height, multiple of 64 (512)
@@ -62,8 +62,9 @@ import time
import math

from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ksampler import KSampler

class T2I:
    """T2I class
@@ -76,7 +77,7 @@ class T2I:
        batch_size
        steps
        seed
        sampler
        sampler_name
        grid
        individual
        width
@@ -87,6 +88,8 @@ class T2I:
        downsampling_factor
        precision
        strength

    The vast majority of these arguments default to reasonable values.
    """
    def __init__(self,
                 outdir="outputs/txt2img-samples",
@@ -101,13 +104,15 @@ class T2I:
                 cfg_scale=7.5,
                 weights="models/ldm/stable-diffusion-v1/model.ckpt",
                 config = "configs/latent-diffusion/txt2img-1p4B-eval.yaml",
                 sampler="plms",
                 sampler_name="klms",
                 latent_channels=4,
                 downsampling_factor=8,
                 ddim_eta=0.0,  # deterministic
                 fixed_code=False,
                 precision='autocast',
                 strength=0.75 # default in scripts/img2img.py
                 full_precision=False,
                 strength=0.75, # default in scripts/img2img.py
                 latent_diffusion_weights=False # just to keep track of this parameter when regenerating prompt
    ):
        self.outdir = outdir
        self.batch_size = batch_size
@@ -117,17 +122,19 @@ class T2I:
        self.grid = grid
        self.steps = steps
        self.cfg_scale = cfg_scale
        self.weights = weights
        self.weights = weights
        self.config = config
        self.sampler_name = sampler
        self.sampler_name = sampler_name
        self.fixed_code = fixed_code
        self.latent_channels = latent_channels
        self.downsampling_factor = downsampling_factor
        self.ddim_eta = ddim_eta
        self.precision = precision
        self.full_precision = full_precision
        self.strength = strength
        self.model = None # empty for now
        self.sampler = None
        self.latent_diffusion_weights=latent_diffusion_weights
        if seed is None:
            self.seed = self._new_seed()
        else:
@@ -256,6 +263,8 @@ class T2I:

        model = self.load_model()  # will instantiate the model or return it from cache

        precision_scope = autocast if self.precision=="autocast" else nullcontext

        # grid and individual are mutually exclusive, with individual taking priority.
        # not necessary, but needed for compatibility with dream bot
        if (grid is None):
@@ -279,7 +288,8 @@ class T2I:
            assert os.path.isfile(init_img)
            init_image = self._load_img(init_img).to(self.device)
            init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
            init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space
            with precision_scope("cuda"):
                init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space

            sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)

@@ -292,7 +302,6 @@ class T2I:
            t_enc = int(strength * steps)
            print(f"target t_enc is {t_enc} steps")

            precision_scope = autocast if self.precision=="autocast" else nullcontext
            images = list()
            seeds = list()

@@ -385,6 +394,9 @@ class T2I:
        elif self.sampler_name == 'ddim':
            print("setting sampler to ddim")
            self.sampler = DDIMSampler(self.model)
        elif self.sampler_name == 'klms':
            print("setting sampler to klms")
            self.sampler = KSampler(self.model,'lms')
        else:
            print(f"unsupported sampler {self.sampler_name}, defaulting to plms")
            self.sampler = PLMSSampler(self.model)
@@ -401,6 +413,11 @@ class T2I:
        m, u = model.load_state_dict(sd, strict=False)
        model.cuda()
        model.eval()
        if self.full_precision:
            print('Using slower but more accurate full-precision math (--full_precision)')
        else:
            print('Using half precision math. Call with --full_precision to use slower but more accurate full precision.')
            model.half()
        return model

    def _load_img(self,path):

scripts/dream.py (105 lines changed)
@@ -1,8 +1,10 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import shlex
import atexit
import os
import sys
from PIL import Image,PngImagePlugin

# readline unavailable on windows systems
try:
@@ -11,7 +13,7 @@ try:
except:
    readline_available = False

debugging = True
debugging = False

def main():
    ''' Initialize command-line parsers and the diffusion model '''
@@ -35,6 +37,7 @@ def main():
    setup_readline()

    print("* Initializing, be patient...\n")
    sys.path.append('.')
    from pytorch_lightning import logging
    from ldm.simplet2i import T2I

@@ -46,9 +49,12 @@ def main():
                height=height,
                batch_size=opt.batch_size,
                outdir=opt.outdir,
                sampler=opt.sampler,
                sampler_name=opt.sampler_name,
                weights=weights,
                config=config)
                full_precision=opt.full_precision,
                config=config,
                latent_diffusion_weights=opt.laion400m  # this is solely for recreating the prompt
    )

    # make sure the output directory exists
    if not os.path.exists(opt.outdir):
@@ -116,7 +122,7 @@ def main_loop(t2i,parser,log):
        else:
            results = t2i.img2img(**vars(opt))
        print("Outputs:")
        write_log_message(opt,switches,results,log)
        write_log_message(t2i,opt,results,log)
    except KeyboardInterrupt:
        print('*interrupted*')
        continue
@@ -124,34 +130,62 @@ def main_loop(t2i,parser,log):
    print("goodbye!")


def write_log_message(opt,switches,results,logfile):
    ''' logs the name of the output image, its prompt and seed to both the terminal and the log file '''
    if opt.grid:
        _output_for_grid(switches,results,logfile)
    else:
        _output_for_individual(switches,results,logfile)
def write_log_message(t2i,opt,results,logfile):
    ''' logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata '''
    switches = _reconstruct_switches(t2i,opt)
    prompt_str = ' '.join(switches)

def _output_for_individual(switches,results,logfile):
    # when multiple images are produced in batch, then we keep track of where each starts
    last_seed = None
    img_num = 1
    batch_size = opt.batch_size or t2i.batch_size
    seenit = {}

    for r in results:
        log_message = " ".join([' ',str(r[0])+':',
                                f'"{switches[0]}"',
                                *switches[1:],f'-S {r[1]}'])
        seed = r[1]
        log_message = (f'{r[0]}: {prompt_str} -S{seed}')

        if batch_size > 1:
            if seed != last_seed:
                img_num = 1
                log_message += f' # (batch image {img_num} of {batch_size})'
            else:
                img_num += 1
                log_message += f' # (batch image {img_num} of {batch_size})'
            last_seed = seed
        print(log_message)
        logfile.write(log_message+"\n")
        logfile.flush()
        if r[0] not in seenit:
            seenit[r[0]] = True
            try:
                _write_prompt_to_png(r[0],f'{prompt_str} -S{seed}')
            except FileNotFoundError:
                print(f"Could not open file '{r[0]}' for reading")

def _output_for_grid(switches,results,logfile):
    first_seed = results[0][1]
    log_message = " ".join([' ',str(results[0][0])+':',
                            f'"{switches[0]}"',
                            *switches[1:],f'-S {results[0][1]}'])
    print(log_message)
    logfile.write(log_message+"\n")
    all_seeds = [row[1] for row in results]
    log_message = f' seeds for individual rows: {all_seeds}'
    print(log_message)
    logfile.write(log_message+"\n")
def _reconstruct_switches(t2i,opt):
    '''Normalize the prompt and switches'''
    switches = list()
    switches.append(f'"{opt.prompt}"')
    switches.append(f'-s{opt.steps or t2i.steps}')
    switches.append(f'-b{opt.batch_size or t2i.batch_size}')
    switches.append(f'-W{opt.width or t2i.width}')
    switches.append(f'-H{opt.height or t2i.height}')
    switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}')
    if opt.init_img:
        switches.append(f'-I{opt.init_img}')
    if opt.strength and opt.init_img is not None:
        switches.append(f'-f{opt.strength or t2i.strength}')
    if t2i.full_precision:
        switches.append('-F')
    return switches

def _write_prompt_to_png(path,prompt):
    info = PngImagePlugin.PngInfo()
    info.add_text("Dream",prompt)
    im = Image.open(path)
    im.save(path,"PNG",pnginfo=info)

def create_argv_parser():
    parser = argparse.ArgumentParser(description="Parse script's command line args")
    parser.add_argument("--laion400m",
@@ -159,19 +193,24 @@ def create_argv_parser():
                        "-l",
                        dest='laion400m',
                        action='store_true',
                        help="fallback to the latent diffusion (LAION4400M) weights and config")
                        help="fallback to the latent diffusion (laion400m) weights and config")
    parser.add_argument('-n','--iterations',
                        type=int,
                        default=1,
                        help="number of images to generate")
    parser.add_argument('-F','--full_precision',
                        dest='full_precision',
                        action='store_true',
                        help="use slower full precision math for calculations")
    parser.add_argument('-b','--batch_size',
                        type=int,
                        default=1,
                        help="number of images to produce per iteration (currently not working properly - producing too many images)")
    parser.add_argument('--sampler',
                        choices=['plms','ddim'],
                        default='plms',
                        help="which sampler to use")
                        help="number of images to produce per iteration (faster, but doesn't generate individual seeds)")
    parser.add_argument('--sampler','-m',
                        dest="sampler_name",
                        choices=['plms','ddim', 'klms'],
                        default='klms',
                        help="which sampler to use (klms) - can only be set on command line")
    parser.add_argument('-o',
                        '--outdir',
                        type=str,
@@ -186,7 +225,7 @@ def create_cmd_parser():
    parser.add_argument('-s','--steps',type=int,help="number of steps")
    parser.add_argument('-S','--seed',type=int,help="image seed")
    parser.add_argument('-n','--iterations',type=int,default=1,help="number of samplings to perform")
    parser.add_argument('-b','--batch_size',type=int,default=1,help="number of images to produce per sampling (currently broken)")
    parser.add_argument('-b','--batch_size',type=int,default=1,help="number of images to produce per sampling")
    parser.add_argument('-W','--width',type=int,help="image width, multiple of 64")
    parser.add_argument('-H','--height',type=int,help="image height, multiple of 64")
    parser.add_argument('-C','--cfg_scale',default=7.5,type=float,help="prompt configuration scale")

scripts/images2prompt.py (new file, 29 lines)
@@ -0,0 +1,29 @@
#!/usr/bin/env python3
'''This script reads the "Dream" Stable Diffusion prompt embedded in files generated by dream.py'''

import sys
from PIL import Image,PngImagePlugin

if len(sys.argv) < 2:
    print("Usage: images2prompt.py <file1.png> <file2.png> <file3.png>...")
    exit(-1)

filenames = sys.argv[1:]
for f in filenames:
    try:
        im = Image.open(f)
        try:
            prompt = im.text['Dream']
        except KeyError:
            prompt = ''
        print(f'{f}: {prompt}')
    except FileNotFoundError:
        sys.stderr.write(f'{f} not found\n')
        continue
    except PermissionError:
        sys.stderr.write(f'{f} could not be opened due to inadequate permissions\n')
        continue
@@ -1,5 +1,4 @@
#!/usr/bin/env python

#!/usr/bin/env python3
# Before running stable-diffusion on an internet-isolated machine,
# run this script from one with internet connectivity. The
# two machines must share a common .cache directory.

@@ -12,6 +12,10 @@ from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext

import accelerate
import k_diffusion as K
import torch.nn as nn

from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
@@ -80,6 +84,11 @@ def main():
        action='store_true',
        help="use plms sampling",
    )
    parser.add_argument(
        "--klms",
        action='store_true',
        help="use klms sampling",
    )
    parser.add_argument(
        "--laion400m",
        action='store_true',
@@ -190,6 +199,22 @@ def main():
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)

    # for klms
    model_wrap = K.external.CompVisDenoiser(model)
    accelerator = accelerate.Accelerator()
    device = accelerator.device
    class CFGDenoiser(nn.Module):
        def __init__(self, model):
            super().__init__()
            self.inner_model = model

        def forward(self, x, sigma, uncond, cond, cond_scale):
            x_in = torch.cat([x] * 2)
            sigma_in = torch.cat([sigma] * 2)
            cond_in = torch.cat([uncond, cond])
            uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
            return uncond + (cond - uncond) * cond_scale

    if opt.plms:
        sampler = PLMSSampler(model)
    else:
@@ -226,8 +251,8 @@ def main():
            with model.ema_scope():
                tic = time.time()
                all_samples = list()
                for n in trange(opt.n_iter, desc="Sampling"):
                    for prompts in tqdm(data, desc="data"):
                for n in trange(opt.n_iter, desc="Sampling", disable=not accelerator.is_main_process):
                    for prompts in tqdm(data, desc="data", disable=not accelerator.is_main_process):
                        uc = None
                        if opt.scale != 1.0:
                            uc = model.get_learned_conditioning(batch_size * [""])
@@ -235,18 +260,32 @@ def main():
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)
                        shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
                        samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
                                                         conditioning=c,
                                                         batch_size=opt.n_samples,
                                                         shape=shape,
                                                         verbose=False,
                                                         unconditional_guidance_scale=opt.scale,
                                                         unconditional_conditioning=uc,
                                                         eta=opt.ddim_eta,
                                                         x_T=start_code)


                        if not opt.klms:
                            samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
                                                             conditioning=c,
                                                             batch_size=opt.n_samples,
                                                             shape=shape,
                                                             verbose=False,
                                                             unconditional_guidance_scale=opt.scale,
                                                             unconditional_conditioning=uc,
                                                             eta=opt.ddim_eta,
                                                             x_T=start_code)
                        else:
                            sigmas = model_wrap.get_sigmas(opt.ddim_steps)
                            if start_code:
                                x = start_code
                            else:
                                x = torch.randn([opt.n_samples, *shape], device=device) * sigmas[0] # for GPU draw
                            model_wrap_cfg = CFGDenoiser(model_wrap)
                            extra_args = {'cond': c, 'uncond': uc, 'cond_scale': opt.scale}
                            samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not accelerator.is_main_process)

                        x_samples_ddim = model.decode_first_stage(samples_ddim)
                        x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

                        if opt.klms:
                            x_sample = accelerator.gather(x_samples_ddim)

                        if not opt.skip_save:
                            for x_sample in x_samples_ddim: