Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 16:18:06 -05:00)

Comparing release-1. ... release-1. (38 commits)
| Commit SHA1 |
|---|
| 2114c386ad |
| 6d2b4cbda1 |
| 562831fc4b |
| d04518e65e |
| d598b6c79d |
| 4ec21a5423 |
| b64c902354 |
| 2ada3288e7 |
| 91966e9ffa |
| 2ad73246f9 |
| d3a802db69 |
| b95908daec |
| 79add5f0b6 |
| 650ae3eb13 |
| 0e3059728c |
| b7735b3788 |
| 39b55ae016 |
| e82c5eba18 |
| 1c8ecacddf |
| eb58276a2c |
| 72a9d75330 |
| 1a7743f3c2 |
| c521ac08ee |
| 29727f3e12 |
| 51b9a1d8d3 |
| ab131cb55e |
| 269fcf92d9 |
| 8b682ac83b |
| 36e4130f1c |
| 0a7fe6f2d9 |
| 25fa0ad1f2 |
| df9f088eb4 |
| b1600d4ca3 |
| 0efc3bf780 |
| dd16fe16bb |
| 4d72644db4 |
| 7ea168227c |
| ef8ddffe46 |
.gitmodules (vendored): file deleted, 13 lines

@@ -1,13 +0,0 @@
-[submodule "taming-transformers"]
-	path = src/taming-transformers
-	url = https://github.com/CompVis/taming-transformers.git
-	ignore = dirty
-[submodule "clip"]
-	path = src/clip
-	url = https://github.com/openai/CLIP.git
-	ignore = dirty
-[submodule "k-diffusion"]
-	path = src/k-diffusion
-	url = https://github.com/lstein/k-diffusion.git
-	ignore = dirty
-
README.md: 52 changed lines

@@ -31,13 +31,7 @@ runs from the command-line (CMD or Terminal window), and does not have a GUI.
 (ldm) ~/stable-diffusion$ python3 ./scripts/dream.py
 * Initializing, be patient...
 Loading model from models/ldm/text2img-large/model.ckpt
-LatentDiffusion: Running in eps-prediction mode
-DiffusionWrapper has 872.30 M params.
-making attention of type 'vanilla' with 512 in_channels
-Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
-making attention of type 'vanilla' with 512 in_channels
-Loading Bert tokenizer from "models/bert"
-setting sampler to plms
+(...more initialization messages...)
 
 * Initialization done! Awaiting your command...
 dream> ashley judd riding a camel -n2 -s150
@@ -84,6 +78,27 @@ The --init_img (-I) option gives the path to the seed picture. --strength (-f) c
 the original will be modified, ranging from 0.0 (keep the original intact), to 1.0 (ignore the original
 completely). The default is 0.75, and ranges from 0.25-0.75 give interesting results.
+
+You may also pass a -v<count> option to generate count variants on the original image. This is done by
+passing the first generated image back into img2img the requested number of times. It generates interesting
+variants.
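
Taken together, the options described above compose on a single prompt line. A hypothetical dream> invocation in the style of the transcript earlier in this README (the image path, strength, and variant count are illustrative, not taken from the diff):

~~~~
dream> a fantasy landscape -I outputs/img-samples/000001.png -f0.6 -v3
~~~~

This asks img2img to start from the named seed picture at strength 0.6 and to produce three variants of the result.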
+
+## Barebones Web Server
+
+As of version 1.10, this distribution comes with a bare bones web server (see screenshot). To use it,
+run the command:
+
+~~~~
+(ldm) ~/stable-diffusion$ python3 scripts/dream_web.py
+~~~~
+
+You can then connect to the server by pointing your web browser at
+http://localhost:9090, or to the network name or IP address of the server.
+
+Kudos to [Tesseract Cat](https://github.com/TesseractCat) for
+contributing this code.
+
+![Dream Web Server](static/dream_web_server.png)
 
 ## Weighted Prompts
 
 You may weight different sections of the prompt to tell the sampler to attach different levels of
@@ -128,10 +143,10 @@ samples, samples scaled for a sample of the prompt and one with the init word pr
 On a RTX3090, the process for SD will take ~1h @1.6 iterations/sec.
 
 Note: According to the associated paper, the optimal number of images
-is 3-5 any more images than that and your model might not converge.
+is 3-5. Your model may not converge if you use more images than that.
 
 Training will run indefinately, but you may wish to stop it before the
-heat death of the universe, when you fine a low loss epoch or around
+heat death of the universe, when you find a low loss epoch or around
 ~5000 iterations.
 
 Once the model is trained, specify the trained .pt file when starting
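
The hunk boundary cuts this sentence off; the full README goes on to name the flag that loads the trained embedding at startup. A hypothetical invocation (the --embedding_path option and the file path are assumptions, not shown in this diff):

~~~~
(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py --embedding_path /path/to/embedding.pt
~~~~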
@@ -166,11 +181,16 @@ repository and associated paper for details and limitations.
 
 ## Changes
 
+* v1.09 (24 August 2022)
+   * A barebone web server for interactive online generation of txt2img and img2img.
+   * A new -v option allows you to generate multiple variants of an initial image
+     in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
+   * Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))
+   * Enabled all of the samplers from k_diffusion
+
 * v1.08 (24 August 2022)
    * Escape single quotes on the dream> command before trying to parse. This avoids
      parse errors.
-   * A new -v option allows you to generate multiple variants of an initial image
-     in img2img mode. (kudos to Oceanswave)
    * Removed instruction to get Python3.8 as first step in Windows install.
      Anaconda3 does it for you.
    * Added bounds checks for numeric arguments that could cause crashes.
@@ -449,13 +469,15 @@ to send me an email if you use and like the script.
 
 *Contributions by:*
 [Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
-[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan] (https://github.com/Oceanswave],
-[nicolai256](https://github.com/nicolai256], [Benjamin Warner](https://github.com/warner-benjamin),
-and [tildebyte](https://github.com/tildebyte)
+[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave),
+[nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
+[tildebyte](https://github.com/tildebyte),
+and [Tesseract Cat](https://github.com/TesseractCat)
+
 
 Original portions of the software are Copyright (c) 2020 Lincoln D. Stein (https://github.com/lstein)
 
 #Further Reading
 
 Please see the original README for more information on this software
-and underlying algorithm, located in the file README-CompViz.md. 
+and underlying algorithm, located in the file README-CompViz.md.
@@ -18,14 +18,13 @@ dependencies:
   - pytorch-lightning==1.4.2
   - omegaconf==2.1.1
   - test-tube>=0.7.5
-  - streamlit>=0.73.1
-  - pillow==9.0.1
+  - streamlit==1.12.0
+  - pillow==9.2.0
   - einops==0.3.0
   - torch-fidelity==0.3.0
   - transformers==4.19.2
   - torchmetrics==0.6.0
-  - kornia==0.6
-  - accelerate==0.12.0
+  - kornia==0.6.0
   - -e git+https://github.com/openai/CLIP.git@main#egg=clip
   - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
   - -e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
@@ -1,8 +1,7 @@
-'''wrapper around part of Karen Crownson's k-duffsion library, making it call compatible with other Samplers'''
+'''wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers'''
 import k_diffusion as K
 import torch
 import torch.nn as nn
-import accelerate
 
 class CFGDenoiser(nn.Module):
     def __init__(self, model):
@@ -17,12 +16,11 @@ class CFGDenoiser(nn.Module):
         return uncond + (cond - uncond) * cond_scale
 
 class KSampler(object):
-    def __init__(self,model,schedule="lms", **kwargs):
+    def __init__(self, model, schedule="lms", device="cuda", **kwargs):
         super().__init__()
-        self.model = K.external.CompVisDenoiser(model)
-        self.accelerator = accelerate.Accelerator()
-        self.device = self.accelerator.device
+        self.model = K.external.CompVisDenoiser(model)
         self.schedule = schedule
+        self.device = device
 
     def forward(self, x, sigma, uncond, cond, cond_scale):
         x_in = torch.cat([x] * 2)
@@ -67,8 +65,5 @@ class KSampler(object):
         x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] # for GPU draw
         model_wrap_cfg = CFGDenoiser(self.model)
         extra_args = {'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}
-        return (K.sampling.__dict__[f'sample_{self.schedule}'](model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not self.accelerator.is_main_process),
+        return (K.sampling.__dict__[f'sample_{self.schedule}'](model_wrap_cfg, x, sigmas, extra_args=extra_args),
             None)
-
-    def gather(samples_ddim):
-        return self.accelerator.gather(samples_ddim)
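
The context line kept above, `return uncond + (cond - uncond) * cond_scale`, is the classifier-free guidance step: the denoiser linearly extrapolates from the unconditional prediction toward the prompt-conditioned one. A minimal sketch of the arithmetic (the tensor values are illustrative, not from the repository):

~~~~
import torch

# Stand-ins for the denoiser's unconditional and conditional
# predictions; the real tensors have latent-image shape.
uncond = torch.tensor([0.0, 1.0])
cond   = torch.tensor([1.0, 1.0])

for cond_scale in (1.0, 7.5):
    print(cond_scale, uncond + (cond - uncond) * cond_scale)
# 1.0 -> tensor([1., 1.])          (exactly the conditional prediction)
# 7.5 -> tensor([7.5000, 1.0000])  (the prompt-specific direction amplified;
#        7.5 is also the default Cfg Scale in the web form added below)
~~~~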
@@ -467,17 +467,17 @@ The vast majority of these arguments default to reasonable values.
         elif self.sampler_name == 'ddim':
             self.sampler = DDIMSampler(self.model, device=self.device)
         elif self.sampler_name == 'k_dpm_2_a':
-            self.sampler = KSampler(self.model,'dpm_2_ancestral')
+            self.sampler = KSampler(self.model, 'dpm_2_ancestral', device=self.device)
         elif self.sampler_name == 'k_dpm_2':
-            self.sampler = KSampler(self.model,'dpm_2')
+            self.sampler = KSampler(self.model, 'dpm_2', device=self.device)
         elif self.sampler_name == 'k_euler_a':
-            self.sampler = KSampler(self.model,'euler_ancestral')
+            self.sampler = KSampler(self.model, 'euler_ancestral', device=self.device)
         elif self.sampler_name == 'k_euler':
-            self.sampler = KSampler(self.model,'euler')
+            self.sampler = KSampler(self.model, 'euler', device=self.device)
         elif self.sampler_name == 'k_heun':
-            self.sampler = KSampler(self.model,'heun')
+            self.sampler = KSampler(self.model, 'heun', device=self.device)
         elif self.sampler_name == 'k_lms':
-            self.sampler = KSampler(self.model,'lms')
+            self.sampler = KSampler(self.model, 'lms', device=self.device)
         else:
             msg = f'unsupported sampler {self.sampler_name}, defaulting to plms'
             self.sampler = PLMSSampler(self.model, device=self.device)
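
Since the six k_* branches above differ only in the schedule string passed to KSampler, the same dispatch can be expressed table-driven. This is a sketch only, not code from the repository; it assumes KSampler has been imported from its defining module (path not shown in this diff):

~~~~
# Maps the CLI sampler names to k-diffusion schedule names, exactly
# as enumerated in the elif chain above.
K_SCHEDULES = {
    'k_dpm_2_a': 'dpm_2_ancestral',
    'k_dpm_2':   'dpm_2',
    'k_euler_a': 'euler_ancestral',
    'k_euler':   'euler',
    'k_heun':    'heun',
    'k_lms':     'lms',
}

def make_k_sampler(model, sampler_name, device):
    """Table-driven equivalent of the elif chain (hypothetical helper)."""
    if sampler_name not in K_SCHEDULES:
        raise ValueError(f'unsupported sampler {sampler_name}')
    return KSampler(model, K_SCHEDULES[sampler_name], device=device)
~~~~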
@@ -494,6 +494,7 @@ The vast majority of these arguments default to reasonable values.
         sd = pl_sd["state_dict"]
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
+        model.to(self.device)
         model.eval()
         if self.full_precision:
             print('Using slower but more accurate full-precision math (--full_precision)')
@@ -1,4 +1,3 @@
-accelerate==0.12.0
 albumentations==0.4.3
 einops==0.3.0
 huggingface-hub==0.8.1
@@ -8,15 +7,15 @@ kornia==0.6.0
 numpy==1.19.2
 omegaconf==2.1.1
 opencv-python==4.1.2.30
-pillow==9.0.1
+pillow==9.2.0
 pudb==2019.2
-pytorch
+torch==1.11.0
+torchvision==0.12.0
 pytorch-lightning==1.4.2
 streamlit==1.12.0
 test-tube>=0.7.5
 torch-fidelity==0.3.0
 torchmetrics==0.6.0
-torchvision
 transformers==4.19.2
 -e git+https://github.com/openai/CLIP.git@main#egg=clip
 -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
@@ -183,6 +183,7 @@ def main_loop(t2i,parser,log,infile):
         if opt.variants is not None:
             print(f"Generating {opt.variants} variant(s)...")
             newopt = copy.deepcopy(opt)
+            newopt.iterations = 1
             newopt.variants = None
             for r in results:
                 newopt.init_img = r[0]
scripts/dream_web.py: new file, 108 lines

@@ -0,0 +1,108 @@
import json
import base64
import mimetypes
import os
from pytorch_lightning import logging
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer

print("Loading model...")
from ldm.simplet2i import T2I
model = T2I(sampler_name='k_lms')

# to get rid of annoying warning messages from pytorch
import transformers
transformers.logging.set_verbosity_error()
logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)

print("Initializing model, be patient...")
model.load_model()

class DreamServer(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path == "/":
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            with open("./static/dream_web/index.html", "rb") as content:
                self.wfile.write(content.read())
        elif os.path.exists("." + self.path):
            mime_type = mimetypes.guess_type(self.path)[0]
            if mime_type is not None:
                self.send_response(200)
                self.send_header("Content-type", mime_type)
                self.end_headers()
                with open("." + self.path, "rb") as content:
                    self.wfile.write(content.read())
            else:
                self.send_response(404)
        else:
            self.send_response(404)

    def do_POST(self):
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()

        content_length = int(self.headers['Content-Length'])
        post_data = json.loads(self.rfile.read(content_length))
        prompt = post_data['prompt']
        initimg = post_data['initimg']
        iterations = int(post_data['iterations'])
        steps = int(post_data['steps'])
        width = int(post_data['width'])
        height = int(post_data['height'])
        cfgscale = float(post_data['cfgscale'])
        seed = None if int(post_data['seed']) == -1 else int(post_data['seed'])

        print(f"Request to generate with prompt: {prompt}")

        outputs = []
        if initimg is None:
            # Run txt2img
            outputs = model.txt2img(prompt,
                                    iterations=iterations,
                                    cfg_scale = cfgscale,
                                    width = width,
                                    height = height,
                                    seed = seed,
                                    steps = steps)
        else:
            # Decode initimg as base64 to temp file
            with open("./img2img-tmp.png", "wb") as f:
                initimg = initimg.split(",")[1] # Ignore mime type
                f.write(base64.b64decode(initimg))

            # Run img2img
            outputs = model.img2img(prompt,
                                    init_img = "./img2img-tmp.png",
                                    iterations = iterations,
                                    cfg_scale = cfgscale,
                                    seed = seed,
                                    steps = steps)
            # Remove the temp file
            os.remove("./img2img-tmp.png")

        print(f"Prompt generated with output: {outputs}")

        post_data['initimg'] = '' # Don't send init image back
        outputs = [x + [post_data] for x in outputs] # Append config to each output
        result = {'outputs': outputs}
        self.wfile.write(bytes(json.dumps(result), "utf-8"))

if __name__ == "__main__":
    # Change working directory to the stable-diffusion directory
    os.chdir(
        os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))
    )

    # Start server
    dream_server = ThreadingHTTPServer(("0.0.0.0", 9090), DreamServer)
    print("\n\n* Started Stable Diffusion dream server! Point your browser at http://localhost:9090 or use the host's DNS name or IP address. *")

    try:
        dream_server.serve_forever()
    except KeyboardInterrupt:
        pass

    dream_server.server_close()
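
Because dream_web.py speaks plain JSON over HTTP, the endpoint can be exercised without the bundled page. A minimal client sketch: the field names and the -1 random-seed convention come from do_POST above, while the prompt text and the assumption of a server already running on localhost:9090 are illustrative:

~~~~
import json
from urllib.request import Request, urlopen

payload = {
    "prompt": "an astronaut riding a camel",  # illustrative prompt
    "initimg": None,    # null selects the txt2img branch of do_POST
    "iterations": 1,
    "steps": 50,
    "width": 512,
    "height": 512,
    "cfgscale": 7.5,
    "seed": -1,         # -1 asks the server to pick a random seed
}
req = Request("http://localhost:9090/", data=json.dumps(payload).encode("utf-8"))
with urlopen(req) as resp:
    # Each entry is [image_path, seed, config]; see appendOutput() in
    # static/dream_web/index.js below.
    print(json.load(resp)["outputs"])
~~~~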
@@ -12,7 +12,6 @@ from pytorch_lightning import seed_everything
 from torch import autocast
 from contextlib import contextmanager, nullcontext
 
-import accelerate
 import k_diffusion as K
 import torch.nn as nn
 
@@ -201,8 +200,6 @@ def main():
 
     #for klms
     model_wrap = K.external.CompVisDenoiser(model)
-    accelerator = accelerate.Accelerator()
-    device = accelerator.device
     class CFGDenoiser(nn.Module):
         def __init__(self, model):
             super().__init__()
@@ -251,8 +248,8 @@ def main():
     with model.ema_scope():
         tic = time.time()
         all_samples = list()
-        for n in trange(opt.n_iter, desc="Sampling", disable =not accelerator.is_main_process):
-            for prompts in tqdm(data, desc="data", disable =not accelerator.is_main_process):
+        for n in trange(opt.n_iter, desc="Sampling"):
+            for prompts in tqdm(data, desc="data"):
                 uc = None
                 if opt.scale != 1.0:
                     uc = model.get_learned_conditioning(batch_size * [""])
@@ -279,13 +276,10 @@ def main():
                 x = torch.randn([opt.n_samples, *shape], device=device) * sigmas[0] # for GPU draw
                 model_wrap_cfg = CFGDenoiser(model_wrap)
                 extra_args = {'cond': c, 'uncond': uc, 'cond_scale': opt.scale}
-                samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not accelerator.is_main_process)
+                samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args)
 
                 x_samples_ddim = model.decode_first_stage(samples_ddim)
                 x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
 
-                if opt.klms:
-                    x_sample = accelerator.gather(x_samples_ddim)
-
                 if not opt.skip_save:
                     for x_sample in x_samples_ddim:
Submodule src/clip deleted from d50d76daa6
Submodule src/k-diffusion updated: db57990687...ef1bf07627
Submodule src/taming-transformers deleted from 24268930bf
static/dream_web/index.css: new file, 61 lines

@@ -0,0 +1,61 @@
* {
    font-family: 'Arial';
}
#header {
    text-decoration: dotted underline;
}
#search {
    margin-top: 20vh;
    margin-left: auto;
    margin-right: auto;
    max-width: 800px;

    text-align: center;
}
fieldset {
    border: none;
}
#fieldset-search {
    display: flex;
}
#prompt {
    flex-grow: 1;

    border-radius: 20px 0px 0px 20px;
    padding: 5px 10px 5px 10px;
    border: 1px solid black;
    border-right: none;
    outline: none;
}
#submit {
    border-radius: 0px 20px 20px 0px;
    padding: 5px 10px 5px 10px;
    border: 1px solid black;
}
#results {
    text-align: center;
    max-width: 1000px;
    margin: auto;
    padding-top: 10px;
}
img {
    cursor: pointer;
    height: 30vh;
    border-radius: 5px;
    margin: 10px;
}
#fieldset-config {
    line-height:2em;
}
input[type="number"] {
    width: 60px;
}
#seed {
    width: 150px;
}
hr {
    width: 200px;
}
label {
    white-space: nowrap;
}
static/dream_web/index.html: new file, 48 lines

@@ -0,0 +1,48 @@
<html>
<head>
  <title>Stable Diffusion Dream Server</title>
  <link rel="icon" href="data:,">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <link rel="stylesheet" href="static/dream_web/index.css">
  <script src="static/dream_web/index.js"></script>
</head>
<body>
  <div id="search">
    <h2 id="header">Stable Diffusion Dream Server</h2>

    <form id="generate-form" method="post" action="#">
      <fieldset id="fieldset-search">
        <input type="text" id="prompt" name="prompt">
        <input type="submit" id="submit" value="Generate">
      </fieldset>
      <fieldset id="fieldset-config">
        <label for="iterations">Images to generate:</label>
        <input value="1" type="number" id="iterations" name="iterations">
        <label for="steps">Steps:</label>
        <input value="50" type="number" id="steps" name="steps">
        <label for="cfgscale">Cfg Scale:</label>
        <input value="7.5" type="number" id="cfgscale" name="cfgscale" step="any">
        <span>•</span>
        <label title="Set to multiple of 64" for="width">Width:</label>
        <input value="512" type="number" id="width" name="width">
        <label title="Set to multiple of 64" for="height">Height:</label>
        <input value="512" type="number" id="height" name="height">
        <br>
        <label title="Upload an image to use img2img" for="initimg">Img2Img Init:</label>
        <input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
        <label title="Set to -1 for random seed" for="seed">Seed:</label>
        <input value="-1" type="number" id="seed" name="seed">
        <button type="button" id="reset">↺</button>
      </fieldset>
    </form>
    <div id="about">For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub site</a></div>
  </div>
  <hr>
  <div id="results">
    <div id="no-results-message">
      <i><p>No results...</p></i>
    </div>
  </div>
</body>
</html>
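
One cross-file detail worth noting: every input's name attribute above (prompt, iterations, steps, cfgscale, width, height, initimg, seed) matches a key that do_POST reads in scripts/dream_web.py, and index.js below serializes the whole form with FormData, so the markup, the client script, and the server stay in sync by construction.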
static/dream_web/index.js: new file, 101 lines

@@ -0,0 +1,101 @@
function toBase64(file) {
    return new Promise((resolve, reject) => {
        const r = new FileReader();
        r.readAsDataURL(file);
        r.onload = () => resolve(r.result);
        r.onerror = (error) => reject(error);
    });
}

function appendOutput(output) {
    let outputNode = document.createElement("img");
    outputNode.src = output[0];

    let outputConfig = output[2];
    let altText = output[1].toString() + " | " + outputConfig.prompt;
    outputNode.alt = altText;
    outputNode.title = altText;

    // Reload image config
    outputNode.addEventListener('click', () => {
        let form = document.querySelector("#generate-form");
        for (const [k, v] of new FormData(form)) {
            form.querySelector(`*[name=${k}]`).value = outputConfig[k];
        }
        document.querySelector("#seed").value = output[1];

        saveFields(document.querySelector("#generate-form"));
    });

    document.querySelector("#results").prepend(outputNode);
}

function appendOutputs(outputs) {
    for (const output of outputs) {
        appendOutput(output);
    }
}

function saveFields(form) {
    for (const [k, v] of new FormData(form)) {
        if (typeof v !== 'object') { // Don't save 'file' type
            localStorage.setItem(k, v);
        }
    }
}
function loadFields(form) {
    for (const [k, v] of new FormData(form)) {
        const item = localStorage.getItem(k);
        if (item != null) {
            form.querySelector(`*[name=${k}]`).value = item;
        }
    }
}

async function generateSubmit(form) {
    const prompt = document.querySelector("#prompt").value;

    // Convert file data to base64
    let formData = Object.fromEntries(new FormData(form));
    formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;

    // Post as JSON
    fetch(form.action, {
        method: form.method,
        body: JSON.stringify(formData),
    }).then(async (result) => {
        let data = await result.json();

        // Re-enable form, remove no-results-message
        form.querySelector('fieldset').removeAttribute('disabled');
        document.querySelector("#prompt").value = prompt;

        if (data.outputs.length != 0) {
            document.querySelector("#no-results-message")?.remove();
            appendOutputs(data.outputs);
        } else {
            alert("Error occurred while generating.");
        }
    });

    // Disable form while generating
    form.querySelector('fieldset').setAttribute('disabled','');
    document.querySelector("#prompt").value = `Generating: "${prompt}"`;
}

window.onload = () => {
    document.querySelector("#generate-form").addEventListener('submit', (e) => {
        e.preventDefault();
        const form = e.target;

        generateSubmit(form);
    });
    document.querySelector("#generate-form").addEventListener('change', (e) => {
        saveFields(e.target.form);
    });
    document.querySelector("#reset").addEventListener('click', (e) => {
        document.querySelector("#seed").value = -1;
        saveFields(e.target.form);
    });
    loadFields(document.querySelector("#generate-form"));
};
static/dream_web_server.png: new binary file, 536 KiB (binary not shown)