diff --git a/README.md b/README.md
index 51c0f8828e..d944be4951 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,11 @@

+
+
+
+
+# **Stable Diffusion Dream Script**
[![discord badge]][discord link]
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
diff --git a/docs/assets/join-us-on-discord-image.png b/docs/assets/join-us-on-discord-image.png
new file mode 100644
index 0000000000..53e4ee0fe0
Binary files /dev/null and b/docs/assets/join-us-on-discord-image.png differ
diff --git a/docs/assets/step1.png b/docs/assets/step1.png
new file mode 100644
index 0000000000..6309f41f20
Binary files /dev/null and b/docs/assets/step1.png differ
diff --git a/docs/assets/step2.png b/docs/assets/step2.png
new file mode 100644
index 0000000000..06027289b2
Binary files /dev/null and b/docs/assets/step2.png differ
diff --git a/docs/assets/step4.png b/docs/assets/step4.png
new file mode 100644
index 0000000000..c24db6b470
Binary files /dev/null and b/docs/assets/step4.png differ
diff --git a/docs/assets/step5.png b/docs/assets/step5.png
new file mode 100644
index 0000000000..b4e9b50576
Binary files /dev/null and b/docs/assets/step5.png differ
diff --git a/docs/assets/step6.png b/docs/assets/step6.png
new file mode 100644
index 0000000000..c43140c1aa
Binary files /dev/null and b/docs/assets/step6.png differ
diff --git a/docs/assets/step7.png b/docs/assets/step7.png
new file mode 100644
index 0000000000..a575af28b2
Binary files /dev/null and b/docs/assets/step7.png differ
diff --git a/docs/features/INPAINTING.md b/docs/features/INPAINTING.md
index 85f631ac12..317dc99698 100644
--- a/docs/features/INPAINTING.md
+++ b/docs/features/INPAINTING.md
@@ -37,5 +37,44 @@ We are hoping to get rid of the need for this workaround in an upcoming release.
5. Open the Layers toolbar (^L) and select "Floating Selection"
6. Set opacity to 0%
7. Export as PNG
+8. In the export dialogue, make sure the "Save colour values from
+ transparent pixels" checkbox is selected.
+
+
+## Recipe for Adobe Photoshop
+
+1. Open image in Photoshop
+
+
+
+
+2. Use any of the selection tools (Marquee, Lasso, or Wand) to select the area you desire to inpaint.
+
+
+
+
+3. Because we'll be applying a mask over the area we want to preserve, you should now select the inverse by using the Shift + Ctrl + I shortcut, or right-clicking and using the "Select Inverse" option.
+
+4. You'll now create a mask by selecting the image layer, and masking the selection. Make sure that you don't delete any of the underlying image, or your inpainting results will be dramatically impacted.
+
+
+
+
+5. Make sure to hide any background layers that are present. You should see the mask applied to your image layer, and the image on your canvas should display the checkered background.
+
+
+
+
+
+
+
+
+6. Save the image as a transparent PNG by using the "Save a Copy" option in the File menu, or using the Alt + Ctrl + S keyboard shortcut.
+
+7. After following the inpainting instructions above (either through the CLI or the Web UI), marvel at your newfound ability to selectively dream. Lookin' good!
+
+
+
+
8. In the export dialogue, Make sure the "Save colour values from transparent pixels" checkbox is
selected.
diff --git a/ldm/dream/args.py b/ldm/dream/args.py
index 8a8a3a41b3..7d8f473b4a 100644
--- a/ldm/dream/args.py
+++ b/ldm/dream/args.py
@@ -602,6 +602,16 @@ def metadata_dumps(opt,
This is intended to be turned into JSON and stored in the
"sd
'''
+
+ # top-level metadata minus `image` or `images`
+ metadata = {
+ 'model' : 'stable diffusion',
+ 'model_id' : opt.model,
+ 'model_hash' : model_hash,
+ 'app_id' : APP_ID,
+ 'app_version' : APP_VERSION,
+ }
+
# add some RFC266 fields that are generated internally, and not as
# user args
image_dict = opt.to_dict(
@@ -647,22 +657,22 @@ def metadata_dumps(opt,
else:
rfc_dict['type'] = 'txt2img'
- images = []
if len(seeds)==0 and opt.seed:
seeds=[seed]
-
- for seed in seeds:
- rfc_dict['seed'] = seed
- images.append(copy.copy(rfc_dict))
- return {
- 'model' : 'stable diffusion',
- 'model_id' : opt.model,
- 'model_hash' : model_hash,
- 'app_id' : APP_ID,
- 'app_version' : APP_VERSION,
- 'images' : images,
- }
+ if opt.grid:
+ images = []
+ for seed in seeds:
+ rfc_dict['seed'] = seed
+ images.append(copy.copy(rfc_dict))
+ metadata['images'] = images
+ else:
+ # there should only ever be a single seed if we did not generate a grid
+ assert len(seeds) == 1, 'Expected a single seed'
+ rfc_dict['seed'] = seeds[0]
+ metadata['image'] = rfc_dict
+
+ return metadata
def metadata_loads(metadata):
'''
diff --git a/ldm/dream/conditioning.py b/ldm/dream/conditioning.py
index e2e3b0116d..ed2d4ef431 100644
--- a/ldm/dream/conditioning.py
+++ b/ldm/dream/conditioning.py
@@ -38,14 +38,14 @@ def get_uc_and_c(prompt, model, log_tokens=False, skip_normalize=False):
c = torch.zeros_like(uc)
# normalize each "sub prompt" and add it
for subprompt, weight in weighted_subprompts:
- log_tokenization(subprompt, model, log_tokens)
+ log_tokenization(subprompt, model, log_tokens, weight)
c = torch.add(
c,
model.get_learned_conditioning([subprompt]),
alpha=weight,
)
else: # just standard 1 prompt
- log_tokenization(prompt, model, log_tokens)
+ log_tokenization(prompt, model, log_tokens, 1)
c = model.get_learned_conditioning([prompt])
uc = model.get_learned_conditioning([unconditioned_words])
return (uc, c)
@@ -86,7 +86,7 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
# shows how the prompt is tokenized
# usually tokens have '' to indicate end-of-word,
# but for readability it has been replaced with ' '
-def log_tokenization(text, model, log=False):
+def log_tokenization(text, model, log=False, weight=1):
if not log:
return
tokens = model.cond_stage_model.tokenizer._tokenize(text)
@@ -103,8 +103,8 @@ def log_tokenization(text, model, log=False):
usedTokens += 1
else: # over max token length
discarded = discarded + f"\x1b[0;3{s};40m{token}"
- print(f"\n>> Tokens ({usedTokens}):\n{tokenized}\x1b[0m")
- if discarded != "":
- print(
- f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
- )
+ print(f"\n>> Tokens ({usedTokens}), Weight ({weight:.2f}):\n{tokenized}\x1b[0m")
+ if discarded != "":
+ print(
+ f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
+ )
diff --git a/ldm/restoration/codeformer/weights/.gitkeep b/ldm/restoration/codeformer/weights/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2