mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-02-03 20:55:02 -05:00
commit 9bb0b5d0036c4dffbb72ce11e097fae4ab63defd Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Sat Oct 15 23:43:41 2022 +0200 undo local_files_only stuff commit eed93f5d30c34cfccaf7497618ae9af17a5ecfbb Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Sat Oct 15 23:40:37 2022 +0200 Revert "Merge branch 'development-invoke' into fix-prompts" This reverts commit 7c40892a9f184f7e216f14d14feb0411c5a90e24, reversing changes made to e3f2dd62b0548ca6988818ef058093a4f5b022f2. commit f06d6024e345c69e6d5a91ab5423925a68ee95a7 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 13 23:30:16 2022 +0200 more efficiently handle multiple conditioning commit 5efdfcbcd980ce6202ab74e7f90e7415ce7260da Merge: b9c0dc5 ac08bb6 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 13 14:51:01 2022 +0200 Merge branch 'optional-disable-karras-schedule' into fix-prompts commit ac08bb6fd25e19a9d35cf6c199e66500fb604af1 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 13 14:50:43 2022 +0200 append '*use_model_sigmas*' to prompt string to use model sigmas commit 70d8c05a3ff329409f76204f4af94e55d468ab8b Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 13 12:12:17 2022 +0200 make karras scheduling switchable commitd60df54f69replaced the model's own scheduling with karras scheduling. this has changed image generation (seems worse now?) this commit wraps the change in a bool. 
commit b9c0dc5f1a658a0e6c3936000e9ae559e1c7a1db Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 20:16:00 2022 +0200 add test of more complex conjunction commit 9ac0c15cc0d7b5f6df3289d3ad474260972a17be Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 17:18:25 2022 +0200 improve comments commit ad33bce60590b87b2a93e90f16dc9d3e935d04a5 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 17:04:46 2022 +0200 put back thresholding stuff commit 4852c698a325049834ba0d4b358f07210bc7171a Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 14:25:02 2022 +0200 notes on improving conjunction efficiency commit a53bb1e5b68025d09642b935ae6a9a015cfaf2d6 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 14:14:33 2022 +0200 optional weights support for Conjunction commit fec79ab15e4f0c84dd61cb1b45a5e6a72ae4aaeb Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 12:07:27 2022 +0200 fix blend error and log parsing output commit 1f751c2a039f9c97af57b18e0f019512631d5a25 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 10:33:33 2022 +0200 fix broken euler sampler commit 02f8148d17efe4b6bde8d29b827092a0626363ee Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 10:24:20 2022 +0200 cleanup prompt parser commit 8028d49ae6c16c0d6ec9c9de9c12d56c32201421 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Wed Oct 12 10:14:18 2022 +0200 explicit conjunction, improve flattening logic commit 8a1710892185f07eb77483f7edae0fc4d6bbb250 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 22:59:30 2022 +0200 adapt multi-conditioning to also work with ddim commit 53802a839850d0d1ff017c6bafe457c4bed750b0 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 22:31:42 2022 +0200 unconditioning is also fancy-prompt-syntaxable commit 7c40892a9f184f7e216f14d14feb0411c5a90e24 Merge: e3f2dd6 dbe0da4 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 
21:39:54 2022 +0200 Merge branch 'development-invoke' into fix-prompts commit e3f2dd62b0548ca6988818ef058093a4f5b022f2 Merge: eef0e4806f542eAuthor: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 21:38:09 2022 +0200 Merge remote-tracking branch 'upstream/development' into fix-prompts commit eef0e484c2eaa1bd4e0e0b1d3f8d7bba38478144 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 21:26:25 2022 +0200 fix run-on paren-less attention, add some comments commit fd29afdf0e9f5e0cdc60239e22480c36ca0aaeca Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 21:03:02 2022 +0200 python 3.9 compatibility commit 26f7646eef7f39bc8f7ce805e747df0f723464da Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 20:58:42 2022 +0200 first pass connecting PromptParser to conditioning commit ae53dff3796d7b9a5e7ed30fa1edb0374af6cd8d Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 20:51:15 2022 +0200 update frontend dist commit 9be4a59a2d76f49e635474b5984bfca826a5dab4 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 19:01:39 2022 +0200 fix issues with correctness checking FlattenedPrompt commit 3be212323eab68e72a363a654124edd9809e4cf0 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 18:43:16 2022 +0200 parsing nested seems to work pretty ok commit acd73eb08cf67c27cac8a22934754321256f56a9 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 18:26:17 2022 +0200 wip introducing FlattenedPrompt class commit 71698d5c7c2ac855b690d8ef67e8830148c59eda Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 15:59:42 2022 +0200 recursive attention weighting seems to actually work commit a4e1ec6b20deb7cc0cd12737bdbd266e56144709 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 15:06:24 2022 +0200 now apparently almost supported nested attention commit da76fd1ddf22a3888cdc08fd4fed38d8b178e524 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 11 13:23:37 2022 
+0200 wip prompt parsing commit dbe0da4572c2ac22f26a7afd722349a5680a9e47 Author: Kyle Schouviller <kyle0654@hotmail.com> Date: Mon Oct 10 22:32:35 2022 -0700 Adding node-based invocation apps commit 8f2a2ffc083366de74d7dae471b50b6f98a7c5f8 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Mon Oct 10 19:03:18 2022 +0200 fix merge issues commit 73118dee2a8f4891700756e014caf1c9ca629267 Merge: fd0084412413b0Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Mon Oct 10 12:42:48 2022 +0200 Merge remote-tracking branch 'upstream/development' into fix-prompts commit fd0084413541013c2cf71e006af0392719bef53d Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Mon Oct 10 12:39:38 2022 +0200 wip prompt parsing commit 0be9363db9307859d2b65cffc6af01f57d7873a4 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Mon Oct 10 03:20:06 2022 +0200 better +/- attention parsing commit 5383f691874a58ab01cda1e4fac6cf330146526a Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Mon Oct 10 02:27:47 2022 +0200 prompt parser seems to work commit 591d098a33ce35462428d8c169501d8ed73615ab Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Sun Oct 9 20:25:37 2022 +0200 supports weighting unconditioning, cross-attention with | commit 7a7220563aa05a2980235b5b908362f66b728309 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Sun Oct 9 18:15:56 2022 +0200 i think cross attention might be working? 
commit 951ed391e7126bff228c18b2db304ad28d59644a Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Sun Oct 9 16:04:54 2022 +0200 weighted CFG denoiser working with a single item commit ee532a0c2827368c9e45a6a5f3975666402873da Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Sun Oct 9 06:33:40 2022 +0200 wip probably doesn't work or compile commit 14654bcbd207b9ca28a6cbd37dbd967d699b062d Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Fri Oct 7 18:11:48 2022 +0200 use tan() to calculate embedding weight for <1 attentions commit 1a8e76b31aa5abf5150419ebf3b29d4658d07f2b Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Fri Oct 7 16:14:54 2022 +0200 fix bad math.max reference commit f697ff896875876ccaa1e5527405bdaa7ed27cde Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Fri Oct 7 15:55:57 2022 +0200 respect http[s]x protocol when making socket.io middleware commit 41d3dd4eeae8d4efb05dfb44fc6d8aac5dc468ab Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Fri Oct 7 13:29:54 2022 +0200 fractional weighting works, by blending with prompts excluding the word commit 087fb6dfb3e8f5e84de8c911f75faa3e3fa3553c Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Fri Oct 7 10:52:03 2022 +0200 wip doing weights <1 by averaging with conditioning absent the lower-weighted fragment commit 3c49e3f3ec7c18dc60f3e18ed2f7f0d97aad3a47 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Fri Oct 7 10:36:15 2022 +0200 notate CFGDenoiser, perhaps commit d2bcf1bb522026ebf209ad0103f6b370383e5070 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 6 05:04:47 2022 +0200 hack blending syntax to test attention weighting more extensively commit 94904ef2cf917f74ec23ef7a570e12ff8255b048 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 6 04:56:37 2022 +0200 conditioning works, apparently commit 7c6663ddd70f665fd1308b6dd74f92ca393a8df5 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Thu Oct 6 02:20:24 2022 +0200 attention weighting, definitely 
works in positive direction commit 5856d453a9b020bc1a28ff643ae1f58c12c9be73 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 4 19:02:14 2022 +0200 wip bubbling weights down commit a2ed14fd9b7d3cb36b6c5348018b364c76d1e892 Author: Damian at mba <damian@frey.NOSPAMco.nz> Date: Tue Oct 4 17:35:39 2022 +0200 bring in changes from PC
332 lines
14 KiB
Python
332 lines
14 KiB
Python
import pyparsing
|
|
import pyparsing as pp
|
|
from pyparsing import original_text_for
|
|
|
|
|
|
class Prompt():
    """A parsed prompt: an ordered sequence of Fragment, Attention, or
    CFGScale nodes as produced by the parser.

    Raises PromptParser.ParsingException if any part is not one of the
    recognised node types.
    """

    def __init__(self, parts: list):
        # only these node types may appear inside a Prompt
        allowed_types = [Fragment, Attention, CFGScale]
        for c in parts:
            if type(c) not in allowed_types:
                raise PromptParser.ParsingException(
                    f"Prompt cannot contain {type(c)}, only {allowed_types} are allowed")
        self.children = parts

    def __repr__(self):
        return f"Prompt:{self.children}"

    def __eq__(self, other):
        if type(other) is not Prompt:
            return False
        return other.children == self.children
|
|
|
|
class FlattenedPrompt():
    """A prompt reduced to a flat list of ('text', weight) tuples, with all
    nested attention/weighting already multiplied through.

    Raises PromptParser.ParsingException if any part is not a
    (str, float-or-int) tuple.
    """

    def __init__(self, parts: list):
        # verify type correctness: every part must be a ('text', weight) tuple
        for c in parts:
            if type(c) is not tuple:
                raise PromptParser.ParsingException(
                    f"FlattenedPrompt cannot contain {type(c)}, only ('text', weight) tuples are allowed")
            text, weight = c[0], c[1]
            if type(text) is not str:
                raise PromptParser.ParsingException(
                    f"FlattenedPrompt cannot contain {type(c)}, only ('text', weight) tuples are allowed")
            if type(weight) is not float and type(weight) is not int:
                raise PromptParser.ParsingException(
                    f"FlattenedPrompt cannot contain {type(c)}, only ('text', weight) tuples are allowed")
        # all looks good
        self.children = parts

    def __repr__(self):
        return f"FlattenedPrompt:{self.children}"

    def __eq__(self, other):
        if type(other) is not FlattenedPrompt:
            return False
        return other.children == self.children
|
|
|
|
|
|
class Attention():
    """Attention weighting applied to a list of child nodes.

    :param weight: multiplicative weight applied to the children (e.g. 1.1
        per '+', 0.9 per '-', or an explicit numeric weight).
    :param children: the nodes the weight applies to.
    """

    def __init__(self, weight: float, children: list):
        self.weight = weight
        self.children = children
        #print(f"A: requested attention '{children}' to {weight}")

    def __repr__(self):
        return f"Attention:'{self.children}' @ {self.weight}"

    def __eq__(self, other):
        # bug fix: this previously compared `other.fragment == self.fragment`,
        # but Attention has no 'fragment' attribute, so every comparison raised
        # AttributeError. Compare the actual state (weight and children).
        return type(other) is Attention and other.weight == self.weight and other.children == self.children
|
|
|
|
|
|
class CFGScale():
    """Associates a classifier-free-guidance scale factor with a text fragment.

    :param scale_factor: numeric scale to apply.
    :param fragment: the text the scale applies to.
    """

    def __init__(self, scale_factor: float, fragment: str):
        self.fragment = fragment
        self.scale_factor = scale_factor
        #print(f"S: requested CFGScale '{fragment}' x {scale_factor}")

    def __repr__(self):
        return f"CFGScale:'{self.fragment}' x {self.scale_factor}"

    def __eq__(self, other):
        if type(other) is not CFGScale:
            return False
        return other.scale_factor == self.scale_factor and other.fragment == self.fragment
|
|
|
|
|
|
|
|
class Fragment():
    """A plain run of prompt text with no attention or scaling applied."""

    def __init__(self, text: str):
        # NOTE(review): validation via assert is stripped under `python -O`;
        # kept as-is to preserve behavior.
        assert type(text) is str
        self.text = text

    def __repr__(self):
        return "Fragment:'" + self.text + "'"

    def __eq__(self, other):
        if type(other) is not Fragment:
            return False
        return other.text == self.text
|
|
|
|
class Conjunction():
    """A weighted 'AND' of several prompts that should all apply.

    :param prompts: Prompt / Blend / FlattenedPrompt instances, or raw parts
        which get wrapped in a Prompt.
    :param weights: per-prompt weights; defaults to 1.0 for each prompt.

    Raises PromptParser.ParsingException on mismatched prompt/weight counts.
    """

    def __init__(self, prompts: list, weights: list = None):
        # force everything to be a Prompt
        #print("making conjunction with", parts)
        wrapped = []
        for p in prompts:
            if type(p) is Prompt or type(p) is Blend or type(p) is FlattenedPrompt:
                wrapped.append(p)
            else:
                wrapped.append(Prompt(p))
        self.prompts = wrapped
        self.weights = list(weights) if weights is not None else [1.0] * len(self.prompts)
        if len(self.weights) != len(self.prompts):
            raise PromptParser.ParsingException(
                f"while parsing Conjunction: mismatched parts/weights counts {prompts}, {weights}")
        self.type = 'AND'

    def __repr__(self):
        return f"Conjunction:{self.prompts} | weights {self.weights}"

    def __eq__(self, other):
        if type(other) is not Conjunction:
            return False
        return other.prompts == self.prompts and other.weights == self.weights
|
|
|
|
|
|
class Blend():
    """A weighted blend of prompts whose conditionings are to be mixed.

    :param prompts: Prompt or FlattenedPrompt instances (anything else raises).
    :param weights: one numeric weight per prompt.
    :param normalize_weights: whether downstream code should normalize the
        weights (stored, not applied here).

    Raises PromptParser.ParsingException on count mismatch or bad part types.
    """

    def __init__(self, prompts: list, weights: list[float], normalize_weights: bool=True):
        #print("making Blend with prompts", prompts, "and weights", weights)
        if len(prompts) != len(weights):
            raise PromptParser.ParsingException(f"while parsing Blend: mismatched prompts/weights counts {prompts}, {weights}")
        for c in prompts:
            if type(c) is not Prompt and type(c) is not FlattenedPrompt:
                raise(PromptParser.ParsingException(f"{type(c)} cannot be added to a Blend, only Prompts or FlattenedPrompts"))
        # upcast all lists to Prompt objects
        # bug fix: this upcast list was previously clobbered by a subsequent
        # `self.prompts = prompts`, contradicting the comment above. The
        # validation loop guarantees the elements are already Prompt or
        # FlattenedPrompt, so keeping the comprehension preserves behavior
        # while removing the dead, misleading assignment.
        self.prompts = [x if (type(x) is Prompt or type(x) is FlattenedPrompt)
                        else Prompt(x) for x in prompts]
        self.weights = weights
        self.normalize_weights = normalize_weights

    def __repr__(self):
        return f"Blend:{self.prompts} | weights {self.weights}"

    def __eq__(self, other):
        # NOTE(review): repr-based equality ignores normalize_weights and would
        # treat any object with an identical repr as equal; kept as-is to
        # preserve existing comparison semantics.
        return other.__repr__() == self.__repr__()
|
|
|
|
|
|
class PromptParser():
    """Parses prompt strings with attention/blend/conjunction syntax into a
    Conjunction of FlattenedPrompts and Blends.

    Supported syntax (as built in build_parser_logic):
      - attention: +(phrase), -(phrase), <weight>(phrase), ++word, --word
      - blend: ("promptA", "promptB").blend(a, b)
      - conjunction: ("promptA", "promptB").and(a, b) or just several
        prompts/blends in sequence (implicit conjunction).
    """

    class ParsingException(Exception):
        # raised for any structural error while parsing or flattening
        pass

    def __init__(self, attention_plus_base=1.1, attention_minus_base=0.9):
        # per-'+' and per-'-' multiplicative bases for attention weights
        self.attention_plus_base = attention_plus_base
        self.attention_minus_base = attention_minus_base

        # pyparsing root grammar element, built once per parser instance
        self.root = self.build_parser_logic()

    def parse(self, prompt: str) -> Conjunction:
        '''
        :param prompt: The prompt string to parse
        :return: a Conjunction of FlattenedPrompts and/or Blends
        '''
        #print(f"!!parsing '{prompt}'")

        # empty/whitespace-only prompts short-circuit to a single empty
        # FlattenedPrompt at weight 1.0
        if len(prompt.strip()) == 0:
            return Conjunction(prompts=[FlattenedPrompt([('', 1.0)])], weights=[1.0])

        root = self.root.parse_string(prompt)
        #print(f"'{prompt}' parsed to root", root)
        #fused = fuse_fragments(parts)
        #print("fused to", fused)

        # root[0] is the top-level Conjunction produced by the grammar
        return self.flatten(root[0])

    def flatten(self, root: Conjunction):
        """Recursively flatten a parse tree into a Conjunction whose prompts
        are FlattenedPrompts (lists of (text, weight) tuples) and Blends,
        multiplying nested attention weights through to the leaves."""

        def fuse_fragments(items):
            # merge consecutive (text, weight) tuples that share the same
            # weight into a single space-joined tuple
            # print("fusing fragments in ", items)
            result = []
            for x in items:
                last_weight = result[-1][1] if len(result) > 0 else None
                this_text = x[0]
                this_weight = x[1]
                if last_weight is not None and last_weight == this_weight:
                    last_text = result[-1][0]
                    result[-1] = (last_text + ' ' + this_text, last_weight)
                else:
                    result.append(x)
            return result

        def flatten_internal(node, weight_scale, results, prefix):
            # depth-first walk; weight_scale accumulates the product of all
            # enclosing Attention weights; prefix is only for debug printing
            #print(prefix + "flattening", node, "...")
            if type(node) is pp.ParseResults:
                for x in node:
                    results = flatten_internal(x, weight_scale, results, prefix+'pr')
                #print(prefix, " ParseResults expanded, results is now", results)
            elif type(node) is Fragment:
                # leaf: emit the text with the accumulated weight
                results.append((node.text, float(weight_scale)))
            elif type(node) is Attention:
                #if node.weight < 1:
                # todo: inject a blend when flattening attention with weight <1"
                for c in node.children:
                    results = flatten_internal(c, weight_scale*node.weight, results, prefix+' ')
            elif type(node) is Blend:
                flattened_subprompts = []
                #print(" flattening blend with prompts", node.prompts, "weights", node.weights)
                for prompt in node.prompts:
                    # prompt is a list
                    flattened_subprompts = flatten_internal(prompt, weight_scale, flattened_subprompts, prefix+'B ')
                results += [Blend(prompts=flattened_subprompts, weights=node.weights)]
            elif type(node) is Prompt:
                #print(prefix + "about to flatten Prompt with children", node.children)
                flattened_prompt = []
                for child in node.children:
                    flattened_prompt = flatten_internal(child, weight_scale, flattened_prompt, prefix+'P ')
                results += [FlattenedPrompt(parts=fuse_fragments(flattened_prompt))]
                #print(prefix + "after flattening Prompt, results is", results)
            else:
                raise PromptParser.ParsingException(f"unhandled node type {type(node)} when flattening {node}")
            #print(prefix + "-> after flattening", type(node), "results is", results)
            return results

        #print("flattening", root)

        flattened_parts = []
        for part in root.prompts:
            flattened_parts += flatten_internal(part, 1.0, [], ' C| ')
        weights = root.weights
        return Conjunction(flattened_parts, weights)

    def build_parser_logic(self):
        """Construct and return the pyparsing grammar. The returned element
        parses a whole prompt string into a one-element ParseResults holding
        a Conjunction."""

        lparen = pp.Literal("(").suppress()
        rparen = pp.Literal(")").suppress()
        # accepts int or float notation, always maps to float
        number = pyparsing.pyparsing_common.real | pp.Word(pp.nums).set_parse_action(pp.token_map(float))
        SPACE_CHARS = ' \t\n'

        # forward declarations: the grammar is mutually recursive
        prompt_part = pp.Forward()
        word = pp.Forward()

        def make_fragment(x):
            # build a Fragment from a raw string or a list of token strings
            #print("### making fragment for", x)
            if type(x) is str:
                return Fragment(x)
            elif type(x) is pp.ParseResults or type(x) is list:
                return Fragment(' '.join([s for s in x]))
            else:
                raise PromptParser.ParsingException("Cannot make fragment from " + str(x))

        # attention control of the form +(phrase) / -(phrase) / <weight>(phrase)
        # phrase can be multiple words, can have multiple +/- signs to increase the effect or type a floating point or integer weight
        attention = pp.Forward()
        attention_head = (number | pp.Word('+') | pp.Word('-'))\
            .set_name("attention_head")\
            .set_debug(False)
        fragment_inside_attention = pp.CharsNotIn(SPACE_CHARS+'()')\
            .set_parse_action(make_fragment)\
            .set_name("fragment_inside_attention")\
            .set_debug(False)
        attention_with_parens = pp.Forward()
        # parenthesised body may itself nest further attention groups
        attention_with_parens_body = pp.nested_expr(content=pp.delimited_list((attention_with_parens | fragment_inside_attention), delim=SPACE_CHARS))
        attention_with_parens << (attention_head + attention_with_parens_body)

        def make_attention(x):
            # x[0] is the head (number or +/- run), x[1] the child nodes
            # print("making Attention from parsing with args", x0, x1)
            weight = 1
            # number(str)
            if type(x[0]) is float or type(x[0]) is int:
                weight = float(x[0])
            # +(str) or -(str) or +str or -str
            elif type(x[0]) is str:
                # N '+' signs -> plus_base**N, N '-' signs -> minus_base**N
                base = self.attention_plus_base if x[0][0] == '+' else self.attention_minus_base
                weight = pow(base, len(x[0]))
            # print("Making attention with children of type", [str(type(x)) for x in x1])
            return Attention(weight=weight, children=x[1])

        attention_with_parens.set_parse_action(make_attention)\
            .set_name("attention_with_parens")\
            .set_debug(False)

        # attention control of the form ++word --word (no parens)
        attention_without_parens = (
            (pp.Word('+') | pp.Word('-')) +
            pp.CharsNotIn(SPACE_CHARS+'()').set_parse_action(lambda x: [[make_fragment(x)]])
        )\
            .set_name("attention_without_parens")\
            .set_debug(False)
        attention_without_parens.set_parse_action(make_attention)

        attention << (attention_with_parens | attention_without_parens)\
            .set_name("attention")\
            .set_debug(False)

        # fragments of text with no attention control
        word << pp.Word(pp.printables).set_parse_action(lambda x: Fragment(' '.join([s for s in x])))
        word.set_name("word")
        word.set_debug(False)
        prompt_part << (attention | word)
        prompt_part.set_debug(False)
        prompt_part.set_name("prompt_part")

        # root prompt definition
        prompt = pp.Group(pp.OneOrMore(prompt_part))\
            .set_parse_action(lambda x: Prompt(x[0]))

        # weighted blend of prompts
        # ("promptA", "promptB").blend(a, b) where "promptA" and "promptB" are valid prompts and a and b are float or
        # int weights.
        # can specify more terms eg ("promptA", "promptB", "promptC").blend(a,b,c)

        def make_prompt_from_quoted_string(x):
            # strip the surrounding double quotes and re-parse the interior
            # as a full prompt (empty interiors become an empty Fragment)
            #print(' got quoted prompt', x)

            x_unquoted = x[0][1:-1]
            if len(x_unquoted.strip()) == 0:
                # print(' b : just an empty string')
                return Prompt([Fragment('')])
            # print(' b parsing ', c_unquoted)
            x_parsed = prompt.parse_string(x_unquoted)
            #print(" quoted prompt was parsed to", type(x_parsed),":", x_parsed)
            return x_parsed[0]

        quoted_prompt = pp.dbl_quoted_string.set_parse_action(make_prompt_from_quoted_string)
        quoted_prompt.set_name('quoted_prompt')

        blend_terms = pp.delimited_list(quoted_prompt).set_name('blend_terms')
        blend_weights = pp.delimited_list(number).set_name('blend_weights')
        blend = pp.Group(lparen + pp.Group(blend_terms) + rparen
                         + pp.Literal(".blend").suppress()
                         + lparen + pp.Group(blend_weights) + rparen).set_name('blend')
        blend.set_debug(False)

        blend.set_parse_action(lambda x: Blend(prompts=x[0][0], weights=x[0][1]))

        # explicit conjunction: ("a", "b").and(1, 2) — weights optional
        conjunction_terms = blend_terms.copy().set_name('conjunction_terms')
        conjunction_weights = blend_weights.copy().set_name('conjunction_weights')
        conjunction_with_parens_and_quotes = pp.Group(lparen + pp.Group(conjunction_terms) + rparen
                                                      + pp.Literal(".and").suppress()
                                                      + lparen + pp.Optional(pp.Group(conjunction_weights)) + rparen).set_name('conjunction')

        def make_conjunction(x):
            parts_raw = x[0][0]
            # omitted weights default to 1.0 per part
            weights = x[0][1] if len(x[0])>1 else [1.0]*len(parts_raw)
            parts = [part for part in parts_raw]
            return Conjunction(parts, weights)
        conjunction_with_parens_and_quotes.set_parse_action(make_conjunction)

        # fallback: any sequence of blends/prompts is an implicit AND
        implicit_conjunction = pp.OneOrMore(blend | prompt)
        implicit_conjunction.set_parse_action(lambda x: Conjunction(x))

        conjunction = conjunction_with_parens_and_quotes | implicit_conjunction
        conjunction.set_debug(False)

        # top-level is a conjunction of one or more blends or prompts
        return conjunction
|