Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-23 10:38:09 -05:00
Compare commits
10 Commits
- a74e2108bb
- ca5689dc54
- b567d65032
- 35ac8e78bd
- e90fd96eee
- ed72d51969
- d5267357b1
- e085eb63bd
- 8e470f9b6f
- 83163ddd9a
@@ -302,6 +302,29 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     add_time_ids = torch.tensor([original_size + crop_coords + target_size])

+    # [1, 77, 768], [1, 154, 1280]
+    if c1.shape[1] < c2.shape[1]:
+        c1 = torch.cat(
+            [
+                c1,
+                torch.zeros(
+                    (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), device=c1.device, dtype=c1.dtype
+                ),
+            ],
+            dim=1,
+        )
+
+    elif c1.shape[1] > c2.shape[1]:
+        c2 = torch.cat(
+            [
+                c2,
+                torch.zeros(
+                    (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), device=c2.device, dtype=c2.dtype
+                ),
+            ],
+            dim=1,
+        )
+
     conditioning_data = ConditioningFieldData(
         conditionings=[
             SDXLConditioningInfo(
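For orientation, here is a standalone sketch of what the added block does, using the example shapes from the in-diff comment (presumably the CLIP-L and OpenCLIP-bigG outputs); the tensor names and shapes below are illustrative and not taken from the surrounding file:

```python
import torch

# Illustrative shapes from the comment above: a [1, 77, 768] embedding and a
# longer [1, 154, 1280] embedding. The shorter tensor is zero-padded along the
# token dimension (dim=1) so both agree on sequence length.
c1 = torch.randn(1, 77, 768)
c2 = torch.randn(1, 154, 1280)

if c1.shape[1] < c2.shape[1]:
    pad = torch.zeros(c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2], dtype=c1.dtype)
    c1 = torch.cat([c1, pad], dim=1)
elif c1.shape[1] > c2.shape[1]:
    pad = torch.zeros(c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2], dtype=c2.dtype)
    c2 = torch.cat([c2, pad], dim=1)

print(c1.shape, c2.shape)  # torch.Size([1, 154, 768]) torch.Size([1, 154, 1280])
```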
@@ -445,8 +445,14 @@ class DenoiseLatentsInvocation(BaseInvocation):
             latents = context.services.latents.get(self.latents.latents_name)
             if seed is None:
                 seed = self.latents.seed
-        else:
+
+            if noise is not None and noise.shape[1:] != latents.shape[1:]:
+                raise Exception(f"Incompatable 'noise' and 'latents' shapes: {latents.shape=} {noise.shape=}")
+
+        elif noise is not None:
             latents = torch.zeros_like(noise)
+        else:
+            raise Exception("'latents' or 'noise' must be provided!")

         if seed is None:
             seed = 0
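A toy illustration of the guard added here; the shapes below are made up (latents for one resolution, noise prepared for another):

```python
import torch

# Everything after the batch dimension must match before noise can be added
# to the latents, so a mismatch should fail loudly up front.
latents = torch.zeros(1, 4, 64, 64)   # illustrative: latents for a 512x512 image
noise = torch.randn(1, 4, 96, 96)     # illustrative: noise prepared for 768x768

if noise is not None and noise.shape[1:] != latents.shape[1:]:
    raise Exception(f"Incompatable 'noise' and 'latents' shapes: {latents.shape=} {noise.shape=}")
```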
@@ -109,7 +109,7 @@ class IntegerCollectionInvocation(BaseInvocation):
     """A collection of integer primitive values"""

     collection: list[int] = InputField(
-        default=0, description="The collection of integer values", ui_type=UIType.IntegerCollection
+        default_factory=list, description="The collection of integer values", ui_type=UIType.IntegerCollection
     )

     def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
@@ -261,7 +261,7 @@ class ImageCollectionInvocation(BaseInvocation):
     """A collection of image primitive values"""

     collection: list[ImageField] = InputField(
-        default=0, description="The collection of image values", ui_type=UIType.ImageCollection
+        default_factory=list, description="The collection of image values", ui_type=UIType.ImageCollection
     )

     def invoke(self, context: InvocationContext) -> ImageCollectionOutput:
@@ -451,7 +451,9 @@ class ConditioningCollectionInvocation(BaseInvocation):
     """A collection of conditioning tensor primitive values"""

     collection: list[ConditioningField] = InputField(
-        default=0, description="The collection of conditioning tensors", ui_type=UIType.ConditioningCollection
+        default_factory=list,
+        description="The collection of conditioning tensors",
+        ui_type=UIType.ConditioningCollection,
     )

     def invoke(self, context: InvocationContext) -> ConditioningCollectionOutput:
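The three hunks above make the same fix: list-typed fields were declared with `default=0`. A minimal sketch of the difference using plain pydantic (InvokeAI's `InputField` is built on pydantic's `Field`; the model below is illustrative):

```python
from typing import List
from pydantic import BaseModel, Field

class Bad(BaseModel):
    # The default never matches the annotation: instances silently carry 0
    # where callers expect a list they can iterate over or append to.
    collection: List[int] = Field(default=0)

class Good(BaseModel):
    # default_factory builds a fresh empty list for every instance, so the
    # default matches the annotation and instances never share one list.
    collection: List[int] = Field(default_factory=list)

print(Bad().collection)   # 0
print(Good().collection)  # []
```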
@@ -492,10 +492,10 @@ def _parse_legacy_yamlfile(root: Path, initfile: Path) -> ModelPaths:
     loras = paths.get("lora_dir", "loras")
     controlnets = paths.get("controlnet_dir", "controlnets")
     return ModelPaths(
-        models=root / models,
-        embeddings=root / embeddings,
-        loras=root / loras,
-        controlnets=root / controlnets,
+        models=root / models if models else None,
+        embeddings=root / embeddings if embeddings else None,
+        loras=root / loras if loras else None,
+        controlnets=root / controlnets if controlnets else None,
     )
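A small sketch of why the guards matter; the root and the dict below are illustrative stand-ins for values parsed out of a legacy `invokeai.yaml`:

```python
from pathlib import Path

root = Path("/opt/invokeai")
# A key that is present but empty/null comes back as None despite the
# .get(..., default) fallback, and joining a Path with None raises TypeError.
paths = {"embedding_dir": None, "lora_dir": "loras"}

embeddings = paths.get("embedding_dir", "embeddings")  # None, not "embeddings"
loras = paths.get("lora_dir", "loras")                 # "loras"

embeddings_path = root / embeddings if embeddings else None  # None instead of a TypeError
loras_path = root / loras if loras else None                 # /opt/invokeai/loras
print(embeddings_path, loras_path)
```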
@@ -265,7 +265,7 @@ class InvokeAICrossAttentionMixin:
         if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096
             return self.einsum_lowest_level(q, k, v, None, None, None)
         else:
-            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
+            slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1]))
             return self.einsum_op_slice_dim1(q, k, v, slice_size)

     def einsum_op_mps_v2(self, q, k, v):
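Only the spacing of the power operator changes here, but the heuristic around it benefits from a worked example; the shapes below are illustrative:

```python
import math

# For a 512x512 image the flattened query length is (512/8)**2 = 4096, the
# cutoff in the hunk above; anything larger is processed in dim-1 slices whose
# size is chosen so the intermediate buffer per slice stays roughly below 2**30.
batch_heads, q_len = 16, 9216  # e.g. a 96x96 latent grid for a 768x768 image
slice_size = math.floor(2 ** 30 / (batch_heads * q_len))
print(slice_size)  # 7281 query tokens per slice
```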
@@ -215,10 +215,7 @@ class InvokeAIDiffuserComponent:
                 dim=0,
             ),
         }
-        (
-            encoder_hidden_states,
-            encoder_attention_mask,
-        ) = self._concat_conditionings_for_batch(
+        (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
             conditioning_data.unconditioned_embeddings.embeds,
             conditioning_data.text_embeddings.embeds,
         )
@@ -280,10 +277,7 @@ class InvokeAIDiffuserComponent:
         wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0

         if wants_cross_attention_control:
-            (
-                unconditioned_next_x,
-                conditioned_next_x,
-            ) = self._apply_cross_attention_controlled_conditioning(
+            (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
@@ -291,10 +285,7 @@ class InvokeAIDiffuserComponent:
                 **kwargs,
             )
         elif self.sequential_guidance:
-            (
-                unconditioned_next_x,
-                conditioned_next_x,
-            ) = self._apply_standard_conditioning_sequentially(
+            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
                 sample,
                 timestep,
                 conditioning_data,
@@ -302,10 +293,7 @@ class InvokeAIDiffuserComponent:
             )

         else:
-            (
-                unconditioned_next_x,
-                conditioned_next_x,
-            ) = self._apply_standard_conditioning(
+            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
@@ -395,7 +395,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
+        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -413,7 +413,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
+        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -399,7 +399,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
+        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -417,7 +417,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
+        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
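The four hunks above only change `L**2` to `L ** 2`; for readers unfamiliar with the pattern, here is a compact restatement of the channel-correlated noise construction they contain (array sizes and the noise level are illustrative):

```python
import numpy as np
from scipy.linalg import orth

rng = np.random.default_rng(0)
L = 25 / 255.0                  # illustrative noise level
D = np.diag(rng.random(3))      # non-negative eigenvalues
U = orth(rng.random((3, 3)))    # orthonormal basis
conv = U.T @ D @ U              # symmetric positive semi-definite 3x3 covariance

# One correlated RGB noise sample per pixel of a (4, 4) image.
noise = rng.multivariate_normal(np.zeros(3), np.abs(L ** 2 * conv), size=(4, 4)).astype(np.float32)
print(noise.shape)  # (4, 4, 3)
```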
@@ -562,14 +562,18 @@ def rgb2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
     else:
-        rlt = np.matmul(
-            img,
-            [
-                [65.481, -37.797, 112.0],
-                [128.553, -74.203, -93.786],
-                [24.966, 112.0, -18.214],
-            ],
-        ) / 255.0 + [16, 128, 128]
+        rlt = (
+            np.matmul(
+                img,
+                [
+                    [65.481, -37.797, 112.0],
+                    [128.553, -74.203, -93.786],
+                    [24.966, 112.0, -18.214],
+                ],
+            )
+            / 255.0
+            + [16, 128, 128]
+        )
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -588,14 +592,18 @@ def ycbcr2rgb(img):
     if in_img_type != np.uint8:
         img *= 255.0
     # convert
-    rlt = np.matmul(
-        img,
-        [
-            [0.00456621, 0.00456621, 0.00456621],
-            [0, -0.00153632, 0.00791071],
-            [0.00625893, -0.00318811, 0],
-        ],
-    ) * 255.0 + [-222.921, 135.576, -276.836]
+    rlt = (
+        np.matmul(
+            img,
+            [
+                [0.00456621, 0.00456621, 0.00456621],
+                [0, -0.00153632, 0.00791071],
+                [0.00625893, -0.00318811, 0],
+            ],
+        )
+        * 255.0
+        + [-222.921, 135.576, -276.836]
+    )
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -618,14 +626,18 @@ def bgr2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
     else:
-        rlt = np.matmul(
-            img,
-            [
-                [24.966, 112.0, -18.214],
-                [128.553, -74.203, -93.786],
-                [65.481, -37.797, 112.0],
-            ],
-        ) / 255.0 + [16, 128, 128]
+        rlt = (
+            np.matmul(
+                img,
+                [
+                    [24.966, 112.0, -18.214],
+                    [128.553, -74.203, -93.786],
+                    [65.481, -37.797, 112.0],
+                ],
+            )
+            / 255.0
+            + [16, 128, 128]
+        )
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -716,11 +728,11 @@ def ssim(img1, img2):
     mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
     mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
-    mu1_sq = mu1**2
-    mu2_sq = mu2**2
+    mu1_sq = mu1 ** 2
+    mu2_sq = mu2 ** 2
     mu1_mu2 = mu1 * mu2
-    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
-    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
+    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
+    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
     sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

     ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
@@ -737,8 +749,8 @@ def ssim(img1, img2):
 # matlab 'imresize' function, now only support 'bicubic'
 def cubic(x):
     absx = torch.abs(x)
-    absx2 = absx**2
-    absx3 = absx**3
+    absx2 = absx ** 2
+    absx3 = absx ** 3
     return (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + (
         -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
     ) * (((absx > 1) * (absx <= 2)).type_as(absx))
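Again only the `**` spacing changes; for reference, the function being touched matches the standard bicubic (Keys) kernel with a = -0.5, which can be sanity-checked at its defining points:

```python
import torch

def cubic(x):
    # Same kernel as in the hunk above: (a+2)|x|^3 - (a+3)|x|^2 + 1 on [0, 1]
    # and a|x|^3 - 5a|x|^2 + 8a|x| - 4a on (1, 2], with a = -0.5.
    absx = torch.abs(x)
    absx2 = absx ** 2
    absx3 = absx ** 3
    return (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + (
        -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
    ) * (((absx > 1) * (absx <= 2)).type_as(absx))

print(cubic(torch.tensor([0.0, 1.0, 2.0])))  # tensor([1., 0., 0.])
```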
@@ -475,10 +475,7 @@ class TextualInversionDataset(Dataset):
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (
-                h,
-                w,
-            ) = (
+            (h, w,) = (
                 img.shape[0],
                 img.shape[1],
             )
@@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor:
         if attn.upcast_attention:
             out_item_size = 4

-        chunk_size = 2**29
+        chunk_size = 2 ** 29

         out_size = query.shape[1] * key.shape[1] * out_item_size
         chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))
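Another spacing-only change; the arithmetic around it decides how many chunks the attention score matrix is split into. A worked example with illustrative shapes:

```python
import math

query_len, key_len = 16384, 16384  # illustrative: a 128x128 latent grid
out_item_size = 4                  # bytes per element when upcast_attention forces fp32
chunk_size = 2 ** 29               # ~512 MiB budget per chunk

out_size = query_len * key_len * out_item_size             # 1 GiB for the full score matrix
chunks_count = min(query_len, math.ceil((out_size - 1) / chunk_size))
print(chunks_count)  # 2 -> the attention map is computed in two slices
```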
@@ -207,7 +207,7 @@ def parallel_data_prefetch(
     return gather_res


-def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
+def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
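The default `fade` argument reformatted here is Perlin's quintic smoothstep; a quick standalone check of its defining values:

```python
# 6t^5 - 15t^4 + 10t^3 maps [0, 1] onto [0, 1] with zero first and second
# derivatives at both endpoints, which is what keeps the interpolation smooth.
fade = lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3
print(fade(0.0), fade(0.5), fade(1.0))  # 0.0 0.5 1.0
```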
@@ -4,14 +4,14 @@ sd-1/main/stable-diffusion-v1-5:
   repo_id: runwayml/stable-diffusion-v1-5
   recommended: True
   default: True
-sd-1/main/stable-diffusion-inpainting:
+sd-1/main/stable-diffusion-v1-5-inpainting:
   description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
   repo_id: runwayml/stable-diffusion-inpainting
   recommended: True
 sd-2/main/stable-diffusion-2-1:
   description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-1
-  recommended: True
+  recommended: False
 sd-2/main/stable-diffusion-2-inpainting:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-inpainting
@@ -19,19 +19,19 @@ sd-2/main/stable-diffusion-2-inpainting:
 sdxl/main/stable-diffusion-xl-base-1-0:
   description: Stable Diffusion XL base model (12 GB)
   repo_id: stabilityai/stable-diffusion-xl-base-1.0
-  recommended: False
+  recommended: True
 sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
   description: Stable Diffusion XL refiner model (12 GB)
   repo_id: stabilityai/stable-diffusion-xl-refiner-1.0
-  recommended: false
+  recommended: False
 sdxl/vae/sdxl-1-0-vae-fix:
   description: Fine tuned version of the SDXL-1.0 VAE
   repo_id: madebyollin/sdxl-vae-fp16-fix
-  recommended: true
+  recommended: True
 sd-1/main/Analog-Diffusion:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
   repo_id: wavymulder/Analog-Diffusion
-  recommended: false
+  recommended: False
 sd-1/main/Deliberate:
   description: Versatile model that produces detailed images up to 768px (4.27 GB)
   repo_id: XpucT/Deliberate
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
-import{x as m,h1 as Ze,w as y,Z as Ya,h2 as Za,a8 as ua,ac as d,h3 as b,h4 as o,h5 as Ja,h6 as h,h7 as fa,h8 as Qa,h9 as eo,aF as ro,ha as ao,a5 as oo,hb as to}from"./index-3a52d467.js";import{s as ha,n as t,t as io,o as ma,p as no,q as ga,v as ya,w as pa,x as lo,y as Sa,z as xa,A as xr,B as so,D as co,E as bo,F as $a,G as ka,H as _a,J as vo,K as wa,L as uo,M as fo,N as ho,O as mo,Q as za,R as go,S as yo,T as po,U as So,V as xo,W as $o,e as ko,X as _o}from"./menu-0ce947db.js";var Ca=String.raw,Aa=Ca`
+import{x as m,h1 as Ze,w as y,Z as Ya,h2 as Za,a8 as ua,ac as d,h3 as b,h4 as o,h5 as Ja,h6 as h,h7 as fa,h8 as Qa,h9 as eo,aF as ro,ha as ao,a5 as oo,hb as to}from"./index-08cda350.js";import{s as ha,n as t,t as io,o as ma,p as no,q as ga,v as ya,w as pa,x as lo,y as Sa,z as xa,A as xr,B as so,D as co,E as bo,F as $a,G as ka,H as _a,J as vo,K as wa,L as uo,M as fo,N as ho,O as mo,Q as za,R as go,S as yo,T as po,U as So,V as xo,W as $o,e as ko,X as _o}from"./menu-3d10c968.js";var Ca=String.raw,Aa=Ca`
 :root,
 :host {
   --chakra-vh: 100vh;
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/index.html (vendored)
@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-  <script type="module" crossorigin src="./assets/index-3a52d467.js"></script>
+  <script type="module" crossorigin src="./assets/index-08cda350.js"></script>
 </head>

 <body dir="ltr">
@@ -280,7 +280,7 @@ export type ConditioningInputFieldValue = z.infer<
 export const zControlNetModel = zModelIdentifier;
 export type ControlNetModel = z.infer<typeof zControlNetModel>;

-export const zControlField = zInputFieldValueBase.extend({
+export const zControlField = z.object({
   image: zImageField,
   control_model: zControlNetModel,
   control_weight: z.union([z.number(), z.array(z.number())]).optional(),
@@ -295,11 +295,11 @@ export const zControlField = zInputFieldValueBase.extend({
 });
 export type ControlField = z.infer<typeof zControlField>;

-export const zControlInputFieldTemplate = zInputFieldValueBase.extend({
+export const zControlInputFieldValue = zInputFieldValueBase.extend({
   type: z.literal('ControlField'),
   value: zControlField.optional(),
 });
-export type ControlInputFieldValue = z.infer<typeof zControlInputFieldTemplate>;
+export type ControlInputFieldValue = z.infer<typeof zControlInputFieldValue>;

 export const zModelType = z.enum([
   'onnx',
@@ -492,7 +492,7 @@ export const zInputFieldValue = z.discriminatedUnion('type', [
   zUNetInputFieldValue,
   zClipInputFieldValue,
   zVaeInputFieldValue,
-  zControlInputFieldTemplate,
+  zControlInputFieldValue,
   zEnumInputFieldValue,
   zMainModelInputFieldValue,
   zSDXLMainModelInputFieldValue,
@@ -12,17 +12,21 @@ export const buildWorkflow = (nodesState: NodesState): Workflow => {
     edges: [],
   };

-  nodes.forEach((node) => {
-    const result = zWorkflowNode.safeParse(node);
-    if (!result.success) {
-      const { message } = fromZodError(result.error, {
-        prefix: 'Unable to parse node',
-      });
-      logger('nodes').warn({ node: parseify(node) }, message);
-      return;
-    }
-    workflow.nodes.push(result.data);
-  });
+  nodes
+    .filter((n) =>
+      ['invocation', 'notes'].includes(n.type ?? '__UNKNOWN_NODE_TYPE__')
+    )
+    .forEach((node) => {
+      const result = zWorkflowNode.safeParse(node);
+      if (!result.success) {
+        const { message } = fromZodError(result.error, {
+          prefix: 'Unable to parse node',
+        });
+        logger('nodes').warn({ node: parseify(node) }, message);
+        return;
+      }
+      workflow.nodes.push(result.data);
+    });

   edges.forEach((edge) => {
     const result = zWorkflowEdge.safeParse(edge);
@@ -1 +1 @@
-__version__ = "3.1.0rc1"
+__version__ = "3.1.0"