Compare commits


3 Commits

Author           SHA1         Message                                        Date
Brandon Rising   62d0b8c42b   Only kick off on merges to main                2024-01-22 10:00:39 -05:00
Brandon Rising   1869f34fba   Fix repo_name                                  2024-01-22 09:59:57 -05:00
Brandon Rising   e225cf0613   Trigger for pushes to InvokeAI's main branch   2024-01-22 09:59:07 -05:00
12 changed files with 40 additions and 120 deletions

.github/pr_labels.yml

@@ -1,40 +0,0 @@
Root:
- changed-files:
  - any-glob-to-any-file: '*'
PythonDeps:
- changed-files:
  - any-glob-to-any-file: 'pyproject.toml'
Python:
- changed-files:
  - any-glob-to-any-file:
    - 'invokeai/**'
    - '!invokeai/frontend/web/**'
    - 'tests/**'
Invocations:
- changed-files:
  - any-glob-to-any-file: 'invokeai/app/invocations/**'
Backend:
- changed-files:
  - any-glob-to-any-file: 'invokeai/backend/**'
Api:
- changed-files:
  - any-glob-to-any-file: 'invokeai/app/api/**'
Services:
- changed-files:
  - any-glob-to-any-file: 'invokeai/app/services/**'
FrontendDeps:
- changed-files:
  - any-glob-to-any-file:
    - '**/*/package.json'
    - '**/*/pnpm-lock.yaml'
Frontend:
- changed-files:
  - any-glob-to-any-file: 'invokeai/frontend/web/**'
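For context on what this deleted config did: with actions/labeler v5, each top-level key is a label that gets applied when any file changed in the PR matches any of the listed globs. A rough Python illustration of that matching, using fnmatch as an approximation of the action's minimatch-style globbing (negation patterns like '!invokeai/frontend/web/**' are omitted here; this is not the labeler's actual implementation):

from fnmatch import fnmatch

# A small subset of the config above, for illustration only.
LABEL_GLOBS = {
    "PythonDeps": ["pyproject.toml"],
    "Invocations": ["invokeai/app/invocations/**"],
    "Frontend": ["invokeai/frontend/web/**"],
}

def labels_for(changed_files):
    # A label applies if any changed file matches any of its globs
    # ("any-glob-to-any-file" semantics).
    return {
        label
        for label, globs in LABEL_GLOBS.items()
        if any(fnmatch(f, g) for f in changed_files for g in globs)
    }

print(sorted(labels_for(["invokeai/app/invocations/latents.py"])))  # ['Invocations']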

.github/workflows/change-monitor.yml

@@ -0,0 +1,29 @@
name: Trigger Target Workflow
on:
  push:
    branches:
      - main
  workflow_dispatch:
jobs:
  trigger:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger Workflow in Another Repository
        run: |
          # Set the required variables
          repo_owner="invoke-ai"
          repo_name="Invoke"
          event_type="invokeai-pr-merge"
          service="${{ github.event.inputs.target_service }}"
          version="${{ github.event.inputs.target_version }}"

          curl -L \
            -X POST \
            -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ secrets.PAT }}" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            https://api.github.com/repos/$repo_owner/$repo_name/dispatches \
            -d "{\"event_type\": \"$event_type\", \"client_payload\": {\"service\": \"$service\", \"version\": \"$version\", \"unit\": false, \"integration\": true}}"
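The curl call above can also be exercised outside of Actions when debugging the PAT or payload. A minimal Python sketch of the same repository_dispatch request, assuming the token is exported as GITHUB_PAT and that the requests library is available; service and version below are hypothetical stand-ins for the workflow's templated inputs:

import os
import requests

service = "example-service"  # hypothetical stand-in for target_service
version = "1.2.3"            # hypothetical stand-in for target_version

resp = requests.post(
    "https://api.github.com/repos/invoke-ai/Invoke/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_PAT']}",  # PAT with repo scope
        "X-GitHub-Api-Version": "2022-11-28",
    },
    json={
        "event_type": "invokeai-pr-merge",
        "client_payload": {"service": service, "version": version, "unit": False, "integration": True},
    },
)
resp.raise_for_status()  # the dispatches endpoint returns 204 No Content on success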


@@ -1,16 +0,0 @@
name: "Pull Request Labeler"
on:
  - pull_request_target
jobs:
  labeler:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - uses: actions/labeler@v5
        with:
          configuration-path: .github/pr_labels.yml


@@ -25,6 +25,7 @@ To use a community workflow, download the `.json` node graph file and load i
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
+ [Grid to Gif](#grid-to-gif)
+ [Halftone](#halftone)
+ [Ideal Size](#ideal-size)
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
+ [Image Dominant Color](#image-dominant-color)
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
@@ -195,6 +196,13 @@ CMYK Halftone Output:
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />
--------------------------------
### Ideal Size
**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of producing.
**Node Link:** https://github.com/JPPhoto/ideal-size-node
--------------------------------
### Image and Mask Composition Pack


@@ -36,7 +36,6 @@ their descriptions.
| Integer Math | Perform basic math operations on two integers |
| Convert Image Mode | Converts an image to a different mode. |
| Crop Image | Crops an image to a specified box. The box can be outside of the image. |
| Ideal Size | Calculates an ideal image size for latents for a first pass of a multi-pass upscaling to avoid duplication and other artifacts |
| Image Hue Adjustment | Adjusts the Hue of an image. |
| Inverse Lerp Image | Inverse linear interpolation of all pixels of an image |
| Image Primitive | An image primitive value |


@@ -24,7 +24,6 @@ download_queue_router = APIRouter(prefix="/v1/download_queue", tags=["download_q
)
async def list_downloads() -> List[DownloadJob]:
    """Get a list of active and inactive jobs."""
    print("test")
    queue = ApiDependencies.invoker.services.download_queue
    return queue.list_jobs()
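As an aside, the route this hunk touches is easy to sanity-check against a running instance. A minimal sketch, assuming the default local port 9090, the /api mount prefix, and that each job in the response carries id and status fields (assumptions based on the DownloadJob model, not confirmed by this diff):

import requests

# Assumed URL layout: router prefix /v1/download_queue mounted under /api.
resp = requests.get("http://localhost:9090/api/v1/download_queue/")
resp.raise_for_status()
for job in resp.json():
    # Field names are assumptions based on the DownloadJob model.
    print(job.get("id"), job.get("status"))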


@@ -1,6 +1,5 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
import math
from contextlib import ExitStack
from functools import singledispatchmethod
from typing import List, Literal, Optional, Union
@@ -75,8 +74,6 @@ from .model import ModelInfo, UNetField, VaeField
if choose_torch_device() == torch.device("mps"):
    from torch import mps
print("test")
DEFAULT_PRECISION = choose_precision(choose_torch_device())
SAMPLER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())]
@@ -1231,57 +1228,3 @@ class CropLatentsCoreInvocation(BaseInvocation):
        context.services.latents.save(name, cropped_latents)
        return build_latents_output(latents_name=name, latents=cropped_latents)


@invocation_output("ideal_size_output")
class IdealSizeOutput(BaseInvocationOutput):
    """The ideal width and height of an image, in pixels"""

    width: int = OutputField(description="The ideal width of the image (in pixels)")
    height: int = OutputField(description="The ideal height of the image (in pixels)")


@invocation(
    "ideal_size",
    title="Ideal Size",
    tags=["latents", "math", "ideal_size"],
    version="1.0.2",
)
class IdealSizeInvocation(BaseInvocation):
    """Calculates the ideal size for generation to avoid duplication"""

    width: int = InputField(default=1024, description="Final image width")
    height: int = InputField(default=576, description="Final image height")
    unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
    multiplier: float = InputField(
        default=1.0,
        description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large)",
    )

    def trim_to_multiple_of(self, *args, multiple_of=LATENT_SCALE_FACTOR):
        return tuple((x - x % multiple_of) for x in args)

    def invoke(self, context: InvocationContext) -> IdealSizeOutput:
        aspect = self.width / self.height
        dimension = 512
        if self.unet.unet.base_model == BaseModelType.StableDiffusion2:
            dimension = 768
        elif self.unet.unet.base_model == BaseModelType.StableDiffusionXL:
            dimension = 1024
        dimension = dimension * self.multiplier
        min_dimension = math.floor(dimension * 0.5)
        model_area = dimension * dimension  # hardcoded for now since all models are trained on square images
        if aspect > 1.0:
            init_height = max(min_dimension, math.sqrt(model_area / aspect))
            init_width = init_height * aspect
        else:
            init_width = max(min_dimension, math.sqrt(model_area * aspect))
            init_height = init_width / aspect
        scaled_width, scaled_height = self.trim_to_multiple_of(
            math.floor(init_width),
            math.floor(init_height),
        )
        return IdealSizeOutput(width=scaled_width, height=scaled_height)
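To make the removed math concrete, here is the same calculation as a standalone sketch, assuming InvokeAI's LATENT_SCALE_FACTOR of 8 (the latent-to-pixel scale for Stable Diffusion). A 1920x1080 SDXL request (base dimension 1024) yields a 1360x768 first pass:

import math

LATENT_SCALE_FACTOR = 8  # assumed: latents are 1/8 the pixel resolution

def trim_to_multiple_of(*args, multiple_of=LATENT_SCALE_FACTOR):
    # Round each dimension down to the nearest multiple, so latent sizes stay whole.
    return tuple((x - x % multiple_of) for x in args)

def ideal_size(width, height, dimension=512, multiplier=1.0):
    # Preserve the requested aspect ratio while keeping the pixel area near
    # the area the model was trained on (dimension x dimension).
    aspect = width / height
    dimension = dimension * multiplier
    min_dimension = math.floor(dimension * 0.5)
    model_area = dimension * dimension
    if aspect > 1.0:
        init_height = max(min_dimension, math.sqrt(model_area / aspect))
        init_width = init_height * aspect
    else:
        init_width = max(min_dimension, math.sqrt(model_area * aspect))
        init_height = init_width / aspect
    return trim_to_multiple_of(math.floor(init_width), math.floor(init_height))

print(ideal_size(1920, 1080, dimension=1024))  # SDXL base size -> (1360, 768)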


@@ -23,7 +23,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
        self.__threadLimit = BoundedSemaphore(1)
        self.__invoker = invoker
        self.__stop_event = Event()
        print("test")
        self.__invoker_thread = Thread(
            name="invoker_processor",
            target=self.__process,


@@ -113,7 +113,7 @@ class ModelPatcher:
        for layer_key, layer in lora.layers.items():
            if not layer_key.startswith(prefix):
                continue
            print("test")
            # TODO(ryand): A non-negligible amount of time is currently spent resolving LoRA keys. This
            # should be improved in the following ways:
            # 1. The key mapping could be more-efficiently pre-computed. This would save time every time a


@@ -20,7 +20,6 @@
  ],
  "scripts": {
    "dev": "concurrently \"vite dev\" \"pnpm run theme:watch\"",
    "hi": "echo test",
    "dev:host": "concurrently \"vite dev --host\" \"pnpm run theme:watch\"",
    "build": "pnpm run lint && vite build",
    "typegen": "node scripts/typegen.js",


@@ -16,7 +16,7 @@ export const useAddControlAdapter = (type: ControlAdapterType) => {
    const firstCompatibleModel = models.filter((m) =>
      baseModel ? m.base_model === baseModel : true
    )[0];
    console.log("test")
    if (firstCompatibleModel) {
      return firstCompatibleModel;
    }


@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "InvokeAI"
description = "An test implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.10, <3.12"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]