mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-04-23 03:00:31 -04:00
Merge branch 'main' into fix/sdxl-dora-partial-loading
This commit is contained in:
60
.github/workflows/deploy-docs.yml
vendored
60
.github/workflows/deploy-docs.yml
vendored
@@ -10,18 +10,62 @@ on:
|
||||
- 'opened'
|
||||
- 'synchronize'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
deploy_target:
|
||||
description: 'Deploy target (custom = invoke.ai, ghpages = invoke-ai.github.io/InvokeAI)'
|
||||
type: choice
|
||||
options:
|
||||
- custom
|
||||
- ghpages
|
||||
default: custom
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
pull-requests: read
|
||||
|
||||
concurrency:
|
||||
group: 'pages'
|
||||
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
docs: ${{ steps.manual.outputs.docs || steps.filter.outputs.docs }}
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: mark manual run
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
id: manual
|
||||
run: echo "docs=true" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: detect docs-related changes
|
||||
if: github.event_name != 'workflow_dispatch'
|
||||
id: filter
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
docs:
|
||||
- '.github/workflows/deploy-docs.yml'
|
||||
- 'docs/**'
|
||||
- 'scripts/generate_docs_json.py'
|
||||
- 'invokeai/app/**'
|
||||
- 'invokeai/backend/**'
|
||||
- 'pyproject.toml'
|
||||
- 'uv.lock'
|
||||
|
||||
check-and-build:
|
||||
needs: changes
|
||||
if: |
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'pull_request' &&
|
||||
github.event.pull_request.draft == false &&
|
||||
needs.changes.outputs.docs == 'true') ||
|
||||
(github.event_name == 'push' && needs.changes.outputs.docs == 'true')
|
||||
runs-on: ubuntu-22.04
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
@@ -41,8 +85,10 @@ jobs:
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
# generate_docs_json.py only needs the invokeai package importable
|
||||
# (pydantic + invokeai.app/backend). Skip the [test] extra to keep CI fast.
|
||||
- name: install python dependencies
|
||||
run: uv pip install --editable .[test]
|
||||
run: uv pip install --editable .
|
||||
|
||||
# Node (needed for docs build)
|
||||
- name: setup node
|
||||
@@ -68,6 +114,8 @@ jobs:
|
||||
- name: build docs
|
||||
run: pnpm build
|
||||
working-directory: docs
|
||||
env:
|
||||
DEPLOY_TARGET: ${{ github.event_name == 'workflow_dispatch' && inputs.deploy_target || github.ref == 'refs/heads/main' && 'ghpages' || 'custom' }}
|
||||
|
||||
# Upload artifact for deploy (main branch only)
|
||||
- name: upload pages artifact
|
||||
@@ -80,6 +128,10 @@ jobs:
|
||||
if: github.ref == 'refs/heads/main'
|
||||
needs: check-and-build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
|
||||
49
.github/workflows/mkdocs-material.yml
vendored
49
.github/workflows/mkdocs-material.yml
vendored
@@ -1,49 +0,0 @@
|
||||
# This is a mostly a copy-paste from https://github.com/squidfunk/mkdocs-material/blob/master/docs/publishing-your-site.md
|
||||
|
||||
name: mkdocs
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
|
||||
REPO_NAME: '${{ github.repository }}'
|
||||
SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
|
||||
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: '3.12'
|
||||
cache: pip
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
- name: set cache id
|
||||
run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
|
||||
|
||||
- name: use cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
key: mkdocs-material-${{ env.cache_id }}
|
||||
path: .cache
|
||||
restore-keys: |
|
||||
mkdocs-material-
|
||||
|
||||
- name: install dependencies
|
||||
run: python -m pip install ".[docs]"
|
||||
|
||||
- name: build & deploy
|
||||
run: mkdocs gh-deploy --force
|
||||
30
crowdin.yml
30
crowdin.yml
@@ -1,30 +0,0 @@
|
||||
# Crowdin Configuration
|
||||
# https://developer.crowdin.com/configuration-file/
|
||||
|
||||
project_id_env: CROWDIN_PROJECT_ID
|
||||
api_token_env: CROWDIN_PERSONAL_TOKEN
|
||||
|
||||
preserve_hierarchy: true
|
||||
|
||||
# Map Crowdin's zh-TW to zh-Hant to match the existing file convention
|
||||
languages_mapping:
|
||||
locale:
|
||||
zh-TW: zh-Hant
|
||||
|
||||
files:
|
||||
# Web App UI Translations
|
||||
- source: /invokeai/frontend/web/public/locales/en.json
|
||||
translation: /invokeai/frontend/web/public/locales/%locale%.json
|
||||
|
||||
# Documentation - Starlight UI Strings
|
||||
- source: /docs/src/content/i18n/en.json
|
||||
translation: /docs/src/content/i18n/%locale%.json
|
||||
|
||||
# Documentation - Content Pages (MD and MDX)
|
||||
- source: /docs/src/content/docs/**/*.{md,mdx}
|
||||
translation: /docs/src/content/docs/%locale%/**/%original_file_name%
|
||||
# Exclude translations directory to avoid re-uploading them as source files
|
||||
ignore:
|
||||
- /docs/src/content/docs/%locale%/**/*
|
||||
# Translate full paragraphs rather than splitting into sentences
|
||||
content_segmentation: 0
|
||||
@@ -8,11 +8,16 @@ import starlightLlmsText from 'starlight-llms-txt';
|
||||
import starlightChangelogs, { makeChangelogsSidebarLinks } from 'starlight-changelogs';
|
||||
// import starlightContextualMenu from 'starlight-contextual-menu';
|
||||
|
||||
// Deployment target: 'custom' (default, custom domain at invoke.ai) or 'ghpages'
|
||||
// (GitHub Pages project URL at invoke-ai.github.io/InvokeAI). Drive site/base from this
|
||||
// so the same source can be deployed to either target.
|
||||
const deployTarget = process.env.DEPLOY_TARGET ?? 'custom';
|
||||
const isGhPages = deployTarget === 'ghpages';
|
||||
|
||||
// https://astro.build/config
|
||||
export default defineConfig({
|
||||
site: 'https://invoke.ai',
|
||||
// base is only needed if no custom domain is available, or if the site is hosted in a subdirectory
|
||||
// base: '/InvokeAI',
|
||||
site: isGhPages ? 'https://invoke-ai.github.io' : 'https://invoke.ai',
|
||||
base: isGhPages ? '/InvokeAI' : undefined,
|
||||
integrations: [
|
||||
starlight({
|
||||
// Content
|
||||
@@ -24,7 +29,7 @@ export default defineConfig({
|
||||
alt: 'InvokeAI Logo',
|
||||
replacesTitle: true,
|
||||
},
|
||||
favicon: './src/assets/invoke-icon.svg',
|
||||
favicon: 'favicon.svg',
|
||||
editLink: {
|
||||
baseUrl: 'https://github.com/invoke-ai/InvokeAI/edit/main/docs',
|
||||
},
|
||||
|
||||
1
docs/public/CNAME
Normal file
1
docs/public/CNAME
Normal file
@@ -0,0 +1 @@
|
||||
invoke.ai
|
||||
@@ -1 +1,11 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128"><path fill-rule="evenodd" d="M81 36 64 0 47 36l-1 2-9-10a6 6 0 0 0-9 9l10 10h-2L0 64l36 17h2L28 91a6 6 0 1 0 9 9l9-10 1 2 17 36 17-36v-2l9 10a6 6 0 1 0 9-9l-9-9 2-1 36-17-36-17-2-1 9-9a6 6 0 1 0-9-9l-9 10v-2Zm-17 2-2 5c-4 8-11 15-19 19l-5 2 5 2c8 4 15 11 19 19l2 5 2-5c4-8 11-15 19-19l5-2-5-2c-8-4-15-11-19-19l-2-5Z" clip-rule="evenodd"/><path d="M118 19a6 6 0 0 0-9-9l-3 3a6 6 0 1 0 9 9l3-3Zm-96 4c-2 2-6 2-9 0l-3-3a6 6 0 1 1 9-9l3 3c3 2 3 6 0 9Zm0 82c-2-2-6-2-9 0l-3 3a6 6 0 1 0 9 9l3-3c3-2 3-6 0-9Zm96 4a6 6 0 0 1-9 9l-3-3a6 6 0 1 1 9-9l3 3Z"/><style>path{fill:#000}@media (prefers-color-scheme:dark){path{fill:#fff}}</style></svg>
|
||||
<svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_98_2)">
|
||||
<rect width="512" height="512" rx="54" fill="#E6FD13"/>
|
||||
<path d="M313.561 165.334H416V96H96V165.334H198.439L313.561 346.666H416V416H96V346.666H198.439" stroke="black" stroke-width="31"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_98_2">
|
||||
<rect width="512" height="512" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 696 B After Width: | Height: | Size: 432 B |
@@ -130,4 +130,4 @@ In the current UI, the `Seed Behaviour` setting controls how seeds are reused ac
|
||||
- Be careful with multiple groups, because the number of combinations grows quickly.
|
||||
- Review the expanded prompt list before launching a large batch.
|
||||
- Use dynamic prompting for variation, not to avoid thinking through the base prompt.
|
||||
- When one specific term needs more emphasis, use [Prompting Syntax](/concepts/prompt-syntax) instead of adding more dynamic groups.
|
||||
- When one specific term needs more emphasis, use [Prompting Syntax](../prompt-syntax) instead of adding more dynamic groups.
|
||||
|
||||
@@ -43,17 +43,17 @@ There are two prompt boxes: **Positive Prompt** & **Negative Prompt**.
|
||||
<LinkCard
|
||||
title="Prompting Guide"
|
||||
description="Learn how to structure prompts, use positive and negative prompts well, and iterate toward better results."
|
||||
href="/concepts/prompting-guide"
|
||||
href="../prompting-guide"
|
||||
/>
|
||||
<LinkCard
|
||||
title="Prompting Syntax"
|
||||
description="Learn InvokeAI's advanced prompt weighting and composition syntax, including `+`, `-`, `.blend()`, and `.and()`."
|
||||
href="/concepts/prompt-syntax"
|
||||
href="../prompt-syntax"
|
||||
/>
|
||||
<LinkCard
|
||||
title="Dynamic Prompting"
|
||||
description="Expand one prompt into many prompt variations with curly-brace syntax."
|
||||
href="/concepts/dynamic-prompting"
|
||||
href="../dynamic-prompting"
|
||||
/>
|
||||
</CardGrid>
|
||||
|
||||
@@ -78,7 +78,7 @@ Invoke offers a number of different workflows for interacting with models to pro
|
||||
<Steps>
|
||||
1. **Fine-tuning your prompt:**
|
||||
|
||||
The more specific you are, the closer the image will turn out to what is in your head. Adding more details in the Positive or Negative Prompt can help add or remove parts of the image. You can also use advanced techniques like upweighting and downweighting to control the influence of specific words. Learn more in the [Prompting Guide](/concepts/prompting-guide) and [Prompting Syntax](/concepts/prompt-syntax).
|
||||
The more specific you are, the closer the image will turn out to what is in your head. Adding more details in the Positive or Negative Prompt can help add or remove parts of the image. You can also use advanced techniques like upweighting and downweighting to control the influence of specific words. Learn more in the [Prompting Guide](../prompting-guide) and [Prompting Syntax](../prompt-syntax).
|
||||
|
||||
:::tip
|
||||
If you're seeing poor results, try adding the things you don't like about the image to your negative prompt. E.g. *distorted, low quality, unrealistic, etc.*
|
||||
@@ -99,7 +99,7 @@ Invoke offers a number of different workflows for interacting with models to pro
|
||||
|
||||
5. **Explore Advanced Settings:**
|
||||
|
||||
InvokeAI has a full suite of tools available to allow you complete control over your image creation process. Check out our [docs if you want to learn more](https://invoke-ai.github.io/InvokeAI/features/).
|
||||
InvokeAI has a full suite of tools available to allow you complete control over your image creation process. Check out our [features docs](../../features/gallery) if you want to learn more.
|
||||
</Steps>
|
||||
|
||||
## Terms & Concepts
|
||||
|
||||
@@ -53,4 +53,4 @@ In this situation, you may need to provide some additional information to identi
|
||||
Add `:v2` to the repo ID and use that when installing the model: `monster-labs/control_v1p_sd15_qrcode_monster:v2`
|
||||
:::
|
||||
|
||||
[set up in the config file]: /configuration/invokeai-yaml
|
||||
[set up in the config file]: ../../configuration/invokeai-yaml
|
||||
|
||||
@@ -10,18 +10,18 @@ import { Card, LinkCard, CardGrid } from '@astrojs/starlight/components';
|
||||
<CardGrid>
|
||||
<LinkCard
|
||||
title="Prompting Guide"
|
||||
href="/concepts/prompting-guide"
|
||||
href="../prompting-guide"
|
||||
description="Learn how to write effective prompts for InvokeAI."
|
||||
/>
|
||||
|
||||
<LinkCard
|
||||
title="Dynamic Prompting"
|
||||
href="/concepts/dynamic-prompting"
|
||||
href="../dynamic-prompting"
|
||||
description="Learn how to create many prompt variations from a single template."
|
||||
/>
|
||||
</CardGrid>
|
||||
|
||||
InvokeAI supports Compel-style prompt weighting and prompt functions for `SD 1.5` and `SDXL` text conditioning workflows. Recent model families, including `FLUX`, `Z-Image`, `CogView4`, and `Qwen Image`, bypass Compel and do not use the syntax documented on this page. This page documents syntax for those Compel-based workflows only. If you want general advice on writing better prompts, start with [Prompting Guide](/concepts/prompting-guide).
|
||||
InvokeAI supports Compel-style prompt weighting and prompt functions for `SD 1.5` and `SDXL` text conditioning workflows. Recent model families, including `FLUX`, `Z-Image`, `CogView4`, and `Qwen Image`, bypass Compel and do not use the syntax documented on this page. This page documents syntax for those Compel-based workflows only. If you want general advice on writing better prompts, start with [Prompting Guide](../prompting-guide).
|
||||
|
||||
:::note[Compatibility note]
|
||||
If a weighted prompt seems to be ignored, check whether you are using an `SD 1.5` or `SDXL` workflow. Compel syntax on this page does not apply to newer model families such as `FLUX`, `Z-Image`, `CogView4`, and `Qwen Image`.
|
||||
@@ -134,5 +134,5 @@ Use unescaped parentheses only when you mean grouping or weighting.
|
||||
|
||||
## Related pages
|
||||
|
||||
- For practical prompt-writing advice, read [Prompting Guide](/concepts/prompting-guide).
|
||||
- For prompt expansion and permutations, read [Dynamic Prompting](/concepts/dynamic-prompting).
|
||||
- For practical prompt-writing advice, read [Prompting Guide](../prompting-guide).
|
||||
- For prompt expansion and permutations, read [Dynamic Prompting](../dynamic-prompting).
|
||||
|
||||
@@ -10,13 +10,13 @@ import { Card, CardGrid, Steps, LinkCard } from '@astrojs/starlight/components';
|
||||
<CardGrid>
|
||||
<LinkCard
|
||||
title="Prompting Syntax"
|
||||
href="/concepts/prompt-syntax"
|
||||
href="../prompt-syntax"
|
||||
description="Learn how to weight prompt terms, blend concepts, and use prompt conjunctions for more control."
|
||||
/>
|
||||
|
||||
<LinkCard
|
||||
title="Dynamic Prompting"
|
||||
href="/concepts/dynamic-prompting"
|
||||
href="../dynamic-prompting"
|
||||
description="Learn how to create many prompt variations from a single template."
|
||||
/>
|
||||
</CardGrid>
|
||||
@@ -82,7 +82,7 @@ Good negative prompts usually name specific failure modes: `blurry`, `distorted
|
||||
|
||||
5. Escalate only when needed
|
||||
|
||||
If the result is close but one element is too weak or too strong, move to [Prompting Syntax](/concepts/prompt-syntax) for weighting. If you want lots of variations, use [Dynamic Prompting](/concepts/dynamic-prompting).
|
||||
If the result is close but one element is too weak or too strong, move to [Prompting Syntax](../prompt-syntax) for weighting. If you want lots of variations, use [Dynamic Prompting](../dynamic-prompting).
|
||||
</Steps>
|
||||
|
||||
Here is the same idea refined in stages:
|
||||
@@ -108,10 +108,10 @@ The same prompt can behave very differently across models.
|
||||
|
||||
Reach for advanced syntax when a normal comma-separated prompt is almost right, but you need more control.
|
||||
|
||||
- Use [Prompting Syntax](/concepts/prompt-syntax) when one term needs more or less influence.
|
||||
- Use [Prompting Syntax](../prompt-syntax) when one term needs more or less influence.
|
||||
- Use `.blend()` when you want to mix concepts or styles deliberately.
|
||||
- Use `.and()` when you want separate prompt clauses encoded individually.
|
||||
- Use [Dynamic Prompting](/concepts/dynamic-prompting) when you want many prompt variations from one template.
|
||||
- Use [Dynamic Prompting](../dynamic-prompting) when you want many prompt variations from one template.
|
||||
|
||||
## Common mistakes
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ import SystemRequirementsLink from '@components/SystemRequirmentsLink.astro'
|
||||
Docker Desktop on Windows [includes GPU support](https://www.docker.com/blog/wsl-2-gpu-support-for-docker-desktop-on-nvidia-gpus/).
|
||||
</TabItem>
|
||||
<TabItem label="MacOS" icon="apple">
|
||||
Docker can not access the GPU on macOS, so your generation speeds will be slow. Use the [launcher](/start-here/installation) instead.
|
||||
Docker can not access the GPU on macOS, so your generation speeds will be slow. Use the [launcher](../../start-here/installation) instead.
|
||||
</TabItem>
|
||||
<TabItem label="Linux" icon="linux">
|
||||
Configure Docker to access your machine's GPU.
|
||||
|
||||
@@ -10,7 +10,7 @@ If you're a new contributor to InvokeAI or Open Source Projects, this is the gui
|
||||
## New Contributor Checklist
|
||||
|
||||
<Steps>
|
||||
1. Set up your local development environment & fork of InvokAI by following [the steps outlined here](/development/setup/dev-environment/#initial-setup)
|
||||
1. Set up your local development environment & fork of InvokAI by following [the steps outlined here](../../development/setup/dev-environment/#initial-setup)
|
||||
|
||||
2. Set up your local tooling with [this guide](../LOCAL_DEVELOPMENT.md). Feel free to skip this step if you already have tooling you're comfortable with.
|
||||
|
||||
|
||||
@@ -10,11 +10,11 @@ import { Steps, LinkCard } from '@astrojs/starlight/components';
|
||||
<LinkCard
|
||||
title="Invocations"
|
||||
description="Learn about the invocation system, which is the foundation for creating nodes in InvokeAI."
|
||||
href="/development/architecture/invocations" />
|
||||
href="../../architecture/invocations" />
|
||||
|
||||
2. Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the nodes usage & examples to help others more easily use your node. Including the tag "invokeai-node" in your repository's README can also help other users find it more easily.
|
||||
|
||||
3. Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list
|
||||
3. Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](../../../workflows/community-nodes) list
|
||||
|
||||
Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node.
|
||||
|
||||
@@ -23,7 +23,7 @@ import { Steps, LinkCard } from '@astrojs/starlight/components';
|
||||
|
||||
### Community Node Template
|
||||
|
||||
Append the following template to your pull request and the [Community Nodes](/workflows/community-nodes) page when submitting a node to be added to the community nodes list:
|
||||
Append the following template to your pull request and the [Community Nodes](../../../workflows/community-nodes) page when submitting a node to be added to the community nodes list:
|
||||
|
||||
```md
|
||||
---
|
||||
|
||||
@@ -13,35 +13,35 @@ This section of the documentation is for developers interested in contributing t
|
||||
<Card title="Setup" icon="download">
|
||||
Instructions for setting up your local development environment, including how to run the project locally and how to set up your tooling.
|
||||
|
||||
<LinkButton href="/development/setup/dev-environment/" icon="right-arrow" variant="primary">
|
||||
<LinkButton href="./setup/dev-environment/" icon="right-arrow" variant="primary">
|
||||
Learn more
|
||||
</LinkButton>
|
||||
</Card>
|
||||
<Card title="Front End" icon="laptop">
|
||||
An introduction to the front end codebase, including the technologies used and how to get started.
|
||||
|
||||
<LinkButton href="/development/front-end/" icon="right-arrow" variant="secondary">
|
||||
<LinkButton href="./front-end/" icon="right-arrow" variant="secondary">
|
||||
Learn more
|
||||
</LinkButton>
|
||||
</Card>
|
||||
<Card title="Guides" icon="open-book">
|
||||
A collection of guides for common development tasks, such as adding new model architectures, making tests, and more.
|
||||
|
||||
<LinkButton href="/development/guides/models" icon="right-arrow" variant="secondary">
|
||||
<LinkButton href="./guides/models" icon="right-arrow" variant="secondary">
|
||||
Learn more
|
||||
</LinkButton>
|
||||
</Card>
|
||||
<Card title="Architecture" icon="puzzle">
|
||||
An overview of the InvokeAI architecture, including the major components and how they interact.
|
||||
|
||||
<LinkButton href="/development/architecture/overview/" icon="right-arrow" variant="secondary">
|
||||
<LinkButton href="./architecture/overview/" icon="right-arrow" variant="secondary">
|
||||
Learn more
|
||||
</LinkButton>
|
||||
</Card>
|
||||
<Card title="Process" icon="list-format">
|
||||
An overview of the development processes we follow, including our pull request merge policy and release process.
|
||||
|
||||
<LinkButton href="/development/process/pr-merge-policy/" icon="right-arrow" variant="secondary">
|
||||
<LinkButton href="./process/pr-merge-policy/" icon="right-arrow" variant="secondary">
|
||||
Learn more
|
||||
</LinkButton>
|
||||
</Card>
|
||||
|
||||
@@ -27,5 +27,5 @@ The text is committed to a raster layer when you press **Enter**. Press **Esc**
|
||||
<LinkCard
|
||||
title="Canvas Text Tool"
|
||||
description="Learn about the implementation of the Text tool, including the editor overlay, rasterization"
|
||||
href="/development/front-end/text-tool/"
|
||||
href="../../development/front-end/text-tool/"
|
||||
/>
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
---
|
||||
title: AI Image Generation<br /> for Creatives
|
||||
title: AI Image Generation for Creatives
|
||||
description: A leading creative engine built to empower professionals and enthusiasts alike.
|
||||
template: splash
|
||||
hero:
|
||||
title: AI Image Generation<br /> for Creatives
|
||||
tagline: Invoke is a free and open-source creative engine for AI-powered image generation. Built by creatives, for creatives. Self-hosted, fully customizable, and Apache 2.0 licensed.
|
||||
actions:
|
||||
- text: Get Started
|
||||
|
||||
@@ -13,13 +13,13 @@ import SystemRequirementsLink from '@components/SystemRequirmentsLink.astro'
|
||||
<LinkCard
|
||||
title="Local Installation Guide"
|
||||
description="If you want to use Invoke locally, you should probably use the launcher instead."
|
||||
href="/start-here/installation"
|
||||
href="../installation"
|
||||
/>
|
||||
|
||||
<LinkCard
|
||||
title="Developer Installation Guide"
|
||||
description="If you want to contribute to InvokeAI or run the app on the main branch, follow the developer installation guide instead."
|
||||
href="/development"
|
||||
href="../../development"
|
||||
/>
|
||||
|
||||
## Walkthrough
|
||||
@@ -188,6 +188,6 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
|
||||
If you run Invoke on a headless server, you might want to install and run Invoke on the command line.
|
||||
|
||||
We do not plan to maintain scripts to do this moving forward, instead focusing our dev resources on the GUI [launcher](/start-here/installation).
|
||||
We do not plan to maintain scripts to do this moving forward, instead focusing our dev resources on the GUI [launcher](../installation).
|
||||
|
||||
You can create your own scripts for this by copying the handful of commands in this guide. `uv`'s [`pip` interface docs](https://docs.astral.sh/uv/reference/cli/#uv-pip-install) may be useful.
|
||||
|
||||
@@ -36,7 +36,7 @@ The requirements below are rough guidelines for best performance. GPUs with less
|
||||
## Python
|
||||
|
||||
:::tip[The launcher installs python for you]
|
||||
You don't need to do this if you are installing with the [Invoke Launcher](/start-here/installation).
|
||||
You don't need to do this if you are installing with the [Invoke Launcher](../installation).
|
||||
:::
|
||||
|
||||
Invoke requires python `3.11` through `3.12`. If you don't already have one of these versions installed, we suggest installing `3.12`, as it will be supported for longer.
|
||||
@@ -126,12 +126,12 @@ Confirm that `rocm-smi` displays driver and CUDA versions after installation.
|
||||
|
||||
An alternative to installing ROCm locally is to use a [ROCm docker container] to run the application in a container.
|
||||
|
||||
[Low VRAM Guide]: /configuration/low-vram-mode
|
||||
[Low VRAM Guide]: ../../configuration/low-vram-mode
|
||||
[Nvidia Container Runtime]: https://developer.nvidia.com/container-runtime
|
||||
[an official installer]: https://www.python.org/downloads/
|
||||
[using `uv` to manage your python installation]: https://docs.astral.sh/uv/concepts/python-versions/#installing-a-python-version
|
||||
[Microsoft Visual C++ Redistributable]: https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
||||
[Invoke Launcher]: /start-here/installation
|
||||
[Invoke Launcher]: ../installation
|
||||
[CUDA Toolkit Downloads]: https://developer.nvidia.com/cuda-downloads
|
||||
[Cuda Docs]: https://developer.nvidia.com/cudnn
|
||||
[cuDNN support matrix]: https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html
|
||||
|
||||
@@ -12,7 +12,7 @@ If the troubleshooting steps on this page don't get you up and running, please e
|
||||
<LinkCard
|
||||
title="Installation Guide"
|
||||
description="Step-by-step instructions to get Invoke up and running."
|
||||
href="/start-here/installation"
|
||||
href="../../start-here/installation"
|
||||
/>
|
||||
|
||||
## Downloading models and using existing models
|
||||
@@ -109,9 +109,9 @@ To better understand how the `glibc` memory allocator works, see these reference
|
||||
|
||||
Note the differences between memory allocated as chunks in an arena vs. memory allocated with `mmap`. Under `glibc`'s default configuration, most model tensors get allocated as chunks in an arena making them vulnerable to the problem of fragmentation.
|
||||
|
||||
[model install docs]: /concepts/models
|
||||
[system requirements]: /start-here/system-requirements
|
||||
[Low VRAM mode guide]: /configuration/low-vram-mode
|
||||
[model install docs]: ../../concepts/models
|
||||
[system requirements]: ../../start-here/system-requirements
|
||||
[Low VRAM mode guide]: ../../configuration/low-vram-mode
|
||||
[create an issue]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[discord]: https://discord.gg/ZmtBAhwWhy
|
||||
[configuration docs]: /configuration/invokeai-yaml
|
||||
[configuration docs]: ../../configuration/invokeai-yaml
|
||||
|
||||
@@ -136,7 +136,7 @@ InvokeAI's node system is extensible. Community-created nodes can add new capabi
|
||||
To install a community node pack:
|
||||
|
||||
<Steps>
|
||||
1. Find a node pack from the [Community Nodes](/workflows/community-nodes) list.
|
||||
1. Find a node pack from the [Community Nodes](../community-nodes) list.
|
||||
2. Clone or download the node pack into the `nodes` folder inside your InvokeAI installation directory.
|
||||
3. Restart InvokeAI. The new nodes will appear in the node picker.
|
||||
</Steps>
|
||||
@@ -145,4 +145,4 @@ To install a community node pack:
|
||||
The recommended method is `git clone`, which makes it easy to update node packs later with `git pull`.
|
||||
:::
|
||||
|
||||
For more details and a full catalog of available community nodes, see the [Community Nodes](/workflows/community-nodes) page.
|
||||
For more details and a full catalog of available community nodes, see the [Community Nodes](../community-nodes) page.
|
||||
|
||||
@@ -12,7 +12,7 @@ If you're coming to InvokeAI from ComfyUI, welcome! You'll find things are simil
|
||||
InvokeAI's nodes tend to be more granular than default nodes in Comfy. This means each node in Invoke will do a specific task, and you might need to use multiple nodes to achieve the same result. The added granularity improves the control you have over your workflows.
|
||||
</Card>
|
||||
<Card title="Backend Differences" icon="puzzle">
|
||||
InvokeAI's backend and ComfyUI's backend are very different, which means Comfy workflows are not able to be imported directly into InvokeAI. However, we have created a [list of popular workflows](/workflows/community-nodes) for you to get started with Nodes in InvokeAI!
|
||||
InvokeAI's backend and ComfyUI's backend are very different, which means Comfy workflows are not able to be imported directly into InvokeAI. However, we have created a [list of popular workflows](../community-nodes) for you to get started with Nodes in InvokeAI!
|
||||
</Card>
|
||||
|
||||
## Node Equivalents
|
||||
|
||||
@@ -13,7 +13,7 @@ The workflow editor is a blank canvas allowing for the use of individual functio
|
||||
A node graph is composed of multiple nodes that are connected together to create a workflow. Nodes' inputs and outputs are connected by dragging connectors from node to node. Inputs and outputs are color-coded for ease of use.
|
||||
|
||||
:::tip[New to Diffusion?]
|
||||
If you're not familiar with Diffusion, take a look at our [Diffusion Overview](/concepts/diffusion). Understanding how diffusion works will enable you to more easily use the Workflow Editor and build workflows to suit your needs.
|
||||
If you're not familiar with Diffusion, take a look at our [Diffusion Overview](../../concepts/diffusion). Understanding how diffusion works will enable you to more easily use the Workflow Editor and build workflows to suit your needs.
|
||||
:::
|
||||
|
||||
## Features
|
||||
|
||||
@@ -17,7 +17,7 @@ You can read more about nodes and how to use the node editor by checking out the
|
||||
<LinkCard
|
||||
title="Node Editor Deep Dive"
|
||||
description="Learn how to interact with the Node Editor, connect nodes, and build powerful custom workflows."
|
||||
href="/workflows/editor-interface"
|
||||
href="../workflows/editor-interface"
|
||||
/>
|
||||
|
||||
## Downloading New Nodes
|
||||
@@ -27,5 +27,5 @@ To download a new node and enhance your workflows with new features, visit our l
|
||||
<LinkCard
|
||||
title="Explore Community Nodes"
|
||||
description="Discover and download new nodes created by the InvokeAI community to extend your workflow capabilities."
|
||||
href="/workflows/community-nodes"
|
||||
href="../workflows/community-nodes"
|
||||
/>
|
||||
|
||||
6
docs/src/lib/base-path.ts
Normal file
6
docs/src/lib/base-path.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
export const withBase = (path: string, baseUrl: string) => {
|
||||
const normalizedBase = baseUrl.endsWith('/') ? baseUrl : `${baseUrl}/`;
|
||||
const normalizedPath = path.replace(/^\//, '');
|
||||
|
||||
return `${normalizedBase}${normalizedPath}`;
|
||||
};
|
||||
@@ -1,6 +1,7 @@
|
||||
---
|
||||
import { LinkCard, Icon, LinkButton } from '@astrojs/starlight/components';
|
||||
import { type StarlightIcon } from '@astrojs/starlight/types';
|
||||
import { withBase } from '../base-path';
|
||||
|
||||
type LauncherDownloadOption = {
|
||||
icon: StarlightIcon;
|
||||
@@ -45,7 +46,7 @@ const manualDownloadOptions = {
|
||||
docker: {
|
||||
headline: 'Run with Docker',
|
||||
description: 'For users who want to run Invoke without installing dependencies directly on their system.',
|
||||
href: '/configuration/docker/',
|
||||
href: withBase('/configuration/docker/', import.meta.env.BASE_URL),
|
||||
},
|
||||
};
|
||||
---
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
---
|
||||
import { LinkCard } from '@astrojs/starlight/components';
|
||||
import { withBase } from '../base-path';
|
||||
---
|
||||
|
||||
<LinkCard
|
||||
title="System Requirements"
|
||||
description="Please check the system requirements page to make sure your hardware is capable of running the desired models."
|
||||
href="/start-here/system-requirements"
|
||||
href={withBase('/start-here/system-requirements/', import.meta.env.BASE_URL)}
|
||||
/>
|
||||
|
||||
@@ -0,0 +1,360 @@
|
||||
import { zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
// ---------------------------------------------------------------------------
// Module-level scaffolding for the modelSelected listener tests.
//
// Everything below runs once at module evaluation: mock model configs,
// vi.mock() stubs for the listener's collaborators, dynamic imports of the
// code under test (performed AFTER the mocks are registered), and capture of
// the listener effect so the tests can invoke it synchronously against a
// hand-built state object.
//
// NOTE(review): several vi.mock() factories close over consts declared above
// them. This presumably relies on vitest invoking mock factories lazily when
// the mocked module is first imported, not at hoist time — confirm against
// vitest's hoisting semantics if these tests ever fail with "cannot access
// before initialization".
// ---------------------------------------------------------------------------

// Mock model configs returned by selectors - these simulate what RTK Query provides
const mockAnimaQwen3Encoder = {
  key: 'qwen3-06b-key',
  hash: 'qwen3-06b-hash',
  name: 'Qwen3 0.6B Encoder',
  base: 'any' as const,
  type: 'qwen3_encoder' as const,
  variant: 'qwen3_06b' as const,
  format: 'qwen3_encoder' as const,
};

const mockAnimaVAE = {
  key: 'anima-vae-key',
  hash: 'anima-vae-hash',
  name: 'Anima VAE',
  base: 'anima' as const,
  type: 'vae' as const,
  format: 'diffusers' as const,
};

const mockT5Encoder = {
  key: 't5-xxl-key',
  hash: 't5-xxl-hash',
  name: 'T5-XXL Encoder',
  base: 'any' as const,
  type: 't5_encoder' as const,
  format: 't5_encoder' as const,
};

const mockAnimaMainModel = {
  key: 'anima-main-key',
  hash: 'anima-main-hash',
  name: 'Anima Generate',
  base: 'anima' as const,
  type: 'main' as const,
};

const mockFluxMainModel = {
  key: 'flux-main-key',
  hash: 'flux-main-hash',
  name: 'FLUX.1 Dev',
  base: 'flux' as const,
  type: 'main' as const,
};

// Track dispatched actions. The tests assert on `dispatched` directly, so
// beforeEach must reset it (see the describe blocks below... each suite does).
const dispatched: Array<{ type: string; payload: unknown }> = [];
const mockDispatch = vi.fn((action: { type: string; payload: unknown }) => {
  dispatched.push(action);
});

// Mock logger — silence all log levels.
vi.mock('app/logging/logger', () => ({
  logger: () => ({
    debug: vi.fn(),
    error: vi.fn(),
    warn: vi.fn(),
    info: vi.fn(),
  }),
}));

// Mock toast
vi.mock('features/toast/toast', () => ({
  toast: vi.fn(),
}));

// Mock i18next — t() echoes the key so assertions can match on keys.
vi.mock('i18next', () => ({
  t: (key: string) => key,
}));

// Mock model selectors from RTK Query hooks. These three are vi.fn()s held in
// module scope so individual tests can override their return values.

const mockSelectAnimaQwen3EncoderModels = vi.fn((_state: unknown) => [mockAnimaQwen3Encoder]);

const mockSelectAnimaVAEModels = vi.fn((_state: unknown) => [mockAnimaVAE]);

const mockSelectT5EncoderModels = vi.fn((_state: unknown) => [mockT5Encoder]);

vi.mock('services/api/hooks/modelsByType', () => ({
  // Delegate through wrappers so the vi.fn()s above stay swappable per test.
  selectAnimaQwen3EncoderModels: (state: unknown) => mockSelectAnimaQwen3EncoderModels(state),
  selectAnimaVAEModels: (state: unknown) => mockSelectAnimaVAEModels(state),
  selectT5EncoderModels: (state: unknown) => mockSelectT5EncoderModels(state),
  selectQwen3EncoderModels: vi.fn(() => []),
  selectZImageDiffusersModels: vi.fn(() => []),
  selectFluxVAEModels: vi.fn(() => []),
  selectGlobalRefImageModels: vi.fn(() => []),
  selectRegionalRefImageModels: vi.fn(() => []),
}));

// Mock model configs adapter
vi.mock('services/api/endpoints/models', () => ({
  modelConfigsAdapterSelectors: { selectById: vi.fn() },
  selectModelConfigsQuery: vi.fn(() => ({ data: undefined })),
}));

vi.mock('services/api/types', () => ({
  isFluxKontextModelConfig: vi.fn(() => false),
  isFluxReduxModelConfig: vi.fn(() => false),
}));

// Mock canvas selectors
vi.mock('features/controlLayers/store/canvasStagingAreaSlice', () => ({
  buildSelectIsStaging: vi.fn(() => vi.fn(() => false)),
  selectCanvasSessionId: vi.fn(() => null),
}));

vi.mock('features/controlLayers/store/selectors', () => ({
  selectAllEntitiesOfType: vi.fn(() => []),
  selectBboxModelBase: vi.fn(() => 'anima'),
  selectCanvasSlice: vi.fn(() => ({})),
}));

vi.mock('features/controlLayers/store/refImagesSlice', () => ({
  refImageConfigChanged: vi.fn(),
  refImageModelChanged: vi.fn(),
  selectReferenceImageEntities: vi.fn(() => []),
}));

// Partial mock: keep the real module but stub two entry points.
vi.mock('features/controlLayers/store/types', async () => {
  const actual = await vi.importActual('features/controlLayers/store/types');
  return {
    ...(actual as Record<string, unknown>),
    getEntityIdentifier: vi.fn(),
    isFlux2ReferenceImageConfig: vi.fn(() => false),
  };
});

vi.mock('features/controlLayers/store/util', () => ({
  initialFlux2ReferenceImage: {},
  initialFluxKontextReferenceImage: {},
  initialFLUXRedux: {},
  initialIPAdapter: {},
}));

vi.mock('features/modelManagerV2/models', () => ({
  SUPPORTS_REF_IMAGES_BASE_MODELS: ['sd-1', 'sdxl', 'flux', 'flux2'],
}));

vi.mock('features/controlLayers/store/canvasSlice', () => ({
  bboxSyncedToOptimalDimension: vi.fn(() => ({ type: 'bboxSyncedToOptimalDimension' })),
  rgRefImageModelChanged: vi.fn(),
}));

vi.mock('features/controlLayers/store/lorasSlice', () => ({
  loraIsEnabledChanged: vi.fn((payload: unknown) => ({ type: 'loraIsEnabledChanged', payload })),
}));

// Capture the listener effect so we can call it directly
let capturedEffect: ((action: unknown, api: unknown) => void) | null = null;

// Import actual action creators for assertion matching. importActual bypasses
// any mock of paramsSlice so the `.type` strings are the real ones reducers use.
const paramsSliceActual = (await vi.importActual('features/controlLayers/store/paramsSlice')) as {
  animaQwen3EncoderModelSelected: { type: string };
  animaT5EncoderModelSelected: { type: string };
  animaVaeModelSelected: { type: string };
};
const { animaQwen3EncoderModelSelected, animaT5EncoderModelSelected, animaVaeModelSelected } = paramsSliceActual;

// Import after mocks are set up (dynamic import ensures the module under test
// sees the mocked dependency graph).
const { addModelSelectedListener } = await import('./modelSelected');
const { modelSelected } = await import('features/parameters/store/actions');
const { zParameterModel } = await import('features/parameters/types/parameterSchemas');

// Capture the effect: the fake startAppListening just records `config.effect`
// so tests can invoke it synchronously. `as never` silences the mismatch with
// the real AppStartListening signature.
addModelSelectedListener(((config: { effect: typeof capturedEffect }) => {
  capturedEffect = config.effect;
}) as never);
|
||||
|
||||
function buildMockState(overrides: Record<string, unknown> = {}) {
|
||||
return {
|
||||
params: {
|
||||
model: null,
|
||||
vae: null,
|
||||
zImageVaeModel: null,
|
||||
zImageQwen3EncoderModel: null,
|
||||
zImageQwen3SourceModel: null,
|
||||
animaVaeModel: null,
|
||||
animaQwen3EncoderModel: null,
|
||||
animaT5EncoderModel: null,
|
||||
animaScheduler: 'euler',
|
||||
kleinVaeModel: null,
|
||||
kleinQwen3EncoderModel: null,
|
||||
zImageScheduler: 'euler',
|
||||
...overrides,
|
||||
},
|
||||
loras: { loras: [] },
|
||||
canvas: {},
|
||||
};
|
||||
}
|
||||
|
||||
// Exercises the listener's Anima-specific defaulting: when the selected main
// model switches TO base 'anima', the listener should populate default Qwen3
// encoder / T5 encoder / VAE selections; when it switches AWAY, it should
// clear them. Assertions go through `dispatched` (populated by mockDispatch)
// and match on the real action `.type` strings obtained via importActual.
describe('modelSelected listener - Anima defaulting', () => {
  beforeEach(() => {
    // Reset the shared dispatch log and restore selector defaults, since
    // individual tests override the selector mocks' return values.
    dispatched.length = 0;
    mockDispatch.mockClear();
    mockSelectAnimaQwen3EncoderModels.mockReturnValue([mockAnimaQwen3Encoder]);
    mockSelectAnimaVAEModels.mockReturnValue([mockAnimaVAE]);
    mockSelectT5EncoderModels.mockReturnValue([mockT5Encoder]);
  });

  it('should dispatch encoder models with full ModelIdentifierField payloads when switching to Anima', () => {
    // Start from a non-Anima (FLUX) model so the switch triggers defaulting.
    const state = buildMockState({ model: mockFluxMainModel });
    const action = modelSelected(zParameterModel.parse(mockAnimaMainModel));

    // Invoke the captured effect with a minimal listener-middleware API.
    capturedEffect!(action, {
      getState: () => state,
      dispatch: mockDispatch,
    });

    // Find the dispatched actions for Anima encoders
    const qwen3Dispatch = dispatched.find((a) => a.type === animaQwen3EncoderModelSelected.type);
    const t5Dispatch = dispatched.find((a) => a.type === animaT5EncoderModelSelected.type);
    const vaeDispatch = dispatched.find((a) => a.type === animaVaeModelSelected.type);

    // All three should have been dispatched
    expect(qwen3Dispatch).toBeDefined();
    expect(t5Dispatch).toBeDefined();
    expect(vaeDispatch).toBeDefined();

    // The payloads must pass zModelIdentifierField validation (the actual schema used by reducers)
    expect(zModelIdentifierField.safeParse(qwen3Dispatch!.payload).success).toBe(true);
    expect(zModelIdentifierField.safeParse(t5Dispatch!.payload).success).toBe(true);
    expect(zModelIdentifierField.safeParse(vaeDispatch!.payload).success).toBe(true);
  });

  it('should include hash and type in Qwen3 encoder payload', () => {
    const state = buildMockState({ model: mockFluxMainModel });
    const action = modelSelected(zParameterModel.parse(mockAnimaMainModel));

    capturedEffect!(action, {
      getState: () => state,
      dispatch: mockDispatch,
    });

    // The payload must carry the identifying fields of the mocked encoder
    // config, not a truncated { key, name, base } shape.
    const qwen3Dispatch = dispatched.find((a) => a.type === animaQwen3EncoderModelSelected.type);
    expect(qwen3Dispatch!.payload).toMatchObject({
      key: mockAnimaQwen3Encoder.key,
      hash: mockAnimaQwen3Encoder.hash,
      name: mockAnimaQwen3Encoder.name,
      base: mockAnimaQwen3Encoder.base,
      type: mockAnimaQwen3Encoder.type,
    });
  });

  it('should include hash and type in T5 encoder payload', () => {
    const state = buildMockState({ model: mockFluxMainModel });
    const action = modelSelected(zParameterModel.parse(mockAnimaMainModel));

    capturedEffect!(action, {
      getState: () => state,
      dispatch: mockDispatch,
    });

    const t5Dispatch = dispatched.find((a) => a.type === animaT5EncoderModelSelected.type);
    expect(t5Dispatch!.payload).toMatchObject({
      key: mockT5Encoder.key,
      hash: mockT5Encoder.hash,
      name: mockT5Encoder.name,
      base: mockT5Encoder.base,
      type: mockT5Encoder.type,
    });
  });

  it('should not dispatch encoder defaults when Anima models are already set', () => {
    // Pre-populate all three Anima selections; the listener should leave
    // user-chosen models untouched.
    const existingQwen3 = { key: 'existing', hash: 'h', name: 'Existing', base: 'any', type: 'qwen3_encoder' };
    const existingT5 = { key: 'existing-t5', hash: 'h', name: 'Existing T5', base: 'any', type: 't5_encoder' };
    const existingVae = { key: 'existing-vae', hash: 'h', name: 'Existing VAE', base: 'anima', type: 'vae' };

    const state = buildMockState({
      model: mockFluxMainModel,
      animaQwen3EncoderModel: existingQwen3,
      animaT5EncoderModel: existingT5,
      animaVaeModel: existingVae,
    });

    const action = modelSelected(zParameterModel.parse(mockAnimaMainModel));

    capturedEffect!(action, {
      getState: () => state,
      dispatch: mockDispatch,
    });

    // Should NOT dispatch any encoder model selections since they're already set
    const qwen3Dispatch = dispatched.find((a) => a.type === animaQwen3EncoderModelSelected.type);
    const t5Dispatch = dispatched.find((a) => a.type === animaT5EncoderModelSelected.type);
    const vaeDispatch = dispatched.find((a) => a.type === animaVaeModelSelected.type);

    expect(qwen3Dispatch).toBeUndefined();
    expect(t5Dispatch).toBeUndefined();
    expect(vaeDispatch).toBeUndefined();
  });

  it('should not dispatch encoder defaults when no encoder models are available', () => {
    // Simulate an install with no Anima encoder/VAE models present.
    mockSelectAnimaQwen3EncoderModels.mockReturnValue([]);
    mockSelectAnimaVAEModels.mockReturnValue([]);

    const state = buildMockState({ model: mockFluxMainModel });
    const action = modelSelected(zParameterModel.parse(mockAnimaMainModel));

    capturedEffect!(action, {
      getState: () => state,
      dispatch: mockDispatch,
    });

    const qwen3Dispatch = dispatched.find((a) => a.type === animaQwen3EncoderModelSelected.type);
    const t5Dispatch = dispatched.find((a) => a.type === animaT5EncoderModelSelected.type);
    const vaeDispatch = dispatched.find((a) => a.type === animaVaeModelSelected.type);

    expect(qwen3Dispatch).toBeUndefined();
    expect(t5Dispatch).toBeUndefined();
    expect(vaeDispatch).toBeUndefined();
  });

  it('should clear Anima models when switching away from Anima', () => {
    const existingQwen3 = { key: 'existing', hash: 'h', name: 'Existing', base: 'any', type: 'qwen3_encoder' };
    const existingT5 = { key: 'existing-t5', hash: 'h', name: 'Existing T5', base: 'any', type: 't5_encoder' };
    const existingVae = { key: 'existing-vae', hash: 'h', name: 'Existing VAE', base: 'anima', type: 'vae' };

    // Start ON an Anima model with selections populated, then switch to FLUX.
    const state = buildMockState({
      model: mockAnimaMainModel,
      animaQwen3EncoderModel: existingQwen3,
      animaT5EncoderModel: existingT5,
      animaVaeModel: existingVae,
    });

    const action = modelSelected(zParameterModel.parse(mockFluxMainModel));

    capturedEffect!(action, {
      getState: () => state,
      dispatch: mockDispatch,
    });

    // Should dispatch null for all three
    const qwen3Dispatch = dispatched.find((a) => a.type === animaQwen3EncoderModelSelected.type);
    const t5Dispatch = dispatched.find((a) => a.type === animaT5EncoderModelSelected.type);
    const vaeDispatch = dispatched.find((a) => a.type === animaVaeModelSelected.type);

    expect(qwen3Dispatch).toBeDefined();
    expect(qwen3Dispatch!.payload).toBeNull();
    expect(t5Dispatch).toBeDefined();
    expect(t5Dispatch!.payload).toBeNull();
    expect(vaeDispatch).toBeDefined();
    expect(vaeDispatch!.payload).toBeNull();
  });
});
|
||||
|
||||
describe('zModelIdentifierField schema validation', () => {
|
||||
it('should reject payloads missing hash and type', () => {
|
||||
const incomplete = { key: 'some-key', name: 'Some Model', base: 'any' };
|
||||
expect(zModelIdentifierField.safeParse(incomplete).success).toBe(false);
|
||||
});
|
||||
|
||||
it('should accept payloads with all required fields', () => {
|
||||
const complete = { key: 'some-key', hash: 'some-hash', name: 'Some Model', base: 'any', type: 'qwen3_encoder' };
|
||||
expect(zModelIdentifierField.safeParse(complete).success).toBe(true);
|
||||
});
|
||||
});
|
||||
@@ -200,8 +200,10 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
|
||||
dispatch(
|
||||
animaQwen3EncoderModelSelected({
|
||||
key: qwen3Encoder.key,
|
||||
hash: qwen3Encoder.hash,
|
||||
name: qwen3Encoder.name,
|
||||
base: qwen3Encoder.base,
|
||||
type: qwen3Encoder.type,
|
||||
})
|
||||
);
|
||||
}
|
||||
@@ -221,8 +223,10 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
|
||||
dispatch(
|
||||
animaT5EncoderModelSelected({
|
||||
key: t5Encoder.key,
|
||||
hash: t5Encoder.hash,
|
||||
name: t5Encoder.name,
|
||||
base: t5Encoder.base,
|
||||
type: t5Encoder.type,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
@@ -221,17 +221,14 @@ const slice = createSlice({
|
||||
}
|
||||
state.animaVaeModel = result.data;
|
||||
},
|
||||
animaQwen3EncoderModelSelected: (
|
||||
state,
|
||||
action: PayloadAction<{ key: string; name: string; base: string } | null>
|
||||
) => {
|
||||
animaQwen3EncoderModelSelected: (state, action: PayloadAction<ParameterT5EncoderModel | null>) => {
|
||||
const result = zParamsState.shape.animaQwen3EncoderModel.safeParse(action.payload);
|
||||
if (!result.success) {
|
||||
return;
|
||||
}
|
||||
state.animaQwen3EncoderModel = result.data;
|
||||
},
|
||||
animaT5EncoderModelSelected: (state, action: PayloadAction<{ key: string; name: string; base: string } | null>) => {
|
||||
animaT5EncoderModelSelected: (state, action: PayloadAction<ParameterT5EncoderModel | null>) => {
|
||||
const result = zParamsState.shape.animaT5EncoderModel.safeParse(action.payload);
|
||||
if (!result.success) {
|
||||
return;
|
||||
|
||||
@@ -9,7 +9,7 @@ dev_addr: '127.0.0.1:8080'
|
||||
# Repository
|
||||
repo_name: 'invoke-ai/InvokeAI'
|
||||
repo_url: 'https://github.com/invoke-ai/InvokeAI'
|
||||
edit_uri: edit/main/docs/
|
||||
edit_uri: edit/main/docs-old/
|
||||
|
||||
# Copyright
|
||||
copyright: Copyright © 2022-2024 InvokeAI Team
|
||||
|
||||
Reference in New Issue
Block a user