mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-04-23 03:00:31 -04:00)
fix iterations for all API models
committed by Mary Hipp Rogers
parent f004fc31f1
commit 72afb9c3fd
@@ -4,6 +4,7 @@ import { range } from 'es-toolkit/compat';
 import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
 import type { ModelIdentifierField } from 'features/nodes/types/common';
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
+import { API_BASE_MODELS } from 'features/parameters/types/constants';
 import type { components } from 'services/api/schema';
 import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types';
 import { assert } from 'tsafe';
@@ -18,7 +19,7 @@ const getExtendedPrompts = (arg: {
   // Normally, the seed behaviour implicitly determines the batch size. But when we use models without seeds (like
   // ChatGPT 4o) in conjunction with the per-prompt seed behaviour, we lose out on that implicit batch size. To rectify
   // this, we need to create a batch of the right size by repeating the prompts.
-  if (seedBehaviour === 'PER_PROMPT' || model.base === 'chatgpt-4o' || model.base === 'flux-kontext') {
+  if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(model.base)) {
     return range(iterations).flatMap(() => prompts);
   }
   return prompts;
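To make the change concrete, below is a minimal standalone sketch of the expansion step. The argument shape, the SeedBehaviour values, and the contents of API_BASE_MODELS here are assumptions for illustration; only the branch condition and the range(iterations).flatMap(...) expansion come from the diff above.

// Sketch only: the real arg shape and constants live elsewhere in the codebase.
import { range } from 'es-toolkit/compat';

// Assumed union; the real type is imported from dynamicPromptsSlice.
type SeedBehaviour = 'PER_ITERATION' | 'PER_PROMPT';

// Assumed contents; the real list is defined in features/parameters/types/constants
// and may cover additional API model bases.
const API_BASE_MODELS = ['chatgpt-4o', 'flux-kontext'];

const getExtendedPrompts = (arg: {
  seedBehaviour: SeedBehaviour;
  iterations: number;
  prompts: string[];
  base: string; // stands in for model.base in the real code
}): string[] => {
  const { seedBehaviour, iterations, prompts, base } = arg;
  // Seedless API models cannot rely on a varying seed to fan out a batch,
  // so the prompt list itself is repeated `iterations` times.
  if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(base)) {
    return range(iterations).flatMap(() => prompts);
  }
  return prompts;
};

// Example:
// getExtendedPrompts({ seedBehaviour: 'PER_ITERATION', iterations: 3,
//                      prompts: ['a cat', 'a dog'], base: 'chatgpt-4o' })
// => ['a cat', 'a dog', 'a cat', 'a dog', 'a cat', 'a dog']

Centralizing the check in API_BASE_MODELS.includes(...) means new seedless API model bases only need to be added to the constant, rather than to every chained equality test.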