
Commit 72afb9c

Mary Hipp (maryhipp) authored and committed

fix iterations for all API models

1 parent f004fc3 · commit 72afb9c

File tree

1 file changed (+2, -1 lines)


invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts

Lines changed: 2 additions & 1 deletion
@@ -4,6 +4,7 @@ import { range } from 'es-toolkit/compat';
 import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
 import type { ModelIdentifierField } from 'features/nodes/types/common';
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
+import { API_BASE_MODELS } from 'features/parameters/types/constants';
 import type { components } from 'services/api/schema';
 import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types';
 import { assert } from 'tsafe';
@@ -18,7 +19,7 @@ const getExtendedPrompts = (arg: {
   // Normally, the seed behaviour implicity determines the batch size. But when we use models without seeds (like
   // ChatGPT 4o) in conjunction with the per-prompt seed behaviour, we lose out on that implicit batch size. To rectify
   // this, we need to create a batch of the right size by repeating the prompts.
-  if (seedBehaviour === 'PER_PROMPT' || model.base === 'chatgpt-4o' || model.base === 'flux-kontext') {
+  if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(model.base)) {
     return range(iterations).flatMap(() => prompts);
   }
   return prompts;
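
In short, the change replaces two hard-coded base-model checks ('chatgpt-4o' and 'flux-kontext') with a membership test against the shared API_BASE_MODELS list, so every seedless API model gets the repeated-prompt batch expansion, not just those two. A minimal TypeScript sketch of the resulting behaviour follows; the standalone signature and the contents assigned to API_BASE_MODELS here are illustrative assumptions (the real list lives in features/parameters/types/constants, and the real function uses the repo's typed arg object and es-toolkit's range):

// Sketch only: a simplified stand-in for getExtendedPrompts that keeps
// just the logic the diff touches.
const API_BASE_MODELS = ['chatgpt-4o', 'flux-kontext']; // assumed contents for illustration

const getExtendedPrompts = (arg: {
  seedBehaviour: 'PER_PROMPT' | 'PER_ITERATION';
  prompts: string[];
  iterations: number;
  base: string;
}): string[] => {
  const { seedBehaviour, prompts, iterations, base } = arg;
  // Seedless API models have no implicit per-seed batch dimension, so the
  // prompt list itself is repeated `iterations` times to size the batch.
  if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(base)) {
    // Equivalent to range(iterations).flatMap(() => prompts) in the real code.
    return Array.from({ length: iterations }, () => prompts).flat();
  }
  return prompts;
};

// Example: two prompts, three iterations -> six entries, repeated in order.
// getExtendedPrompts({ seedBehaviour: 'PER_ITERATION', prompts: ['a cat', 'a dog'], iterations: 3, base: 'chatgpt-4o' })
// => ['a cat', 'a dog', 'a cat', 'a dog', 'a cat', 'a dog']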
