invokeai/frontend/web/src/features/nodes/util/graph (1 file changed, +2 −1)

@@ -4,6 +4,7 @@ import { range } from 'es-toolkit/compat';
 import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
 import type { ModelIdentifierField } from 'features/nodes/types/common';
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
+import { API_BASE_MODELS } from 'features/parameters/types/constants';
 import type { components } from 'services/api/schema';
 import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types';
 import { assert } from 'tsafe';
@@ -18,7 +19,7 @@ const getExtendedPrompts = (arg: {
   // Normally, the seed behaviour implicity determines the batch size. But when we use models without seeds (like
   // ChatGPT 4o) in conjunction with the per-prompt seed behaviour, we lose out on that implicit batch size. To rectify
   // this, we need to create a batch of the right size by repeating the prompts.
-  if (seedBehaviour === 'PER_PROMPT' || model.base === 'chatgpt-4o' || model.base === 'flux-kontext') {
+  if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(model.base)) {
     return range(iterations).flatMap(() => prompts);
   }
   return prompts;
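For context, the change replaces the inline chatgpt-4o / flux-kontext checks with a lookup against the shared API_BASE_MODELS constant, so additional seedless API-hosted bases only need to be added in one place. Below is a minimal, self-contained TypeScript sketch of the resulting logic; the SeedBehaviour union, the model stub, and the local API_BASE_MODELS literal are assumptions for illustration (the real values live in features/parameters/types/constants and the surrounding graph-builder module), not a copy of the project code.

// Sketch of getExtendedPrompts after this change (assumed types; not the actual module).
import { range } from 'es-toolkit/compat';

// Assumed union; only 'PER_PROMPT' appears in the diff itself.
type SeedBehaviour = 'PER_ITERATION' | 'PER_PROMPT';

// Assumed contents: the API-hosted bases that take no seed parameter. The diff implies
// it covers at least 'chatgpt-4o' and 'flux-kontext', the two values the old inline check used.
const API_BASE_MODELS: string[] = ['chatgpt-4o', 'flux-kontext'];

const getExtendedPrompts = (arg: {
  seedBehaviour: SeedBehaviour;
  iterations: number;
  prompts: string[];
  model: { base: string };
}): string[] => {
  const { seedBehaviour, iterations, prompts, model } = arg;
  // With per-iteration seeds, the seed collection itself multiplies the batch size.
  // Per-prompt seeds and seedless API models lose that implicit multiplier, so the
  // prompts are repeated once per iteration to produce a batch of the right size.
  if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(model.base)) {
    return range(iterations).flatMap(() => prompts);
  }
  return prompts;
};

For example, with iterations = 3 and prompts = ['a cat', 'a dog'], a seedless base yields six entries (the two prompts repeated three times), matching the batch size that a seed collection would otherwise have produced implicitly.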