@@ -11,7 +11,13 @@ import {
   OpenAISVG,
   QwenIcon,
 } from "@/components/icons/icons";
-import { WellKnownLLMProviderDescriptor, LLMProviderView } from "./interfaces";
+import {
+  WellKnownLLMProviderDescriptor,
+  LLMProviderView,
+  FetchModelsConfig,
+  OllamaModelResponse,
+  ModelConfiguration,
+} from "./interfaces";
 import { PopupSpec } from "@/components/admin/connectors/Popup";
 
 export const getProviderIcon = (
@@ -75,7 +81,7 @@ export const fetchModels = async (
   setPopup?: (popup: PopupSpec) => void
 ) => {
   // Provider-specific configurations
-  const providerConfigs = {
+  const providerConfigs: Record<string, FetchModelsConfig> = {
     bedrock: {
       endpoint: "/api/admin/llm/bedrock/available-models",
       validationCheck: () => !!values.custom_config?.AWS_REGION_NAME,
@@ -88,7 +94,7 @@ export const fetchModels = async (
           values.custom_config?.AWS_BEARER_TOKEN_BEDROCK,
         provider_name: existingLlmProvider?.name,
       }),
-      processResponse: (data: string[]) =>
+      processResponse: (data: string[]): ModelConfiguration[] =>
         data.map((modelName) => {
           const existingConfig =
             llmProviderDescriptor.model_configurations.find(
@@ -112,13 +118,7 @@ export const fetchModels = async (
       requestBody: () => ({
         api_base: values.api_base,
       }),
-      processResponse: (
-        data: {
-          name: string;
-          max_input_tokens: number;
-          supports_image_input: boolean;
-        }[]
-      ) =>
+      processResponse: (data: OllamaModelResponse[]): ModelConfiguration[] =>
         data.map((modelData) => {
           const existingConfig =
             llmProviderDescriptor.model_configurations.find(
@@ -131,13 +131,8 @@ export const fetchModels = async (
             supports_image_input: modelData.supports_image_input,
           };
         }),
-      getModelNames: (
-        data: {
-          name: string;
-          max_input_tokens: number;
-          supports_image_input: boolean;
-        }[]
-      ) => data.map((model) => model.name),
+      getModelNames: (data: OllamaModelResponse[]) =>
+        data.map((model) => model.name),
       successMessage: (count: number) =>
         `Successfully fetched ${count} models from Ollama.`,
     },
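
The three new names imported from "./interfaces" (FetchModelsConfig, OllamaModelResponse, ModelConfiguration) are not shown in this diff. The sketch below is a guess at their shape, inferred only from how they are used in the hunks above: the OllamaModelResponse fields match the inline type being replaced, while the exact members of ModelConfiguration and FetchModelsConfig beyond what this diff touches are assumptions, not the actual definitions.

// Hypothetical reconstruction of the new exports in ./interfaces, inferred
// from usage in this diff only; the real definitions may differ.

// Shape returned by the Ollama available-models endpoint (same fields as the
// inline type this diff replaces).
export interface OllamaModelResponse {
  name: string;
  max_input_tokens: number;
  supports_image_input: boolean;
}

// Per-model entry produced by processResponse; only fields visible in these
// hunks are listed, the real interface likely carries more (assumption).
export interface ModelConfiguration {
  name: string;
  max_input_tokens: number;
  supports_image_input: boolean;
}

// Value type of providerConfigs; member types inferred from the bedrock and
// ollama entries above (assumption).
export interface FetchModelsConfig {
  endpoint: string;
  validationCheck: () => boolean;
  requestBody: () => Record<string, unknown>;
  processResponse: (data: any) => ModelConfiguration[];
  getModelNames: (data: any) => string[];
  successMessage: (count: number) => string;
}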