Skip to content

Commit 71766b4

Browse files
committed
cleanup ollama support
1 parent 71834c2 commit 71766b4

File tree

6 files changed

+40
-46
lines changed

6 files changed

+40
-46
lines changed

backend/onyx/llm/llm_provider_options.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ class WellKnownLLMProviderDescriptor(BaseModel):
3939
model_configurations: list[ModelConfigurationView]
4040
default_model: str | None = None
4141
default_fast_model: str | None = None
42+
default_api_base: str | None = None
4243
# set for providers like Azure, which require a deployment name.
4344
deployment_name_required: bool = False
4445
# set for providers like Azure, which support a single model per deployment.
@@ -210,6 +211,7 @@ def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
210211
),
211212
default_model=None,
212213
default_fast_model=None,
214+
default_api_base="http://127.0.0.1:11434",
213215
),
214216
WellKnownLLMProviderDescriptor(
215217
name=ANTHROPIC_PROVIDER_NAME,

web/src/app/admin/configuration/llm/FetchModelsButton.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,11 @@ import { LoadingAnimation } from "@/components/Loading";
33
import Text from "@/components/ui/text";
44
import { fetchModels } from "./utils";
55
import {
6-
ProviderFetchModelsConfig,
6+
ProviderFetchModelsButtonConfig,
77
FetchModelsButtonProps,
88
} from "./interfaces";
99

10-
const providerConfigs: Record<string, ProviderFetchModelsConfig> = {
10+
const providerConfigs: Record<string, ProviderFetchModelsButtonConfig> = {
1111
bedrock: {
1212
buttonText: "Fetch Available Models for Region",
1313
loadingText: "Fetching Models...",

web/src/app/admin/configuration/llm/LLMProviderUpdateForm.tsx

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,8 @@ export function LLMProviderUpdateForm({
6868
api_key: existingLlmProvider?.api_key ?? "",
6969
api_base:
7070
existingLlmProvider?.api_base ??
71-
(llmProviderDescriptor.name === "ollama" ? "http://127.0.0.1:11434" : ""),
71+
llmProviderDescriptor.default_api_base ??
72+
"",
7273
api_version: existingLlmProvider?.api_version ?? "",
7374
// For Azure OpenAI, combine api_base and api_version into target_uri
7475
target_uri:

web/src/app/admin/configuration/llm/interfaces.ts

Lines changed: 22 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ export interface ModelConfiguration {
1616
name: string;
1717
is_visible: boolean;
1818
max_input_tokens: number | null;
19-
supports_image_input: boolean;
19+
supports_image_input: boolean | null;
2020
}
2121

2222
export interface WellKnownLLMProviderDescriptor {
@@ -33,6 +33,7 @@ export interface WellKnownLLMProviderDescriptor {
3333
model_configurations: ModelConfiguration[];
3434
default_model: string | null;
3535
default_fast_model: string | null;
36+
default_api_base: string | null;
3637
is_public: boolean;
3738
groups: number[];
3839
}
@@ -81,7 +82,7 @@ export interface LLMProviderDescriptor {
8182
model_configurations: ModelConfiguration[];
8283
}
8384

84-
export interface ProviderFetchModelsConfig {
85+
export interface ProviderFetchModelsButtonConfig {
8586
buttonText: string;
8687
loadingText: string;
8788
helperText: string | React.ReactNode;
@@ -99,3 +100,22 @@ export interface FetchModelsButtonProps {
99100
setFetchModelsError: (error: string) => void;
100101
setPopup?: (popup: PopupSpec) => void;
101102
}
103+
104+
export interface OllamaModelResponse {
105+
name: string;
106+
max_input_tokens: number;
107+
supports_image_input: boolean;
108+
}
109+
110+
export interface FetchModelsConfig<
111+
TApiResponse = any,
112+
TProcessedResponse = ModelConfiguration,
113+
> {
114+
endpoint: string;
115+
validationCheck: () => boolean;
116+
validationError: string;
117+
requestBody: () => Record<string, any>;
118+
processResponse: (data: TApiResponse) => TProcessedResponse[];
119+
getModelNames: (data: TApiResponse) => string[];
120+
successMessage: (count: number) => string;
121+
}

web/src/app/admin/configuration/llm/utils.ts

Lines changed: 12 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,13 @@ import {
1111
OpenAISVG,
1212
QwenIcon,
1313
} from "@/components/icons/icons";
14-
import { WellKnownLLMProviderDescriptor, LLMProviderView } from "./interfaces";
14+
import {
15+
WellKnownLLMProviderDescriptor,
16+
LLMProviderView,
17+
FetchModelsConfig,
18+
OllamaModelResponse,
19+
ModelConfiguration,
20+
} from "./interfaces";
1521
import { PopupSpec } from "@/components/admin/connectors/Popup";
1622

1723
export const getProviderIcon = (
@@ -75,7 +81,7 @@ export const fetchModels = async (
7581
setPopup?: (popup: PopupSpec) => void
7682
) => {
7783
// Provider-specific configurations
78-
const providerConfigs = {
84+
const providerConfigs: Record<string, FetchModelsConfig> = {
7985
bedrock: {
8086
endpoint: "/api/admin/llm/bedrock/available-models",
8187
validationCheck: () => !!values.custom_config?.AWS_REGION_NAME,
@@ -88,7 +94,7 @@ export const fetchModels = async (
8894
values.custom_config?.AWS_BEARER_TOKEN_BEDROCK,
8995
provider_name: existingLlmProvider?.name,
9096
}),
91-
processResponse: (data: string[]) =>
97+
processResponse: (data: string[]): ModelConfiguration[] =>
9298
data.map((modelName) => {
9399
const existingConfig =
94100
llmProviderDescriptor.model_configurations.find(
@@ -112,13 +118,7 @@ export const fetchModels = async (
112118
requestBody: () => ({
113119
api_base: values.api_base,
114120
}),
115-
processResponse: (
116-
data: {
117-
name: string;
118-
max_input_tokens: number;
119-
supports_image_input: boolean;
120-
}[]
121-
) =>
121+
processResponse: (data: OllamaModelResponse[]): ModelConfiguration[] =>
122122
data.map((modelData) => {
123123
const existingConfig =
124124
llmProviderDescriptor.model_configurations.find(
@@ -131,13 +131,8 @@ export const fetchModels = async (
131131
supports_image_input: modelData.supports_image_input,
132132
};
133133
}),
134-
getModelNames: (
135-
data: {
136-
name: string;
137-
max_input_tokens: number;
138-
supports_image_input: boolean;
139-
}[]
140-
) => data.map((model) => model.name),
134+
getModelNames: (data: OllamaModelResponse[]) =>
135+
data.map((model) => model.name),
141136
successMessage: (count: number) =>
142137
`Successfully fetched ${count} models from Ollama.`,
143138
},

web/src/lib/hooks.ts

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1005,30 +1005,6 @@ export function getDisplayNameForModel(modelName: string): string {
10051005
return MODEL_DISPLAY_NAMES[modelName] || modelName;
10061006
}
10071007

1008-
export const defaultModelsByProvider: { [name: string]: string[] } = {
1009-
openai: [
1010-
"gpt-4",
1011-
"gpt-4o",
1012-
"gpt-4o-mini",
1013-
"gpt-4.1",
1014-
"o3-mini",
1015-
"o1-mini",
1016-
"o1",
1017-
"o4-mini",
1018-
"o3",
1019-
],
1020-
bedrock: [
1021-
"meta.llama3-1-70b-instruct-v1:0",
1022-
"meta.llama3-1-8b-instruct-v1:0",
1023-
"anthropic.claude-3-opus-20240229-v1:0",
1024-
"mistral.mistral-large-2402-v1:0",
1025-
"anthropic.claude-3-5-sonnet-20241022-v2:0",
1026-
"anthropic.claude-3-7-sonnet-20250219-v1:0",
1027-
],
1028-
anthropic: ["claude-3-opus-20240229", "claude-3-5-sonnet-20241022"],
1029-
ollama: ["llama3.2", "phi3:mini", "mistral-small"],
1030-
};
1031-
10321008
// Get source metadata for configured sources - deduplicated by source type
10331009
function getConfiguredSources(
10341010
availableSources: ValidSources[]

0 commit comments

Comments (0)