diff --git a/backend/onyx/llm/chat_llm.py b/backend/onyx/llm/chat_llm.py
index 556a37f1c2a..f64417c1c6c 100644
--- a/backend/onyx/llm/chat_llm.py
+++ b/backend/onyx/llm/chat_llm.py
@@ -36,7 +36,6 @@
from onyx.llm.interfaces import LLM
from onyx.llm.interfaces import LLMConfig
from onyx.llm.interfaces import ToolChoiceOptions
-from onyx.llm.llm_provider_options import CREDENTIALS_FILE_CUSTOM_CONFIG_KEY
from onyx.llm.utils import model_is_reasoning_model
from onyx.server.utils import mask_string
from onyx.utils.logger import setup_logger
@@ -51,7 +50,8 @@
litellm.telemetry = False
_LLM_PROMPT_LONG_TERM_LOG_CATEGORY = "llm_prompt"
-VERTEX_CREDENTIALS_KWARG = "vertex_credentials"
+VERTEX_CREDENTIALS_FILE_KWARG = "vertex_credentials"
+VERTEX_LOCATION_KWARG = "vertex_location"
class LLMTimeoutError(Exception):
@@ -294,13 +294,12 @@ def __init__(
# Specifically pass in "vertex_credentials" / "vertex_location" as a
# model_kwarg to the completion call for vertex AI. More details here:
# https://docs.litellm.ai/docs/providers/vertex
- vertex_location_key = "vertex_location"
for k, v in custom_config.items():
if model_provider == "vertex_ai":
- if k == VERTEX_CREDENTIALS_KWARG:
+ if k == VERTEX_CREDENTIALS_FILE_KWARG:
model_kwargs[k] = v
continue
- elif k == vertex_location_key:
+ elif k == VERTEX_LOCATION_KWARG:
model_kwargs[k] = v
continue
@@ -378,13 +377,6 @@ def _completion(
processed_prompt = _prompt_to_dict(prompt)
self._record_call(processed_prompt)
- final_model_kwargs = {**self._model_kwargs}
- if (
- VERTEX_CREDENTIALS_KWARG not in final_model_kwargs
- and self.config.credentials_file
- ):
- final_model_kwargs[VERTEX_CREDENTIALS_KWARG] = self.config.credentials_file
-
try:
return litellm.completion(
mock_response=MOCK_LLM_RESPONSE,
@@ -430,7 +422,7 @@ def _completion(
if structured_response_format
else {}
),
- **final_model_kwargs,
+ **self._model_kwargs,
)
except Exception as e:
self._record_error(processed_prompt, e)
@@ -446,7 +438,7 @@ def _completion(
@property
def config(self) -> LLMConfig:
credentials_file: str | None = (
- self._custom_config.get(CREDENTIALS_FILE_CUSTOM_CONFIG_KEY, None)
+ self._custom_config.get(VERTEX_CREDENTIALS_FILE_KWARG, None)
if self._custom_config
else None
)
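
Aside (not part of the diff): with the rename, the Vertex custom config keys now use litellm's own kwarg names, so the `__init__` loop copies them into `model_kwargs` verbatim and `_completion` splats them straight through; that is what makes the old `final_model_kwargs` fallback removable. A minimal sketch of the resulting pass-through, assuming an illustrative model name and credentials path:

```python
# Minimal sketch of the kwarg pass-through; model name and path are illustrative.
import litellm

custom_config = {
    "vertex_credentials": "/path/to/service-account.json",  # VERTEX_CREDENTIALS_FILE_KWARG
    "vertex_location": "us-east1",                          # VERTEX_LOCATION_KWARG
}

# Mirrors the __init__ loop above: matching keys are copied into model_kwargs as-is.
model_kwargs = {
    k: v
    for k, v in custom_config.items()
    if k in ("vertex_credentials", "vertex_location")
}

# Mirrors _completion: the kwargs are forwarded unchanged; no fallback merge needed.
response = litellm.completion(
    model="vertex_ai/gemini-1.5-pro",
    messages=[{"role": "user", "content": "ping"}],
    **model_kwargs,
)
```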
diff --git a/backend/onyx/llm/llm_provider_options.py b/backend/onyx/llm/llm_provider_options.py
index 392f16a2f02..502c5b5ebc4 100644
--- a/backend/onyx/llm/llm_provider_options.py
+++ b/backend/onyx/llm/llm_provider_options.py
@@ -3,6 +3,8 @@
import litellm # type: ignore
from pydantic import BaseModel
+from onyx.llm.chat_llm import VERTEX_CREDENTIALS_FILE_KWARG
+from onyx.llm.chat_llm import VERTEX_LOCATION_KWARG
from onyx.llm.utils import model_supports_image_input
from onyx.server.manage.llm.models import ModelConfigurationView
@@ -24,6 +26,7 @@ class CustomConfigKey(BaseModel):
is_required: bool = True
is_secret: bool = False
key_type: CustomConfigKeyType = CustomConfigKeyType.TEXT_INPUT
+ default_value: str | None = None
class WellKnownLLMProviderDescriptor(BaseModel):
@@ -154,9 +157,6 @@ class WellKnownLLMProviderDescriptor(BaseModel):
}
-CREDENTIALS_FILE_CUSTOM_CONFIG_KEY = "CREDENTIALS_FILE"
-
-
def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
return [
WellKnownLLMProviderDescriptor(
@@ -240,13 +240,23 @@ def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
),
custom_config_keys=[
CustomConfigKey(
- name=CREDENTIALS_FILE_CUSTOM_CONFIG_KEY,
+ name=VERTEX_CREDENTIALS_FILE_KWARG,
display_name="Credentials File",
description="This should be a JSON file containing some private credentials.",
is_required=True,
is_secret=False,
key_type=CustomConfigKeyType.FILE_INPUT,
),
+ CustomConfigKey(
+ name=VERTEX_LOCATION_KWARG,
+ display_name="Location",
+ description="The location of the Vertex AI model. Please refer to the "
+ "[Vertex AI configuration docs](https://docs.onyx.app/gen_ai_configs/vertex_ai) for all possible values.",
+ is_required=False,
+ is_secret=False,
+ key_type=CustomConfigKeyType.TEXT_INPUT,
+ default_value="us-east1",
+ ),
],
default_model=VERTEXAI_DEFAULT_MODEL,
default_fast_model=VERTEXAI_DEFAULT_MODEL,
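
Aside (not part of the diff): `default_value` only seeds the input placeholder in the admin form (see the frontend change below); it is not persisted into the saved config. A short sketch of the new field in use, mirroring the values above:

```python
# Sketch: the optional location key as declared above; values mirror the diff.
location_key = CustomConfigKey(
    name=VERTEX_LOCATION_KWARG,  # "vertex_location"
    display_name="Location",
    is_required=False,
    default_value="us-east1",  # surfaces only as the input placeholder in the UI
)

# CustomConfigKey is a pydantic BaseModel, so the API serializes default_value
# alongside the other fields consumed by interfaces.ts on the frontend.
print(location_key.model_dump()["default_value"])  # "us-east1"
```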
diff --git a/web/src/app/admin/configuration/llm/LLMProviderUpdateForm.tsx b/web/src/app/admin/configuration/llm/LLMProviderUpdateForm.tsx
index 94e9c37ccad..3aa4b3fcc69 100644
--- a/web/src/app/admin/configuration/llm/LLMProviderUpdateForm.tsx
+++ b/web/src/app/admin/configuration/llm/LLMProviderUpdateForm.tsx
@@ -1,3 +1,4 @@
+import ReactMarkdown from "react-markdown";
import { LoadingAnimation } from "@/components/Loading";
import { AdvancedOptionsToggle } from "@/components/AdvancedOptionsToggle";
import Text from "@/components/ui/text";
@@ -16,7 +17,6 @@ import { useState } from "react";
import { useSWRConfig } from "swr";
import {
LLMProviderView,
- ModelConfiguration,
ModelConfigurationUpsertRequest,
WellKnownLLMProviderDescriptor,
} from "./interfaces";
@@ -134,6 +134,14 @@ export function LLMProviderUpdateForm({
selected_model_names: Yup.array().of(Yup.string()),
});
+  const customLinkRenderer = ({ href, children }: any) => {
+    return (
+      <a href={href} target="_blank" rel="noopener noreferrer" className="underline">
+        {children}
+      </a>
+    );
+  };
+
   return (

@@ ... @@ export function LLMProviderUpdateForm({
-              subtext={customConfigKey.description || undefined}
+              subtext={
+                <ReactMarkdown components={{ a: customLinkRenderer }}>
+                  {customConfigKey.description}
+                </ReactMarkdown>
+              }
+              placeholder={customConfigKey.default_value || undefined}
             />
           );
diff --git a/web/src/app/admin/configuration/llm/interfaces.ts b/web/src/app/admin/configuration/llm/interfaces.ts
index a4e5aa7a117..a19d0c8ce6c 100644
--- a/web/src/app/admin/configuration/llm/interfaces.ts
+++ b/web/src/app/admin/configuration/llm/interfaces.ts
@@ -4,9 +4,12 @@ export interface CustomConfigKey {
description: string | null;
is_required: boolean;
is_secret: boolean;
- key_type: "text_input" | "file_input";
+ key_type: CustomConfigKeyType;
+  default_value: string | null;
}
+export type CustomConfigKeyType = "text_input" | "file_input";
+
export interface ModelConfigurationUpsertRequest {
name: string;
is_visible: boolean;