
Commit dd0b4ec

Improve ai error handling (#4180)
* Introduce a result type
* Update AI error handling to use the result type
* Handle ollama json parse error
* Migrate using Error as the type that represents errors
* Remove now useless condition
* asdfasdf
* Use andThen
* Correct unit tests
1 parent 518cc8b commit dd0b4ec

11 files changed: +330 −136 lines changed
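The `$lib/result` module that the commit introduces is not part of this excerpt. What follows is a minimal sketch of what it plausibly contains, inferred only from the call sites in the diffs below (`ok`, `buildFailureFromAny`, `map`, `andThen`, `wrap`, `wrapAsync`, `type Result`); the field names and internals are assumptions, and the real module may differ:

// Hypothetical reconstruction of $lib/result: not shown in this commit view.
export type Result<Ok, Err> = { ok: true; value: Ok } | { ok: false; failure: Err };

export function ok<Ok, Err>(value: Ok): Result<Ok, Err> {
	return { ok: true, value };
}

// Coerces anything (a thrown value, a plain string) into an Error-carrying failure.
export function buildFailureFromAny<Ok>(cause: unknown): Result<Ok, Error> {
	const failure = cause instanceof Error ? cause : new Error(String(cause));
	return { ok: false, failure };
}

// Transforms the success value; failures pass through unchanged.
export function map<Ok, Out, Err>(
	result: Result<Ok, Err>,
	fn: (value: Ok) => Out
): Result<Out, Err> {
	return result.ok ? { ok: true, value: fn(result.value) } : result;
}

// Chains another fallible step; short-circuits on the first failure.
export function andThen<Ok, Out, Err>(
	result: Result<Ok, Err>,
	fn: (value: Ok) => Result<Out, Err>
): Result<Out, Err> {
	return result.ok ? fn(result.value) : result;
}

// Runs a throwing function and captures the exception as a failure.
export function wrap<Ok, Err extends Error>(fn: () => Ok): Result<Ok, Err> {
	try {
		return { ok: true, value: fn() };
	} catch (e) {
		return buildFailureFromAny(e) as Result<Ok, Err>;
	}
}

// Same for async functions: rejected promises become failures.
export async function wrapAsync<Ok, Err extends Error>(
	fn: () => Promise<Ok>
): Promise<Result<Ok, Err>> {
	try {
		return { ok: true, value: await fn() };
	} catch (e) {
		return buildFailureFromAny(e) as Result<Ok, Err>;
	}
}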

app/src/lib/ai/anthropicClient.ts

Lines changed: 14 additions & 4 deletions
@@ -1,8 +1,12 @@
 import { SHORT_DEFAULT_COMMIT_TEMPLATE, SHORT_DEFAULT_BRANCH_TEMPLATE } from '$lib/ai/prompts';
+import { type AIClient, type AnthropicModelName, type Prompt } from '$lib/ai/types';
+import { buildFailureFromAny, ok, type Result } from '$lib/result';
 import { fetch, Body } from '@tauri-apps/api/http';
-import type { AIClient, AnthropicModelName, Prompt } from '$lib/ai/types';
 
-type AnthropicAPIResponse = { content: { text: string }[] };
+type AnthropicAPIResponse = {
+	content: { text: string }[];
+	error: { type: string; message: string };
+};
 
 export class AnthropicAIClient implements AIClient {
 	defaultCommitTemplate = SHORT_DEFAULT_COMMIT_TEMPLATE;
@@ -13,7 +17,7 @@ export class AnthropicAIClient implements AIClient {
 		private modelName: AnthropicModelName
 	) {}
 
-	async evaluate(prompt: Prompt) {
+	async evaluate(prompt: Prompt): Promise<Result<string, Error>> {
 		const body = Body.json({
 			messages: prompt,
 			max_tokens: 1024,
@@ -30,6 +34,12 @@ export class AnthropicAIClient implements AIClient {
 			body
 		});
 
-		return response.data.content[0].text;
+		if (response.ok && response.data?.content?.[0]?.text) {
+			return ok(response.data.content[0].text);
+		} else {
+			return buildFailureFromAny(
+				`Anthropic returned error code ${response.status} ${response.data?.error?.message}`
+			);
+		}
 	}
 }
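Not part of the diff, but worth seeing end to end: with the shape sketched above, a caller branches on the result instead of wrapping the call in try/catch. The discriminant names (`ok`, `value`, `failure`) follow the sketch, not necessarily the real module:

// Hypothetical caller of the new evaluate() signature.
const result = await client.evaluate(prompt);
if (result.ok) {
	console.log(result.value); // the generated text
} else {
	// e.g. "Anthropic returned error code 401 ..."
	console.error(result.failure.message);
}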

app/src/lib/ai/butlerClient.ts

Lines changed: 14 additions & 10 deletions
@@ -1,4 +1,5 @@
 import { SHORT_DEFAULT_BRANCH_TEMPLATE, SHORT_DEFAULT_COMMIT_TEMPLATE } from '$lib/ai/prompts';
+import { map, type Result } from '$lib/result';
 import type { AIClient, ModelKind, Prompt } from '$lib/ai/types';
 import type { HttpClient } from '$lib/backend/httpClient';
 
@@ -12,16 +13,19 @@ export class ButlerAIClient implements AIClient {
 		private modelKind: ModelKind
 	) {}
 
-	async evaluate(prompt: Prompt) {
-		const response = await this.cloud.post<{ message: string }>('evaluate_prompt/predict.json', {
-			body: {
-				messages: prompt,
-				max_tokens: 400,
-				model_kind: this.modelKind
-			},
-			token: this.userToken
-		});
+	async evaluate(prompt: Prompt): Promise<Result<string, Error>> {
+		const response = await this.cloud.postSafe<{ message: string }>(
+			'evaluate_prompt/predict.json',
+			{
+				body: {
+					messages: prompt,
+					max_tokens: 400,
+					model_kind: this.modelKind
+				},
+				token: this.userToken
+			}
+		);
 
-		return response.message;
+		return map(response, ({ message }) => message);
 	}
 }
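`postSafe` is presumably added to `HttpClient` in one of the seven changed files not shown here. A plausible sketch, assuming it simply funnels the existing throwing `post` through `wrapAsync` (the method name `post` and the `RequestOptions` type are assumptions):

// Hypothetical: the real definition lives in $lib/backend/httpClient.
async postSafe<T>(path: string, opts: RequestOptions): Promise<Result<T, Error>> {
	return await wrapAsync<T, Error>(async () => await this.post<T>(path, opts));
}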

app/src/lib/ai/ollamaClient.ts

Lines changed: 33 additions & 22 deletions
@@ -1,5 +1,6 @@
 import { LONG_DEFAULT_BRANCH_TEMPLATE, LONG_DEFAULT_COMMIT_TEMPLATE } from '$lib/ai/prompts';
 import { MessageRole, type PromptMessage, type AIClient, type Prompt } from '$lib/ai/types';
+import { andThen, buildFailureFromAny, ok, wrap, wrapAsync, type Result } from '$lib/result';
 import { isNonEmptyObject } from '$lib/utils/typeguards';
 import { fetch, Body, Response } from '@tauri-apps/api/http';
 
@@ -81,15 +82,22 @@ export class OllamaClient implements AIClient {
 		private modelName: string
 	) {}
 
-	async evaluate(prompt: Prompt) {
+	async evaluate(prompt: Prompt): Promise<Result<string, Error>> {
 		const messages = this.formatPrompt(prompt);
-		const response = await this.chat(messages);
-		const rawResponse = JSON.parse(response.message.content);
-		if (!isOllamaChatMessageFormat(rawResponse)) {
-			throw new Error('Invalid response: ' + response.message.content);
-		}
 
-		return rawResponse.result;
+		const responseResult = await this.chat(messages);
+
+		return andThen(responseResult, (response) => {
+			const rawResponseResult = wrap<unknown, Error>(() => JSON.parse(response.message.content));
+
+			return andThen(rawResponseResult, (rawResponse) => {
+				if (!isOllamaChatMessageFormat(rawResponse)) {
+					return buildFailureFromAny('Invalid response: ' + response.message.content);
+				}
+
+				return ok(rawResponse.result);
+			});
+		});
 	}
 
 	/**
@@ -124,31 +132,32 @@ ${JSON.stringify(OLLAMA_CHAT_MESSAGE_FORMAT_SCHEMA, null, 2)}`
 	 * @param request - The OllamaChatRequest object containing the request details.
 	 * @returns A Promise that resolves to the Response object.
 	 */
-	private async fetchChat(request: OllamaChatRequest): Promise<Response<any>> {
+	private async fetchChat(request: OllamaChatRequest): Promise<Result<Response<any>, Error>> {
 		const url = new URL(OllamaAPEndpoint.Chat, this.endpoint);
 		const body = Body.json(request);
-		const result = await fetch(url.toString(), {
-			method: 'POST',
-			headers: {
-				'Content-Type': 'application/json'
-			},
-			body
-		});
-		return result;
+		return await wrapAsync(
+			async () =>
+				await fetch(url.toString(), {
+					method: 'POST',
+					headers: {
+						'Content-Type': 'application/json'
+					},
+					body
+				})
+		);
 	}
 
 	/**
 	 * Sends a chat message to the LLM model and returns the response.
 	 *
 	 * @param messages - An array of LLMChatMessage objects representing the chat messages.
 	 * @param options - Optional LLMRequestOptions object for specifying additional options.
-	 * @throws Error if the response is invalid.
 	 * @returns A Promise that resolves to an LLMResponse object representing the response from the LLM model.
 	 */
 	private async chat(
 		messages: Prompt,
 		options?: OllamaRequestOptions
-	): Promise<OllamaChatResponse> {
+	): Promise<Result<OllamaChatResponse, Error>> {
 		const result = await this.fetchChat({
 			model: this.modelName,
 			stream: false,
@@ -157,10 +166,12 @@ ${JSON.stringify(OLLAMA_CHAT_MESSAGE_FORMAT_SCHEMA, null, 2)}`
 			format: 'json'
 		});
 
-		if (!isOllamaChatResponse(result.data)) {
-			throw new Error('Invalid response\n' + JSON.stringify(result.data));
-		}
+		return andThen(result, (result) => {
+			if (!isOllamaChatResponse(result.data)) {
+				return buildFailureFromAny('Invalid response\n' + JSON.stringify(result.data));
+			}
 
-		return result.data;
+			return ok(result.data);
+		});
 	}
 }
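The nested `andThen` calls replace three separate throw sites with one pipeline: an HTTP failure, a `JSON.parse` failure (the commit's "Handle ollama json parse error"), and a schema-check failure all reach the caller as the first failure encountered. A small standalone illustration of that short-circuiting, using the sketched module:

// JSON.parse throws on malformed input; wrap turns the throw into a failure...
const parsed = wrap<unknown, Error>(() => JSON.parse('not valid json'));

// ...and andThen never runs its callback on a failure, it just forwards it.
const validated = andThen(parsed, (value) => ok(String(value)));
console.log(validated.ok); // false: the SyntaxError from JSON.parse, untouched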

app/src/lib/ai/openAIClient.ts

Lines changed: 16 additions & 6 deletions
@@ -1,6 +1,8 @@
 import { SHORT_DEFAULT_BRANCH_TEMPLATE, SHORT_DEFAULT_COMMIT_TEMPLATE } from '$lib/ai/prompts';
+import { andThen, buildFailureFromAny, ok, wrapAsync, type Result } from '$lib/result';
 import type { OpenAIModelName, Prompt, AIClient } from '$lib/ai/types';
 import type OpenAI from 'openai';
+import type { ChatCompletion } from 'openai/resources/index.mjs';
 
 export class OpenAIClient implements AIClient {
 	defaultCommitTemplate = SHORT_DEFAULT_COMMIT_TEMPLATE;
@@ -11,13 +13,21 @@ export class OpenAIClient implements AIClient {
 		private openAI: OpenAI
 	) {}
 
-	async evaluate(prompt: Prompt) {
-		const response = await this.openAI.chat.completions.create({
-			messages: prompt,
-			model: this.modelName,
-			max_tokens: 400
+	async evaluate(prompt: Prompt): Promise<Result<string, Error>> {
+		const responseResult = await wrapAsync<ChatCompletion, Error>(async () => {
+			return await this.openAI.chat.completions.create({
+				messages: prompt,
+				model: this.modelName,
+				max_tokens: 400
+			});
 		});
 
-		return response.choices[0].message.content || '';
+		return andThen(responseResult, (response) => {
+			if (response.choices[0]?.message.content) {
+				return ok(response.choices[0]?.message.content);
+			} else {
+				return buildFailureFromAny('Open AI generated an empty message');
+			}
+		});
 	}
 }
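The OpenAI SDK rejects on network and API errors rather than returning an error payload, so the shape here differs slightly from the Anthropic client: `wrapAsync` catches the rejection, and `andThen` handles the one failure mode the SDK won't throw for, an empty completion. The same pattern reduced to its essentials, with hypothetical names and the sketched module:

// Sketch only: callModel stands in for any SDK call that may reject.
async function evaluateSafe(
	callModel: () => Promise<string | null>
): Promise<Result<string, Error>> {
	const result = await wrapAsync<string | null, Error>(callModel);
	return andThen(result, (content) =>
		content ? ok(content) : buildFailureFromAny('empty message')
	);
}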
