@@ -1,14 +1,7 @@
import * as messageTools from '/modules/messageTools.mjs';

const SUMMARY_PREFIX = "[SUMMARY]"
- /**
-  * Ollama has a default context window of 2048 tokens. Tokens are approximately 4 characters each.
-  * Any content over this limit is ignored.
-  * Typically, the content we are interested in is the start of the email, with the trailing content
-  * being quoted replies or signatures.
-  * So, we must trim the email to the first 2000 tokens (roughly 8000 characters).
-  */
- const MAX_CONTENT_LENGTH = 2000 * 4
+ const DEFAULT_CONTEXT_WINDOW = 2048

class ResponseError extends Error {
    constructor(message, res) {
@@ -23,6 +16,8 @@ class ResponseError extends Error {
messenger.messages.onNewMailReceived.addListener(async (folder, messages) => {
    console.log("New mail received")

+     const contextLength = await getContextLength()
+
    /*
        We use the messageTools.iterateMessagePages function to iterate over all the messages in the folder.
    */
@@ -45,7 +40,8 @@ messenger.messages.onNewMailReceived.addListener(async (folder, messages) => {
        /*
            Get the text content of the email, stripping out HTML and CSS
        */
-         const content = (await getBody(message)).substring(0, MAX_CONTENT_LENGTH)
+         const content = await getBody(message)
+             .then(body => body.substring(0, contextLength))

        /*
            Call Ollama to generate a summary of the email. The service may be (re)starting,
@@ -75,14 +71,42 @@ messenger.messages.onNewMailReceived.addListener(async (folder, messages) => {
    }
})

- async function sendNewEmail(message, summary) {
-     const getItem = await browser.storage.local.get()
+ async function getEmailAddress() {
+     return await browser.storage.local.get()
+         .then(getItem => getItem.email?.trim() || "")
+ }
+
+ async function getEmailAlias() {
+     return await browser.storage.local.get()
+         .then(getItem => getItem.alias?.trim() || "")
+ }
+
+ async function getModel() {
+     return await browser.storage.local.get()
+         .then(getItem => getItem.model?.trim() || "llama3.2")
+ }

-     if (getItem.email.trim().length === 0) {
+ async function getContextLength() {
+     return await browser.storage.local.get()
+         .then(getItem => getItem.contextwindow?.trim() || "2048")
+         .then(contextWindow => parseInt(contextWindow))
+         .then(contextWindow => isNaN(contextWindow) || contextWindow < 0
+             ? DEFAULT_CONTEXT_WINDOW
+             : contextWindow)
+         /*
+             Context window measures the number of tokens. There are approx 4 chars per token.
+             To give us a buffer and allow for the prompt template we subtract 256 tokens and
+             multiply by 4 to get the number of characters that can be passed into the model.
+         */
+         .then(contextWindow => (contextWindow - 256) * 4)
+ }
+
+ async function sendNewEmail(message, summary) {
+     if ((await getEmailAddress()).length === 0) {
        return
    }

-     const emailWithAlias = getToAddress(getItem.email, getItem.alias)
+     const emailWithAlias = getToAddress(await getEmailAddress(), await getEmailAlias())

    const composeTab = await browser.compose.beginNew({
        to: emailWithAlias,
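
As a quick check of the arithmetic in getContextLength above: with the default 2048-token window, (2048 - 256) * 4 = 7168 characters of email body are handed to the model. Below is a minimal standalone sketch of the same fallback-and-convert logic, with the magic numbers named for readability; the constants and function name are illustrative only, not part of the extension.

// Sketch: token budget from storage -> character budget for the email body
const DEFAULT_CONTEXT_WINDOW = 2048
const PROMPT_RESERVE_TOKENS = 256 // headroom for the prompt template
const CHARS_PER_TOKEN = 4         // rough average for English text

function contextLengthInChars(storedValue) {
    const parsed = parseInt(storedValue?.trim() || "2048")
    const tokens = isNaN(parsed) || parsed < 0 ? DEFAULT_CONTEXT_WINDOW : parsed
    return (tokens - PROMPT_RESERVE_TOKENS) * CHARS_PER_TOKEN
}

console.log(contextLengthInChars("2048"))    // 7168
console.log(contextLengthInChars("banana"))  // NaN, falls back to the default: 7168
console.log(contextLengthInChars(undefined)) // unset option also defaults: 7168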
@@ -123,6 +147,45 @@ async function getBody(message) {
    return plainTextParts.join("\n")
}

+ async function getPrompt(content) {
+     const model = await getModel()
+
+     if (model.startsWith("phi")) {
+         return getPhiPrompt(content)
+     }
+
+     return getLlamaPrompt(content)
+ }
+
+ function getPhiPrompt(content) {
+     return "<|im_start|>system<|im_sep|>" +
+         "You are an expert in reading and summarizing emails." +
+         "<|im_end|>" +
+         "<|im_start|>system<|im_sep|>" +
+         "The email content is: " + content +
+         "<|im_end|>" +
+         "<|im_start|>user<|im_sep|>" +
+         "Provide a two paragraph summary of the email. " +
+         "The summary must highlight the important points, dates, people, questions, and action items." +
+         "<|im_end|>" +
+         "<|im_start|>assistant<|im_sep|>"
+ }
+
+ function getLlamaPrompt(content) {
+     return "<|begin_of_text|>" +
+         "<|start_header_id|>system<|end_header_id|>" +
+         "You are an expert in reading and summarizing emails." +
+         "<|eot_id|>" +
+         "<|start_header_id|>system<|end_header_id|>" +
+         "The email content is: " + content +
+         "<|eot_id|>" +
+         "<|start_header_id|>user<|end_header_id|>" +
+         "Provide a two paragraph summary of the email. " +
+         "The summary must highlight the important points, dates, people, questions, and action items." +
+         "<|eot_id|>" +
+         "<|start_header_id|>assistant<|end_header_id|>"
+ }
+
/**
 * Call Ollama to generate a summary of the email
 * @param content The plain text content of the email
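
The two builders above hand-assemble the raw chat-template tokens for the Phi family (<|im_start|>...<|im_sep|>...<|im_end|>) and the Llama 3 family (<|start_header_id|>...<|eot_id|>), selected by the prefix of the stored model name. For example, with the default "llama3.2" model (illustrative input only):

const prompt = getLlamaPrompt("Lunch is moved to 1pm on Friday. Can you confirm?")
// prompt begins:
// <|begin_of_text|><|start_header_id|>system<|end_header_id|>You are an expert in reading and summarizing emails.<|eot_id|>...

One caveat: Ollama's /api/generate endpoint normally wraps the prompt in the template from the model's Modelfile, so pre-templated strings like these are typically sent with "raw": true to avoid double-wrapping.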
@@ -135,19 +198,8 @@ async function getSummary(content) {
        method: "POST",
        body: JSON.stringify(
            {
-                 "model": "llama3.2",
-                 "prompt": "<|begin_of_text|>" +
-                     "<|start_header_id|>system<|end_header_id|>" +
-                     "You are an expert in reading and summarizing emails." +
-                     "<|eot_id|>" +
-                     "<|start_header_id|>system<|end_header_id|>" +
-                     "The email content is: " + content +
-                     "<|eot_id|>" +
-                     "<|start_header_id|>user<|end_header_id|>" +
-                     "Provide a two paragraph summary of the email. " +
-                     "The summary must highlight the important points, dates, people, questions, and action items." +
-                     "<|eot_id|>" +
-                     "<|start_header_id|>assistant<|end_header_id|>",
+                 "model": await getModel(),
+                 "prompt": await getPrompt(content),
                "stream": false
            }
        ),