Commit edda3a1

docs: Add documentation and comments to LLMFilterCompressionRetriever class
feat: add ChatLitellm component and LitellmApi credential (new component to connect to the LiteLLM API)
1 parent ddba891 commit edda3a1

4 files changed: +169 −1 lines changed
@@ -0,0 +1,23 @@ (new file)

```ts
import { INodeParams, INodeCredential } from '../src/Interface'

class LitellmApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.label = 'Litellm API'
        this.name = 'litellmApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'API Key',
                name: 'litellmApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: LitellmApi }
```
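For context: at runtime a node reads this credential back through Flowise's getCredentialData/getCredentialParam helpers, exactly as the ChatLitellm node below does. A minimal sketch of that lookup; the wrapper function name is illustrative only, not part of the commit:

```ts
import { ICommonObject, INodeData } from '../../../src/Interface'
import { getCredentialData, getCredentialParam } from '../../../src/utils'

// Illustrative helper (not in the commit): resolve the stored LiteLLM API key
async function resolveLitellmApiKey(nodeData: INodeData, options: ICommonObject): Promise<string | undefined> {
    // Decrypt the credential record referenced by this node, if one is connected
    const credentialData = await getCredentialData(nodeData.credential ?? '', options)
    // 'litellmApiKey' matches the input name declared in the LitellmApi class above
    return getCredentialParam('litellmApiKey', credentialData, nodeData)
}
```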
@@ -0,0 +1,128 @@ (new file)

```ts
import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

class ChatLitellm_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatLitellm'
        this.name = 'chatLitellm'
        this.version = 1.0
        this.type = 'ChatLitellm'
        this.icon = 'litellm.jpg'
        this.category = 'Chat Models'
        this.description = 'Connect to a Litellm server using OpenAI-compatible API'
        this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(ChatOpenAI)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['litellmApi'],
            optional: true
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Base URL',
                name: 'basePath',
                type: 'string',
                placeholder: 'http://localhost:8000'
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                placeholder: 'model_name'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Streaming',
                name: 'streaming',
                type: 'boolean',
                default: true,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const cache = nodeData.inputs?.cache as BaseCache
        const basePath = nodeData.inputs?.basePath as string
        const modelName = nodeData.inputs?.modelName as string
        const temperature = nodeData.inputs?.temperature as string
        const streaming = nodeData.inputs?.streaming as boolean
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const timeout = nodeData.inputs?.timeout as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('litellmApiKey', credentialData, nodeData)

        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
            temperature: parseFloat(temperature),
            modelName,
            streaming: streaming ?? true
        }

        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (timeout) obj.timeout = parseInt(timeout, 10)
        if (cache) obj.cache = cache
        if (apiKey) obj.openAIApiKey = apiKey

        const model = new ChatOpenAI(obj, { basePath })

        return model
    }
}

module.exports = { nodeClass: ChatLitellm_ChatModels }
```
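In effect, the node builds a standard ChatOpenAI client whose requests go to the LiteLLM proxy given in Base URL. A minimal standalone sketch of the same construction; the URL, model name, and key below are placeholders, not values from this commit:

```ts
import { ChatOpenAI } from '@langchain/openai'

// Placeholder values: point the OpenAI-compatible client at a LiteLLM proxy
const model = new ChatOpenAI(
    {
        modelName: 'model_name', // whichever model the LiteLLM server routes to
        temperature: 0.9,
        streaming: true,
        openAIApiKey: 'litellm-api-key' // supplied by the LitellmApi credential in practice
    },
    { basePath: 'http://localhost:8000' } // same second-argument shape the node uses
)

const response = await model.invoke('Hello via LiteLLM!')
console.log(response.content)
```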

packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts (+18 −1)
```diff
@@ -1,10 +1,17 @@
+/**
+ * Import necessary modules and interfaces for the retriever functionality.
+ */
 import { BaseRetriever } from '@langchain/core/retrievers'
 import { BaseLanguageModel } from '@langchain/core/language_models/base'
 import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
 import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'
 import { handleEscapeCharacters } from '../../../src/utils'
 import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 
+/**
+ * Class representing a retriever that filters and compresses documents
+ * using a language model and a vector store retriever.
+ */
 class LLMFilterCompressionRetriever_Retrievers implements INode {
     label: string
     name: string
@@ -18,6 +25,9 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
     outputs: INodeOutputsValue[]
     badge: string
 
+    /**
+     * Initializes a new instance of the LLMFilterCompressionRetriever_Retrievers class.
+     */
     constructor() {
         this.label = 'LLM Filter Retriever'
         this.name = 'llmFilterRetriever'
@@ -69,19 +79,26 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
         ]
     }
 
+    /**
+     * Initializes the retriever with the provided node data and input string.
+     * @param nodeData - The data for the node, including inputs and outputs.
+     * @param input - The input string for the retriever.
+     * @returns The retriever or processed documents based on the output type.
+     */
     async init(nodeData: INodeData, input: string): Promise<any> {
         const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
         const model = nodeData.inputs?.model as BaseLanguageModel
         const query = nodeData.inputs?.query as string
         const output = nodeData.outputs?.output as string
 
-        if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever')
+        if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever') // Ensure a model is provided
 
         const retriever = new ContextualCompressionRetriever({
             baseCompressor: LLMChainExtractor.fromLLM(model),
             baseRetriever: baseRetriever
         })
 
+        // Return the appropriate output based on the specified output type
         if (output === 'retriever') return retriever
         else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
         else if (output === 'text') {
```