+/**
+ * Import necessary modules and interfaces for the retriever functionality.
+ */
 import { BaseRetriever } from '@langchain/core/retrievers'
 import { BaseLanguageModel } from '@langchain/core/language_models/base'
 import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
 import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'
 import { handleEscapeCharacters } from '../../../src/utils'
 import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'

+/**
+ * Class representing a retriever that filters and compresses documents
+ * using a language model and a vector store retriever.
+ */
 class LLMFilterCompressionRetriever_Retrievers implements INode {
     label: string
     name: string
@@ -18,6 +25,9 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
     outputs: INodeOutputsValue[]
     badge: string

+    /**
+     * Initializes a new instance of the LLMFilterCompressionRetriever_Retrievers class.
+     */
     constructor() {
         this.label = 'LLM Filter Retriever'
         this.name = 'llmFilterRetriever'
@@ -69,19 +79,26 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
         ]
     }

+    /**
+     * Initializes the retriever with the provided node data and input string.
+     * @param nodeData - The data for the node, including inputs and outputs.
+     * @param input - The input string for the retriever.
+     * @returns The retriever or processed documents based on the output type.
+     */
     async init(nodeData: INodeData, input: string): Promise<any> {
         const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
         const model = nodeData.inputs?.model as BaseLanguageModel
         const query = nodeData.inputs?.query as string
         const output = nodeData.outputs?.output as string

-        if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever')
+        if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever') // Ensure a model is provided

         const retriever = new ContextualCompressionRetriever({
             baseCompressor: LLMChainExtractor.fromLLM(model),
             baseRetriever: baseRetriever
         })

+        // Return the appropriate output based on the specified output type
         if (output === 'retriever') return retriever
         else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
         else if (output === 'text') {
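For readers who want to exercise the same pattern outside the Flowise node, below is a minimal standalone sketch of the wiring documented above. Only the ContextualCompressionRetriever / LLMChainExtractor setup and the getRelevantDocuments call mirror the hunk; the in-memory vector store, OpenAIEmbeddings, and ChatOpenAI model are illustrative assumptions, not part of this change.

import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai'
import { MemoryVectorStore } from 'langchain/vectorstores/memory'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'

async function main() {
    // Assumed base retriever: an in-memory vector store over two short texts
    const store = await MemoryVectorStore.fromTexts(
        ['LLMChainExtractor asks the model to keep only query-relevant passages.', 'Unrelated text about cooking.'],
        [{ id: 1 }, { id: 2 }],
        new OpenAIEmbeddings()
    )

    // Same wiring as init() above: wrap the base retriever with an LLM-backed compressor
    const retriever = new ContextualCompressionRetriever({
        baseCompressor: LLMChainExtractor.fromLLM(new ChatOpenAI({ temperature: 0 })),
        baseRetriever: store.asRetriever()
    })

    // Corresponds to the 'document' output branch: return compressed, query-relevant documents
    const docs = await retriever.getRelevantDocuments('What does the extractor do?')
    console.log(docs)
}

main()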