/**
 * Module imports for the LLM Filter Retriever node: LangChain's contextual
 * compression retriever and LLM-chain document extractor, plus the project's
 * node interfaces and escape-character utility.
 */
14import { BaseRetriever } from '@langchain/core/retrievers'
25import { BaseLanguageModel } from '@langchain/core/language_models/base'
36import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
47import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'
58import { handleEscapeCharacters } from '../../../src/utils'
69import { INode , INodeData , INodeOutputsValue , INodeParams } from '../../../src/Interface'
710
/**
 * Node wrapping LangChain's ContextualCompressionRetriever: an LLM-driven
 * extractor (LLMChainExtractor) filters and compresses the documents returned
 * by a connected base vector-store retriever.
 */
815class LLMFilterCompressionRetriever_Retrievers implements INode {
916 label : string
1017 name : string
@@ -18,6 +25,9 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
1825 outputs : INodeOutputsValue [ ]
1926 badge : string
2027
/**
 * Populates the node's metadata fields (label, name, and the remaining
 * descriptor properties declared above).
 */
2131 constructor ( ) {
2232 this . label = 'LLM Filter Retriever'
2333 this . name = 'llmFilterRetriever'
@@ -69,19 +79,26 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
6979 ]
7080 }
7181
/**
 * Builds the ContextualCompressionRetriever from the connected model and base
 * retriever, then returns a value determined by the selected output type.
 * @param nodeData - Node configuration carrying the connected inputs/outputs.
 * @param input - Fallback query string used when no explicit query is set.
 * @returns The retriever instance, the relevant documents, or their text,
 *          depending on the configured output type.
 */
7288 async init ( nodeData : INodeData , input : string ) : Promise < any > {
7389 const baseRetriever = nodeData . inputs ?. baseRetriever as BaseRetriever
7490 const model = nodeData . inputs ?. model as BaseLanguageModel
7591 const query = nodeData . inputs ?. query as string
7692 const output = nodeData . outputs ?. output as string
7793
78- if ( ! model ) throw new Error ( 'There must be a LLM model connected to LLM Filter Retriever' )
94+ if ( ! model ) throw new Error ( 'There must be a LLM model connected to LLM Filter Retriever' ) // Ensure a model is provided
7995
8096 const retriever = new ContextualCompressionRetriever ( {
8197 baseCompressor : LLMChainExtractor . fromLLM ( model ) ,
8298 baseRetriever : baseRetriever
8399 } )
84100
101+ // Return the appropriate output based on the specified output type
85102 if ( output === 'retriever' ) return retriever
86103 else if ( output === 'document' ) return await retriever . getRelevantDocuments ( query ? query : input )
87104 else if ( output === 'text' ) {
0 commit comments