#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2025
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant to a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. ("Cloudera") to the
# contrary, (A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D) WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#

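"""Chat orchestration for RAG Studio sessions.

Routes an incoming query either through the RAG query pipeline or, when no
populated knowledge base is available, directly to the session's inference
model, and persists the resulting message to the session's chat history.
"""
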
import time
import uuid
from typing import Optional

from fastapi import HTTPException

from app.services import evaluators, llm_completion
from app.services.chat.utils import retrieve_chat_history, format_source_nodes
from app.services.chat_history.chat_history_manager import (
    Evaluation,
    RagMessage,
    RagStudioChatMessage,
    chat_history_manager,
)
from app.services.metadata_apis.session_metadata_api import Session
from app.services.mlflow import record_rag_mlflow_run, record_direct_llm_mlflow_run
from app.services.query import querier
from app.services.query.query_configuration import QueryConfiguration
from app.ai.vector_stores.vector_store_factory import VectorStoreFactory
from app.rag_types import RagPredictConfiguration


def chat(
    session: Session,
    query: str,
    configuration: RagPredictConfiguration,
    user_name: Optional[str],
) -> RagStudioChatMessage:
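    """Answer a user query for the given session.

    Falls back to a direct LLM completion when the knowledge base is
    excluded, the session has no data sources, or every attached vector
    store is empty; otherwise runs the RAG pipeline and appends the
    resulting message to the session's chat history.
    """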
    query_configuration = QueryConfiguration(
        top_k=session.response_chunks,
        model_name=session.inference_model,
        rerank_model_name=session.rerank_model,
        exclude_knowledge_base=configuration.exclude_knowledge_base,
        use_question_condensing=configuration.use_question_condensing,
        use_hyde=session.query_configuration.enable_hyde,
        use_summary_filter=session.query_configuration.enable_summary_filter,
    )

    response_id = str(uuid.uuid4())

    # Skip retrieval entirely when the caller opts out of the knowledge
    # base or the session has no data sources attached.
    if configuration.exclude_knowledge_base or len(session.data_source_ids) == 0:
        return direct_llm_chat(session, response_id, query, user_name)

    # Attached data sources may not have any indexed chunks yet; with
    # nothing to retrieve, fall back to a direct LLM completion.
    total_data_sources_size: int = sum(
        map(
            lambda ds_id: VectorStoreFactory.for_chunks(ds_id).size() or 0,
            session.data_source_ids,
        )
    )
    if total_data_sources_size == 0:
        return direct_llm_chat(session, response_id, query, user_name)

    new_chat_message: RagStudioChatMessage = _run_chat(
        session, response_id, query, query_configuration, user_name
    )

    chat_history_manager.append_to_history(session.id, [new_chat_message])
    return new_chat_message


def _run_chat(
    session: Session,
    response_id: str,
    query: str,
    query_configuration: QueryConfiguration,
    user_name: Optional[str],
) -> RagStudioChatMessage:
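    """Run the RAG pipeline against the session's single data source.

    Retrieves context, synthesizes a response, evaluates it for relevance
    and faithfulness, and records the run in MLflow. The caller is
    responsible for appending the returned message to chat history.
    """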
    if len(session.data_source_ids) != 1:
        raise HTTPException(
            status_code=400, detail="Only one data source is supported for chat."
        )

    data_source_id: int = session.data_source_ids[0]
    response, condensed_question = querier.query(
        data_source_id,
        query,
        query_configuration,
        retrieve_chat_history(session.id),
    )
    # Only keep the condensed question if condensing actually changed it.
    if condensed_question and (condensed_question.strip() == query.strip()):
        condensed_question = None
    relevance, faithfulness = evaluators.evaluate_response(
        query, response, session.inference_model
    )
    response_source_nodes = format_source_nodes(response, data_source_id)
    new_chat_message = RagStudioChatMessage(
        id=response_id,
        session_id=session.id,
        source_nodes=response_source_nodes,
        inference_model=session.inference_model,
        rag_message=RagMessage(
            user=query,
            assistant=response.response,
        ),
        evaluations=[
            Evaluation(name="relevance", value=relevance),
            Evaluation(name="faithfulness", value=faithfulness),
        ],
        timestamp=time.time(),
        condensed_question=condensed_question,
    )

    record_rag_mlflow_run(
        new_chat_message, query_configuration, response_id, session, user_name
    )
    return new_chat_message


def direct_llm_chat(
    session: Session, response_id: str, query: str, user_name: Optional[str]
) -> RagStudioChatMessage:
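    """Answer a query with a plain LLM completion, bypassing retrieval.

    Records the run in MLflow, appends the resulting message (with no
    source nodes or evaluations) to the session's chat history, and
    returns it.
    """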
    record_direct_llm_mlflow_run(response_id, session, user_name)

    chat_response = llm_completion.completion(
        session.id, query, session.inference_model
    )
    new_chat_message = RagStudioChatMessage(
        id=response_id,
        session_id=session.id,
        source_nodes=[],
        inference_model=session.inference_model,
        evaluations=[],
        rag_message=RagMessage(
            user=query,
            assistant=str(chat_response.message.content),
        ),
        timestamp=time.time(),
        condensed_question=None,
    )
    chat_history_manager.append_to_history(session.id, [new_chat_message])
    return new_chat_message