Commit bd93e15

fix: oops forgot to remove comment
1 parent fd25bd8 commit bd93e15

1 file changed: +0 -126 lines changed

backend/tests/regression/search_quality/run_search_eval.py

Lines changed: 0 additions & 126 deletions
@@ -159,129 +159,3 @@ def run_search_eval() -> None:
         raise e
     finally:
         SqlEngine.reset_engine()
-
-
-# def run_search_eval() -> None:
-#     if MULTI_TENANT:
-#         raise ValueError("Multi-tenant is not supported currently")
-
-#     SqlEngine.init_engine(
-#         pool_size=POSTGRES_API_SERVER_POOL_SIZE,
-#         max_overflow=POSTGRES_API_SERVER_POOL_OVERFLOW,
-#     )
-
-#     query_pairs = _load_query_pairs()
-#     search_parameters = _load_search_parameters()
-
-#     with get_session_with_current_tenant() as db_session:
-#         multilingual_expansion = get_multilingual_expansion(db_session)
-#         search_settings = get_current_search_settings(db_session)
-#         document_index = get_default_document_index(search_settings, None)
-#         rerank_settings = RerankingDetails.from_db_model(search_settings)
-
-#         if search_parameters.skip_rerank:
-#             logger.warning("Reranking is disabled, evaluation will not run")
-#         elif rerank_settings.rerank_model_name is None:
-#             raise ValueError(
-#                 "Reranking is enabled but no reranker is configured. "
-#                 "Please set the reranker in the admin panel search settings."
-#             )
-
-#         export_path = Path(search_parameters.export_folder)
-#         search_result_file = export_path / "search_results.csv"
-#         eval_result_file = export_path / "eval_results.csv"
-#         with (
-#             search_result_file.open("w") as search_file,
-#             eval_result_file.open("w") as eval_file,
-#         ):
-#             search_csv_writer = csv.writer(search_file)
-#             eval_csv_writer = csv.writer(eval_file)
-#             search_csv_writer.writerow(
-#                 ["source", "query", "rank", "score", "doc_id", "chunk_id"]
-#             )
-#             eval_csv_writer.writerow(
-#                 [
-#                     "query",
-#                     "jaccard_similarity",
-#                     "missing_chunks_ratio",
-#                     "average_rank_change",
-#                     "jaccard_similarity_adj",
-#                     "missing_chunks_ratio_adj",
-#                     "average_rank_change_adj",
-#                 ]
-#             )
-
-#             sum_metrics = [0.0] * 6
-#             for orig_query, alt_query in query_pairs:
-#                 search_results = _search_one_query(
-#                     alt_query,
-#                     multilingual_expansion,
-#                     document_index,
-#                     db_session,
-#                     search_parameters,
-#                 )
-#                 for rank, result in enumerate(search_results):
-#                     search_csv_writer.writerow(
-#                         [
-#                             "search",
-#                             alt_query,
-#                             rank,
-#                             result.score,
-#                             result.document_id,
-#                             result.chunk_id,
-#                         ]
-#                     )
-
-#                 if not search_parameters.skip_rerank:
-#                     rerank_results = _rerank_one_query(
-#                         orig_query, search_results, rerank_settings, search_parameters
-#                     )
-#                     for rank, result in enumerate(rerank_results):
-#                         search_csv_writer.writerow(
-#                             [
-#                                 "rerank",
-#                                 orig_query,
-#                                 rank,
-#                                 result.score,
-#                                 result.document_id,
-#                                 result.chunk_id,
-#                             ]
-#                         )
-
-#                     metrics = _evaluate_one_query(
-#                         search_results, rerank_results, search_parameters
-#                     )
-#                     eval_csv_writer.writerow([orig_query, *metrics])
-#                     sum_metrics = [
-#                         sum_metric + metric
-#                         for sum_metric, metric in zip(sum_metrics, metrics)
-#                     ]
-
-#     logger.info(
-#         f"Exported individual results to {search_result_file} and {eval_result_file}"
-#     )
-
-#     if not search_parameters.skip_rerank:
-#         average_metrics = [metric / len(query_pairs) for metric in sum_metrics]
-#         logger.info(f"Jaccard similarity: {average_metrics[0]}")
-#         logger.info(f"Average missing chunks ratio: {average_metrics[1]}")
-#         logger.info(f"Average rank change: {average_metrics[2]}")
-#         logger.info(f"Jaccard similarity (adjusted): {average_metrics[3]}")
-#         logger.info(f"Average missing chunks ratio (adjusted): {average_metrics[4]}")
-#         logger.info(f"Average rank change (adjusted): {average_metrics[5]}")
-
-#         aggregate_file = export_path / "aggregate_results.csv"
-#         with aggregate_file.open("w") as file:
-#             aggregate_csv_writer = csv.writer(file)
-#             aggregate_csv_writer.writerow(
-#                 [
-#                     "jaccard_similarity",
-#                     "missing_chunks_ratio",
-#                     "average_rank_change",
-#                     "jaccard_similarity_adj",
-#                     "missing_chunks_ratio_adj",
-#                     "average_rank_change_adj",
-#                 ]
-#             )
-#             aggregate_csv_writer.writerow(average_metrics)
-#             logger.info(f"Exported aggregate results to {aggregate_file}")

0 commit comments
