Commit 9ce475d
Fix pending high-level ui blocking calls #44
1 parent: 008ae9a · commit: 9ce475d

File tree

1 file changed: +21 −19 lines

ui/app.py

Lines changed: 21 additions & 19 deletions
@@ -308,7 +308,8 @@ async def chatbot(q: Q):
         n_cols = len(_response_df.columns)
         llm_response = f"The selected dataset has total number of {n_cols} columns.\nBelow is quick preview:\n{df_markdown}"
     elif q.args.chatbot and (q.args.chatbot.lower() == "recommend questions" or q.args.chatbot.lower() == "recommend qs"):
-        llm_response = recommend_suggestions(cache_path=q.client.table_info_path, table_name=q.client.table_name)
+        with concurrent.futures.ThreadPoolExecutor() as pool:
+            llm_response = await q.exec(pool, recommend_suggestions, cache_path=q.client.table_info_path, table_name=q.client.table_name)
         if not llm_response:
             llm_response = "Something went wrong, check the API Keys provided."
         logging.info(f"Recommended Questions:\n{llm_response}")
@@ -328,15 +329,15 @@ async def chatbot(q: Q):
         # Attempts to regenerate response on the last supplied query
         logging.info(f"Attempt for regeneration")
         if q.client.query is not None and q.client.query.strip() != "":
-            llm_response, alt_response, err = ask(
-                question=q.client.query,
-                sample_queries_path=q.client.sample_qna_path,
-                table_info_path=q.client.table_info_path,
-                table_name=q.client.table_name,
-                model_name=q.client.model_choice_dropdown,
-                is_regenerate=True,
-                is_regen_with_options=False
-            )
+            with concurrent.futures.ThreadPoolExecutor() as pool:
+                llm_response, alt_response, err = await q.exec(pool, ask, question=q.client.query,
+                    sample_queries_path=q.client.sample_qna_path,
+                    table_info_path=q.client.table_info_path,
+                    table_name=q.client.table_name,
+                    model_name=q.client.model_choice_dropdown,
+                    is_regenerate=True,
+                    is_regen_with_options=False
+                )
             llm_response = "\n".join(llm_response)
         else:
             llm_response = (
@@ -347,15 +348,16 @@ async def chatbot(q: Q):
         # Attempts to regenerate response on the last supplied query
         logging.info(f"Attempt for regeneration with options.")
         if q.client.query is not None and q.client.query.strip() != "":
-            llm_response, alt_response, err = ask(
-                question=q.client.query,
-                sample_queries_path=q.client.sample_qna_path,
-                table_info_path=q.client.table_info_path,
-                table_name=q.client.table_name,
-                model_name=q.client.model_choice_dropdown,
-                is_regenerate=False,
-                is_regen_with_options=True
-            )
+            with concurrent.futures.ThreadPoolExecutor() as pool:
+                llm_response, alt_response, err = await q.exec(pool, ask,
+                    question=q.client.query,
+                    sample_queries_path=q.client.sample_qna_path,
+                    table_info_path=q.client.table_info_path,
+                    table_name=q.client.table_name,
+                    model_name=q.client.model_choice_dropdown,
+                    is_regenerate=False,
+                    is_regen_with_options=True
+                )
             response = "\n".join(llm_response)
             if alt_response:
                 llm_response = response + "\n\n" + "**Alternate options:**\n" + "\n".join(alt_response)
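All three hunks apply the same H2O Wave pattern: the blocking call (ask, recommend_suggestions) is handed to a thread pool and awaited via q.exec, so the async chatbot handler no longer stalls the UI event loop while the LLM request runs. Below is a minimal, self-contained sketch of that pattern; slow_lookup is a hypothetical stand-in for the blocking functions, while app, Q, ui, and q.exec are the actual Wave APIs used in this commit.

# Sketch: offload a blocking call from a Wave handler (assumed example, not taken from ui/app.py).
import concurrent.futures
import time

from h2o_wave import Q, app, main, ui  # `main` is the ASGI entry point used by `wave run`


def slow_lookup(query: str) -> str:
    # Stand-in for a long-running call such as ask() or recommend_suggestions();
    # calling it directly inside the async handler would block the event loop.
    time.sleep(5)
    return f"answer for {query!r}"


@app('/demo')
async def serve(q: Q):
    # Run the blocking function in a worker thread and await the result,
    # keeping the handler (and the rest of the UI) responsive.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        answer = await q.exec(pool, slow_lookup, q.args.question or "ping")

    q.page['result'] = ui.markdown_card(box='1 1 4 2', title='Result', content=answer)
    await q.page.save()

Creating the ThreadPoolExecutor per call, as the commit does, keeps the diff small; a pool created once at startup and reused would also work and avoids spawning new threads on every request.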
