@@ -138,7 +138,7 @@ def generate_response(self, context_container, sql_index, input_prompt, attempt_
         try:
             # Attempt to heal with simple feedback
             # Reference: Teaching Large Language Models to Self-Debug, https://arxiv.org/abs/2304.05128
-            logger.info(f"Attempting to heal ...,\n{se}")
+            logger.info(f"Attempting to fix syntax error ...,\n{se}")
             system_prompt = DEBUGGING_PROMPT["system_prompt"]
             user_prompt = DEBUGGING_PROMPT["user_prompt"].format(ex_traceback=ex_traceback, qry_txt=qry_txt)
             # Role and content
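
The retry here follows the self-debugging recipe from the referenced paper: the failing query and its traceback are folded into a debugging prompt and sent back to the model as a chat exchange. A minimal sketch of that feedback round, assuming an injected `generate` callable for the LLM call and illustrative prompt strings (not the repo's actual templates):

```python
from typing import Callable, Dict, List

# Illustrative templates; the real DEBUGGING_PROMPT lives elsewhere in the repo.
DEBUGGING_PROMPT = {
    "system_prompt": "You are a SQL debugger. Return only the corrected query.",
    "user_prompt": "The query below failed.\nTraceback:\n{ex_traceback}\nQuery:\n{qry_txt}",
}

def heal_query(generate: Callable[[List[Dict[str, str]]], str],
               qry_txt: str, ex_traceback: str) -> str:
    # Role and content, as expected by a chat-completions style API
    messages = [
        {"role": "system", "content": DEBUGGING_PROMPT["system_prompt"]},
        {"role": "user", "content": DEBUGGING_PROMPT["user_prompt"].format(
            ex_traceback=ex_traceback, qry_txt=qry_txt)},
    ]
    # Single feedback round; the caller re-executes the result and can loop on further errors.
    return generate(messages)
```
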
@@ -193,7 +193,7 @@ def generate_tasks(self, table_names: list, input_question: str):
         except Exception as se:
             raise se

-    def generate_sql(self, table_name: list, input_question: str, _dialect: str = "postgres"):
+    def generate_sql(self, table_name: list, input_question: str, _dialect: str = "postgres", model_name: str = 'gpt-3.5-turbo-0301'):
         _tasks = self.task_formatter(self._tasks)
         context_file = f"{self.path}/var/lib/tmp/data/context.json"
         additional_context = json.load(open(context_file, "r")) if Path(context_file).exists() else {}
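
The new `model_name` keyword lets callers pick the chat model per request instead of relying on the model hard-coded further down. A hedged usage sketch; `SQLGenerator` is a stand-in for the class that owns `generate_sql` and is not shown in this diff:

```python
# Hypothetical caller; only the generate_sql signature comes from the diff above.
agent = SQLGenerator(path="/workspace")
query = agent.generate_sql(
    table_name=["sleep_quality"],
    input_question="What is the average sleep duration per user?",
    _dialect="postgres",
    model_name="gpt-3.5-turbo-0301",  # new keyword; this is also the default
)
```
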
@@ -217,7 +217,7 @@ def generate_sql(self, table_name: list, input_question: str, _dialect: str = "p
         context_container = self.context_builder.build_context_container()

         # Reference: https://github.com/jerryjliu/llama_index/issues/987
-        llm_predictor_gpt3 = LLMPredictor(llm=OpenAI(temperature=0.7, model_name="text-davinci-003"))
+        llm_predictor_gpt3 = LLMPredictor(llm=OpenAI(temperature=0.5, model_name=model_name))
         service_context_gpt3 = ServiceContext.from_defaults(llm_predictor=llm_predictor_gpt3, chunk_size_limit=512)

         index = GPTSQLStructStoreIndex(
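
Downstream, the parameterized `model_name` together with the lower temperature (0.5 instead of 0.7) makes SQL generation both configurable and more reproducible. A minimal sketch of the service-context wiring under the legacy llama-index (~0.6) and langchain APIs this code targets; treat it as an assumption about the surrounding setup, not the repo's exact implementation:

```python
from langchain.llms import OpenAI
from llama_index import LLMPredictor, ServiceContext

def build_service_context(model_name: str = "gpt-3.5-turbo-0301") -> ServiceContext:
    # Lower temperature trades variety for more deterministic SQL.
    predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name=model_name))
    # chunk_size_limit keeps schema chunks small enough for the prompt window.
    return ServiceContext.from_defaults(llm_predictor=predictor, chunk_size_limit=512)
```
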