diff --git a/.env Azure sample b/.env Azure sample
new file mode 100644
index 0000000..365cba3
--- /dev/null
+++ b/.env Azure sample
@@ -0,0 +1,10 @@
+# OpenAI Settings
+OPENAI_API_KEY=your_openai_key
+AI_MODEL=o1-mini
+
+# Azure OpenAI Settings
+USE_AZURE=false # Set to "true" to use Azure OpenAI
+AZURE_OPENAI_API_KEY=your_azure_key
+AZURE_OPENAI_API_VERSION=2024-10-01-preview
+AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com
+AZURE_DEPLOYMENT_NAME=your_deployment_name
\ No newline at end of file
diff --git a/README.md b/README.md
index 6f5fff4..dc6b517 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,7 @@ A command-line tool designed to assist developers in managing and interacting wi
 
 ## NEW
 
+Added Azure OpenAI as another option. To use Azure OpenAI, set USE_AZURE to "true" and make sure you add your AZURE_OPENAI_API_KEY, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_ENDPOINT, and AZURE_DEPLOYMENT_NAME in your .env file.
 Added Grok Engineer to the repo. Make sure you add your XAI_API_KEY in your .env file. Added Streaming. 
 
diff --git a/o1-eng.py b/o1-eng.py
index 5876970..f481dd5 100644
--- a/o1-eng.py
+++ b/o1-eng.py
@@ -2,7 +2,7 @@
 import fnmatch
 import logging
 import time
-from openai import OpenAI
+from openai import OpenAI, AzureOpenAI
 from dotenv import load_dotenv
 from termcolor import colored
 from prompt_toolkit import prompt
@@ -18,9 +18,18 @@
 
 load_dotenv()
 
-MODEL = "o1-mini"
-# Initialize OpenAI client
-client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+MODEL = os.getenv("AI_MODEL", "o1-mini") # Default to o1-mini if not specified
+USE_AZURE = os.getenv("USE_AZURE", "false").lower() == "true"
+
+# Initialize OpenAI clients
+if USE_AZURE:
+    client = AzureOpenAI(
+        api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+        api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
+        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")
+    )
+else:
+    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
 
 CREATE_SYSTEM_PROMPT = """You are an advanced o1 engineer designed to create files and folders based on user instructions. Your primary objective is to generate the content of the files to be created as code blocks. Each code block should specify whether it's a file or folder, along with its path.
@@ -437,13 +446,22 @@ def chat_with_ai(user_message, is_edit_request=False, retry_count=0, added_files
         print(colored("o1 engineer is thinking...", "magenta"))
         logging.info("Sending general query to AI.")
 
-        # Create streaming response
-        stream = client.chat.completions.create(
-            model=MODEL,
-            messages=messages,
-            max_completion_tokens=60000,
-            stream=True
-        )
+        # Create streaming response based on provider
+        if USE_AZURE:
+            deployment_name = os.getenv("AZURE_DEPLOYMENT_NAME")
+            stream = client.chat.completions.create(
+                model=deployment_name,
+                messages=messages,
+                max_completion_tokens=60000,
+                stream=True
+            )
+        else:
+            stream = client.chat.completions.create(
+                model=MODEL,
+                messages=messages,
+                max_completion_tokens=60000,
+                stream=True
+            )
 
         # Initialize response content
         response_content = ""
@@ -469,8 +487,9 @@ def chat_with_ai(user_message, is_edit_request=False, retry_count=0, added_files
 
         return last_ai_response
     except Exception as e:
-        print(colored(f"\nError while communicating with OpenAI: {e}", "red"))
-        logging.error(f"Error while communicating with OpenAI: {e}")
+        provider = "Azure OpenAI" if USE_AZURE else "OpenAI"
+        print(colored(f"\nError while communicating with {provider}: {e}", "red"))
+        logging.error(f"Error while communicating with {provider}: {e}")
         return None
 
 
diff --git a/requirements.txt b/requirements.txt
index 17daa26..409acb3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,4 @@
 termcolor
 prompt_toolkit
 rich
+python-dotenv
\ No newline at end of file