diff --git a/.claude/settings.json b/.claude/settings.json index 8ee1dfe1..a2d3f913 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -4,4 +4,4 @@ ], "deny": [] } -} \ No newline at end of file +} diff --git a/.coveragerc b/.coveragerc index 723a254d..7af63050 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,6 @@ [run] source = gemini_server -omit = +omit = */tests/* */venv/* */__pycache__/* @@ -21,4 +21,4 @@ exclude_lines = @(abc\.)?abstractmethod [html] -directory = htmlcov \ No newline at end of file +directory = htmlcov diff --git a/.env.example b/.env.example index 51cfa1a6..5f53f29f 100644 --- a/.env.example +++ b/.env.example @@ -65,7 +65,7 @@ DEFAULT_MODEL=auto # Flash models (2.0) will use system prompt engineering instead # Token consumption per mode: # minimal: 128 tokens - Quick analysis, fastest response -# low: 2,048 tokens - Light reasoning tasks +# low: 2,048 tokens - Light reasoning tasks # medium: 8,192 tokens - Balanced reasoning (good for most cases) # high: 16,384 tokens - Complex analysis (recommended for thinkdeep) # max: 32,768 tokens - Maximum reasoning depth, slowest but most thorough diff --git a/.envrc b/.envrc new file mode 100644 index 00000000..3b11770b --- /dev/null +++ b/.envrc @@ -0,0 +1,2 @@ +use flake +dotenv diff --git a/.gitattributes b/.gitattributes index c8f9e2fa..f9ab13f4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -24,4 +24,4 @@ run_integration_tests text eol=lf *.jpeg binary *.gif binary *.ico binary -*.pdf binary \ No newline at end of file +*.pdf binary diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 774c56a0..8ee78842 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -9,4 +9,3 @@ contact_links: - name: 🀝 Contributing Guide url: https://github.com/BeehiveInnovations/zen-mcp-server/blob/main/CONTRIBUTING.md about: Learn how to contribute to the project - diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml index cb555fa1..caddf823 100644 --- a/.github/ISSUE_TEMPLATE/documentation.yml +++ b/.github/ISSUE_TEMPLATE/documentation.yml @@ -57,4 +57,3 @@ body: - All users validations: required: true - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index f138e7e9..75590fba 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -49,4 +49,3 @@ body: label: Contribution options: - label: I am willing to submit a Pull Request to implement this feature. - diff --git a/.github/ISSUE_TEMPLATE/tool_addition.yml b/.github/ISSUE_TEMPLATE/tool_addition.yml index 1c2212da..451ec859 100644 --- a/.github/ISSUE_TEMPLATE/tool_addition.yml +++ b/.github/ISSUE_TEMPLATE/tool_addition.yml @@ -72,4 +72,3 @@ body: options: - label: I am willing to submit a Pull Request to implement this new tool. - label: I have checked that this tool doesn't overlap significantly with existing tools (analyze, codereview, debug, thinkdeep, chat). 
- diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 0e466044..512daa52 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,7 +4,7 @@ ### Version Bumping Types (trigger semantic release): - `feat: ` - New features β†’ **MINOR** version bump (1.1.0 β†’ 1.2.0) -- `fix: ` - Bug fixes β†’ **PATCH** version bump (1.1.0 β†’ 1.1.1) +- `fix: ` - Bug fixes β†’ **PATCH** version bump (1.1.0 β†’ 1.1.1) - `perf: ` - Performance improvements β†’ **PATCH** version bump (1.1.0 β†’ 1.1.1) ### Breaking Changes (trigger MAJOR version bump): @@ -82,4 +82,4 @@ Fixes #(issue number) ## Additional Notes -Any additional information that reviewers should know. \ No newline at end of file +Any additional information that reviewers should know. diff --git a/.github/workflows/docker-pr.yml b/.github/workflows/docker-pr.yml index c05519e4..3d3fa616 100644 --- a/.github/workflows/docker-pr.yml +++ b/.github/workflows/docker-pr.yml @@ -25,7 +25,7 @@ jobs: github.event.action == 'synchronize' || github.event.action == 'reopened' || contains(github.event.pull_request.labels.*.name, 'docker-build') - + steps: - name: Checkout uses: actions/checkout@v4 @@ -82,15 +82,15 @@ jobs: header: docker-build message: | ## 🐳 Docker Build Complete - + **PR**: #${{ github.event.number }} | **Commit**: `${{ github.sha }}` - + ``` ${{ steps.meta.outputs.tags }} ``` - + **Test:** `docker pull ghcr.io/${{ github.repository }}:pr-${{ github.event.number }}` - + **Claude config:** ```json { @@ -103,7 +103,7 @@ jobs: } } ``` - + πŸ’‘ Add `docker-build` label to manually trigger builds diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index 445052a0..7cd408ea 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -18,7 +18,7 @@ jobs: docker: name: Build and Push Docker Image runs-on: ubuntu-latest - + steps: - name: Checkout uses: actions/checkout@v4 @@ -65,24 +65,24 @@ jobs: run: | RELEASE_TAG="${{ github.event.release.tag_name }}" DOCKER_TAGS=$(echo "${{ steps.meta.outputs.tags }}" | tr '\n' ' ') - + # Add Docker information to the release gh release edit "$RELEASE_TAG" --notes-file - << EOF ${{ github.event.release.body }} - + --- - + ## 🐳 Docker Images - + This release is available as Docker images: - + $(echo "$DOCKER_TAGS" | sed 's/ghcr.io/- `ghcr.io/g' | sed 's/ /`\n/g') - + **Quick start with Docker:** \`\`\`bash docker pull ghcr.io/${{ github.repository }}:$RELEASE_TAG \`\`\` - + **Claude Desktop configuration:** \`\`\`json { @@ -113,4 +113,4 @@ jobs: echo "**Images built:**" >> $GITHUB_STEP_SUMMARY echo "\`\`\`" >> $GITHUB_STEP_SUMMARY echo "${{ steps.meta.outputs.tags }}" >> $GITHUB_STEP_SUMMARY - echo "\`\`\`" >> $GITHUB_STEP_SUMMARY \ No newline at end of file + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/semantic-pr.yml b/.github/workflows/semantic-pr.yml index 48c3aa3f..66f49e63 100644 --- a/.github/workflows/semantic-pr.yml +++ b/.github/workflows/semantic-pr.yml @@ -44,4 +44,4 @@ jobs: if: ${{ steps.lint-pr-title.outputs.error_message == null }} with: header: pr-title-lint-error - delete: true \ No newline at end of file + delete: true diff --git a/.gitignore b/.gitignore index be60b015..11624bd0 100644 --- a/.gitignore +++ b/.gitignore @@ -185,6 +185,11 @@ logs/ *.backup /.desktop_configured +# Nix development environment +.nix-venv/ +.direnv/ +result + /worktrees/ test_simulation_files/ .mcp.json diff --git a/.pre-commit-config.yaml 
b/.pre-commit-config.yaml index d17cf3c9..acb3c846 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,21 @@ --- default_stages: [pre-commit, pre-push] repos: + # Standard pre-commit hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-json + - id: check-toml + - id: check-merge-conflict + - id: check-added-large-files + - id: mixed-line-ending + args: ['--fix=lf'] + + # Python formatting and linting - repo: https://github.com/psf/black rev: 25.1.0 hooks: @@ -29,6 +44,8 @@ exclude: | \.venv/| venv/| \.zen_venv/| + \.nix-venv/| + \.direnv/| __pycache__/| \.pytest_cache/| logs/| diff --git a/CLAUDE.md b/CLAUDE.md index 89db9d95..2c382896 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -18,7 +18,7 @@ source venv/bin/activate This script automatically runs: - Ruff linting with auto-fix -- Black code formatting +- Black code formatting - Import sorting with isort - Complete unit test suite (excluding integration tests) - Verification that all checks pass 100% @@ -93,7 +93,7 @@ tail -f logs/mcp_activity.log | grep -E "(TOOL_CALL|TOOL_COMPLETED|ERROR|WARNING # Main server log (all activity including debug info) - 20MB max, 10 backups tail -f logs/mcp_server.log -# Tool activity only (TOOL_CALL, TOOL_COMPLETED, etc.) - 20MB max, 5 backups +# Tool activity only (TOOL_CALL, TOOL_COMPLETED, etc.) - 20MB max, 5 backups tail -f logs/mcp_activity.log ``` @@ -115,7 +115,7 @@ matches = LogUtils.search_logs_for_pattern("TOOL_CALL.*debug") ### Testing Simulation tests are available to test the MCP server in a 'live' scenario, using your configured -API keys to ensure the models are working and the server is able to communicate back and forth. +API keys to ensure the models are working and the server is able to communicate back and forth. **IMPORTANT**: After any code changes, restart your Claude session for the changes to take effect. @@ -317,4 +317,4 @@ isort --check-only . - All dependencies from `requirements.txt` installed - Proper API keys configured in `.env` file -This guide provides everything needed to efficiently work with the Zen MCP Server codebase using Claude. Always run quality checks before and after making changes to ensure code integrity. \ No newline at end of file +This guide provides everything needed to efficiently work with the Zen MCP Server codebase using Claude. Always run quality checks before and after making changes to ensure code integrity. diff --git a/LICENSE b/LICENSE index 2d18748b..cc4a320e 100644 --- a/LICENSE +++ b/LICENSE @@ -194,4 +194,4 @@ Apache License distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. 
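The new `pre-commit-hooks` entries added above (`trailing-whitespace`, `end-of-file-fixer`, `mixed-line-ending`) are what account for the trailing-whitespace and missing-final-newline fixes that make up most of this diff. A minimal sketch of applying them locally, assuming `pre-commit` itself is already installed (for example via `pip install pre-commit`):

```bash
# One-time: register the hooks from .pre-commit-config.yaml with git
pre-commit install

# Apply every configured hook to the whole repository, not just staged files
pre-commit run --all-files

# Or target a single hook, e.g. only the trailing-whitespace fixer
pre-commit run trailing-whitespace --all-files
```

After `pre-commit install`, the same fixes are applied automatically on each commit, so this kind of whitespace churn should not reappear.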
diff --git a/claude_config_example.json b/claude_config_example.json index fa2e7d9e..022acb00 100644 --- a/claude_config_example.json +++ b/claude_config_example.json @@ -8,4 +8,4 @@ "args": ["/path/to/zen-mcp-server/server.py"] } } -} \ No newline at end of file +} diff --git a/code_quality_checks.ps1 b/code_quality_checks.ps1 index 73e3dcc5..dfae581b 100644 --- a/code_quality_checks.ps1 +++ b/code_quality_checks.ps1 @@ -80,7 +80,7 @@ if (Test-Path ".zen_venv") { $pipCmd = ".zen_venv/bin/pip" } } - + if ($pythonCmd) { Write-Emoji "βœ…" "Using venv" -Color Green } @@ -105,7 +105,7 @@ $devTools = @("ruff", "black", "isort", "pytest") foreach ($tool in $devTools) { $toolFound = $false - + # Check in venv if ($IsWindows -or $env:OS -eq "Windows_NT") { if (Test-Path ".zen_venv\Scripts\$tool.exe") { @@ -116,7 +116,7 @@ foreach ($tool in $devTools) { $toolFound = $true } } - + # Check in PATH if (!$toolFound) { try { @@ -126,7 +126,7 @@ foreach ($tool in $devTools) { # Tool not found } } - + if (!$toolFound) { $devDepsNeeded = $true break @@ -214,12 +214,12 @@ if (!$SkipTests) { try { Write-Emoji "πŸƒ" "Running unit tests (excluding integration tests)..." -Color Yellow - + $pytestArgs = @("tests/", "-v", "-x", "-m", "not integration") if ($VerboseOutput) { $pytestArgs += "--verbose" } - + & $pythonCmd -m pytest @pytestArgs if ($LASTEXITCODE -ne 0) { throw "Unit tests failed" diff --git a/code_quality_checks.sh b/code_quality_checks.sh index 85295430..e6a6055c 100755 --- a/code_quality_checks.sh +++ b/code_quality_checks.sh @@ -67,16 +67,16 @@ echo "πŸ“‹ Step 1: Running Linting and Formatting Checks" echo "--------------------------------------------------" echo "πŸ”§ Running ruff linting with auto-fix..." -$RUFF check --fix --exclude test_simulation_files --exclude .zen_venv +$RUFF check --fix --exclude test_simulation_files --exclude .zen_venv --exclude .nix-venv echo "🎨 Running black code formatting..." -$BLACK . --exclude="test_simulation_files/" --exclude=".zen_venv/" +$BLACK . --exclude="test_simulation_files/" --exclude=".zen_venv/" --exclude=".nix-venv/" echo "πŸ“¦ Running import sorting with isort..." -$ISORT . --skip-glob=".zen_venv/*" --skip-glob="test_simulation_files/*" +$ISORT . --skip-glob=".zen_venv/*" --skip-glob=".nix-venv/*" --skip-glob="test_simulation_files/*" echo "βœ… Verifying all linting passes..." -$RUFF check --exclude test_simulation_files --exclude .zen_venv +$RUFF check --exclude test_simulation_files --exclude .zen_venv --exclude .nix-venv echo "βœ… Step 1 Complete: All linting and formatting checks passed!" echo "" @@ -95,9 +95,9 @@ echo "" echo "πŸŽ‰ All Code Quality Checks Passed!" echo "==================================" echo "βœ… Linting (ruff): PASSED" -echo "βœ… Formatting (black): PASSED" +echo "βœ… Formatting (black): PASSED" echo "βœ… Import sorting (isort): PASSED" echo "βœ… Unit tests: PASSED" echo "" echo "πŸš€ Your code is ready for commit and GitHub Actions!" 
-echo "πŸ’‘ Remember to add simulator tests if you modified tools" \ No newline at end of file +echo "πŸ’‘ Remember to add simulator tests if you modified tools" diff --git a/docker-compose.yml b/docker-compose.yml index 1acd79c4..49585801 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,19 +6,19 @@ services: target: runtime image: zen-mcp-server:latest container_name: zen-mcp-server - + # Container labels for traceability labels: - "com.zen-mcp.service=zen-mcp-server" - "com.zen-mcp.version=1.0.0" - "com.zen-mcp.environment=production" - "com.zen-mcp.description=AI-powered Model Context Protocol server" - + # Environment variables environment: # Default model configuration - DEFAULT_MODEL=${DEFAULT_MODEL:-auto} - + # API Keys (use Docker secrets in production) - GEMINI_API_KEY=${GEMINI_API_KEY} - GOOGLE_API_KEY=${GOOGLE_API_KEY} @@ -32,32 +32,32 @@ services: - CUSTOM_API_URL=${CUSTOM_API_URL} - CUSTOM_API_KEY=${CUSTOM_API_KEY} - CUSTOM_MODEL_NAME=${CUSTOM_MODEL_NAME} - + # Logging configuration - LOG_LEVEL=${LOG_LEVEL:-INFO} - LOG_MAX_SIZE=${LOG_MAX_SIZE:-10MB} - LOG_BACKUP_COUNT=${LOG_BACKUP_COUNT:-5} - + # Advanced configuration - DEFAULT_THINKING_MODE_THINKDEEP=${DEFAULT_THINKING_MODE_THINKDEEP:-high} - DISABLED_TOOLS=${DISABLED_TOOLS} - MAX_MCP_OUTPUT_TOKENS=${MAX_MCP_OUTPUT_TOKENS} - + # Server configuration - PYTHONUNBUFFERED=1 - PYTHONPATH=/app - TZ=${TZ:-UTC} - + # Volumes for persistent data volumes: - ./logs:/app/logs - zen-mcp-config:/app/conf - /etc/localtime:/etc/localtime:ro - + # Network configuration networks: - zen-network - + # Resource limits deploy: resources: @@ -67,7 +67,7 @@ services: reservations: memory: 256M cpus: '0.25' - + # Health check healthcheck: test: ["CMD", "python", "/usr/local/bin/healthcheck.py"] @@ -75,10 +75,10 @@ services: timeout: 10s retries: 3 start_period: 40s - + # Restart policy restart: unless-stopped - + # Security security_opt: - no-new-privileges:true diff --git a/docker/README.md b/docker/README.md index bf8c1e25..c798ffe1 100644 --- a/docker/README.md +++ b/docker/README.md @@ -109,7 +109,7 @@ docker-compose up -d zen-mcp The container includes health checks that verify: - Server process is running - Python modules can be imported -- Log directory is writable +- Log directory is writable - API keys are configured ## Volumes and Persistent Data @@ -333,7 +333,7 @@ python -m json.tool .vscode/mcp.json The Zen MCP Server provides these tools when properly configured: - **chat** - General AI conversation and collaboration -- **thinkdeep** - Multi-stage investigation and reasoning +- **thinkdeep** - Multi-stage investigation and reasoning - **planner** - Interactive sequential planning - **consensus** - Multi-model consensus workflow - **codereview** - Comprehensive code review diff --git a/docker/scripts/deploy.ps1 b/docker/scripts/deploy.ps1 index 92ee4cd6..61e7b29d 100644 --- a/docker/scripts/deploy.ps1 +++ b/docker/scripts/deploy.ps1 @@ -31,13 +31,13 @@ function Test-EnvironmentVariables { # At least one of these API keys must be set $requiredVars = @( "GEMINI_API_KEY", - "GOOGLE_API_KEY", + "GOOGLE_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "DIAL_API_KEY", "OPENROUTER_API_KEY" ) - + $hasApiKey = $false foreach ($var in $requiredVars) { $value = [Environment]::GetEnvironmentVariable($var) @@ -58,7 +58,7 @@ function Test-EnvironmentVariables { # Load environment variables from .env file if (Test-Path ".env") { Write-ColorText "Loading environment variables from .env..." 
-Color Green - + # Read .env file and set environment variables Get-Content ".env" | ForEach-Object { if ($_ -match '^([^#][^=]*?)=(.*)$') { @@ -85,7 +85,7 @@ function Wait-ForHealth { [int]$MaxAttempts = 6, [int]$InitialDelay = 2 ) - + $attempt = 1 $delay = $InitialDelay @@ -101,11 +101,11 @@ function Wait-ForHealth { $status = "unavailable" } } - + if ($status -eq "healthy") { return $true } - + Write-ColorText "Waiting for service to be healthy... (attempt $attempt/$MaxAttempts, retrying in ${delay}s)" -Color Yellow Start-Sleep -Seconds $delay $delay = $delay * 2 @@ -157,12 +157,12 @@ try { # Wait for health check (unless skipped) if (!$SkipHealthCheck) { Write-ColorText "Waiting for service to be healthy..." -Color Green - + # Try simple timeout first, then use exponential backoff if needed $timeout = $HealthCheckTimeout $elapsed = 0 $healthy = $false - + while ($elapsed -lt $timeout) { try { $containerId = docker-compose ps -q zen-mcp @@ -176,7 +176,7 @@ if (!$SkipHealthCheck) { } catch { # Continue checking } - + Start-Sleep -Seconds 2 $elapsed += 2 } diff --git a/docker/scripts/deploy.sh b/docker/scripts/deploy.sh index b207c5c1..631292c3 100644 --- a/docker/scripts/deploy.sh +++ b/docker/scripts/deploy.sh @@ -13,7 +13,7 @@ echo -e "${GREEN}=== Deploying Zen MCP Server ===${NC}" check_env_vars() { # At least one of these API keys must be set local required_vars=("GEMINI_API_KEY" "GOOGLE_API_KEY" "OPENAI_API_KEY" "XAI_API_KEY" "DIAL_API_KEY" "OPENROUTER_API_KEY") - + local has_api_key=false for var in "${required_vars[@]}"; do if [[ -n "${!var:-}" ]]; then diff --git a/docs/adding_providers.md b/docs/adding_providers.md index dae97850..79a37ddc 100644 --- a/docs/adding_providers.md +++ b/docs/adding_providers.md @@ -165,9 +165,9 @@ from .shared import ( class ExampleProvider(OpenAICompatibleProvider): """Example OpenAI-compatible provider.""" - + FRIENDLY_NAME = "Example" - + # Define models using ModelCapabilities (consistent with other providers) MODEL_CAPABILITIES = { "example-model-large": ModelCapabilities( @@ -180,7 +180,7 @@ class ExampleProvider(OpenAICompatibleProvider): aliases=["large", "big"], ), } - + def __init__(self, api_key: str, **kwargs): kwargs.setdefault("base_url", "https://api.example.com/v1") super().__init__(api_key, **kwargs) @@ -269,7 +269,7 @@ assert capabilities.provider == ProviderType.EXAMPLE ### Provider Priority When a user requests a model, providers are checked in priority order: 1. **Native providers** (Gemini, OpenAI, Example) - handle their specific models -2. **Custom provider** - handles local/self-hosted models +2. **Custom provider** - handles local/self-hosted models 3. **OpenRouter** - catch-all for everything else ### Model Validation @@ -290,7 +290,7 @@ needs additional alias handling beyond the shared behaviour. 
- **Be specific in model validation** - only accept models you actually support - **Use ModelCapabilities objects** consistently (like Gemini provider) -- **Include descriptive aliases** for better user experience +- **Include descriptive aliases** for better user experience - **Add error handling** and logging for debugging - **Test with real API calls** to verify everything works - **Follow the existing patterns** in `providers/gemini.py` and `providers/custom.py` diff --git a/docs/advanced-usage.md b/docs/advanced-usage.md index 58b99d7a..b561dfc4 100644 --- a/docs/advanced-usage.md +++ b/docs/advanced-usage.md @@ -50,7 +50,7 @@ Regardless of your default configuration, you can specify models per request: | **`llama`** (Llama 3.2) | Custom/Local | 128K tokens | Local inference, privacy | On-device analysis, cost-free processing | | **Any model** | OpenRouter | Varies | Access to GPT-4, Claude, Llama, etc. | User-specified or based on task requirements | -**Mix & Match Providers:** Use multiple providers simultaneously! Set both `OPENROUTER_API_KEY` and `CUSTOM_API_URL` to access +**Mix & Match Providers:** Use multiple providers simultaneously! Set both `OPENROUTER_API_KEY` and `CUSTOM_API_URL` to access cloud models (expensive/powerful) AND local models (free/private) in the same conversation. **Model Capabilities:** @@ -80,7 +80,7 @@ cloud models (expensive/powerful) AND local models (free/private) in the same co GOOGLE_ALLOWED_MODELS=flash,pro OPENAI_ALLOWED_MODELS=o4-mini,o3-mini -# Production: Cost-optimized +# Production: Cost-optimized GOOGLE_ALLOWED_MODELS=flash OPENAI_ALLOWED_MODELS=o4-mini @@ -160,7 +160,7 @@ All tools that work with files support **both individual files and entire direct **`analyze`** - Analyze files or directories - `files`: List of file paths or directories (required) -- `question`: What to analyze (required) +- `question`: What to analyze (required) - `model`: auto|pro|flash|flash-2.0|flashlite|o3|o3-mini|o4-mini|gpt4.1|gpt5|gpt5-mini|gpt5-nano (default: server default) - `analysis_type`: architecture|performance|security|quality|general - `output_format`: summary|detailed|actionable @@ -169,7 +169,7 @@ All tools that work with files support **both individual files and entire direct ``` "Analyze the src/ directory for architectural patterns" (auto mode picks best model) -"Use flash to quickly analyze main.py and tests/ to understand test coverage" +"Use flash to quickly analyze main.py and tests/ to understand test coverage" "Use o3 for logical analysis of the algorithm in backend/core.py" "Use pro for deep analysis of the entire backend/ directory structure" ``` @@ -287,7 +287,7 @@ Session 2: "Continue our RAG discussion with o3" ``` Think hard about designing and developing a fun calculator app in swift. Review your design plans with o3, taking in their suggestions but keep the feature-set realistic and doable without adding bloat. Begin implementing and in between -implementation, get a codereview done by Gemini Pro and chat with Flash if you need to for creative directions. +implementation, get a codereview done by Gemini Pro and chat with Flash if you need to for creative directions. ``` ### Code β†’ Review β†’ Fix @@ -301,16 +301,16 @@ work. Fix medium to critical bugs / concerns / issues and show me the final prod ``` Take a look at these log files saved under subfolder/diagnostics.log there's a bug where the user says the app crashes at launch. Think hard and go over each line, tallying it with corresponding code within the project. 
After -you've performed initial investigation, ask gemini pro to analyze the log files and the related code where you +you've performed initial investigation, ask gemini pro to analyze the log files and the related code where you suspect lies the bug and then formulate and implement a bare minimal fix. Must not regress. Perform a precommit -with zen in the end using gemini pro to confirm we're okay to publish the fix +with zen in the end using gemini pro to confirm we're okay to publish the fix ``` ### Refactor β†’ Review β†’ Implement β†’ Test ``` -Use zen to analyze this legacy authentication module for decomposition opportunities. The code is getting hard to -maintain and we need to break it down. Use gemini pro with high thinking mode to identify code smells and suggest -a modernization strategy. After reviewing the refactoring plan, implement the changes step by step and then +Use zen to analyze this legacy authentication module for decomposition opportunities. The code is getting hard to +maintain and we need to break it down. Use gemini pro with high thinking mode to identify code smells and suggest +a modernization strategy. After reviewing the refactoring plan, implement the changes step by step and then generate comprehensive tests with zen to ensure nothing breaks. ``` @@ -349,7 +349,7 @@ The Zen MCP server supports vision-capable models for analyzing images, diagrams # Debug with error screenshots "Use zen to debug this error with the stack trace screenshot and error.py" -# Architecture analysis with diagrams +# Architecture analysis with diagrams "Analyze this system architecture diagram with gemini pro for bottlenecks" # UI review with mockups @@ -392,9 +392,9 @@ The MCP protocol has a combined request+response limit of approximately 25K toke User: "Use gemini to review this code: [50,000+ character detailed analysis]" # Server detects the large prompt and responds: -Zen MCP: "The prompt is too large for MCP's token limits (>50,000 characters). -Please save the prompt text to a temporary file named 'prompt.txt' and resend -the request with an empty prompt string and the absolute file path included +Zen MCP: "The prompt is too large for MCP's token limits (>50,000 characters). +Please save the prompt text to a temporary file named 'prompt.txt' and resend +the request with an empty prompt string and the absolute file path included in the files parameter, along with any other files you wish to share as context." 
# Claude automatically handles this: diff --git a/docs/ai-collaboration.md b/docs/ai-collaboration.md index ac8b3934..8a08ea2a 100644 --- a/docs/ai-collaboration.md +++ b/docs/ai-collaboration.md @@ -31,7 +31,7 @@ This server enables **true AI collaboration** between Claude and multiple AI mod **Independent Work Between Exchanges:** - Claude can work independently between exchanges (analyzing code, implementing fixes, gathering data) -- Return to Gemini with progress updates and additional context +- Return to Gemini with progress updates and additional context - Each exchange shares only incremental information while maintaining full conversation history - Automatically bypasses MCP's 25K token limits through incremental updates @@ -96,4 +96,4 @@ This server enables **true AI collaboration** between Claude and multiple AI mod - **Provide clear context**: Help models understand the broader goal and constraints - **Trust the process**: AI-to-AI conversations can produce insights neither model would reach alone -For more information on conversation persistence and context revival, see the [Context Revival Guide](context-revival.md). \ No newline at end of file +For more information on conversation persistence and context revival, see the [Context Revival Guide](context-revival.md). diff --git a/docs/ai_banter.md b/docs/ai_banter.md index 804a8ed2..f6fe4684 100644 --- a/docs/ai_banter.md +++ b/docs/ai_banter.md @@ -1,7 +1,7 @@ # The Code Comedy Hour -Just when I thought it was a routine test, Claude and it's _thought-partner_ would go off-script with unexpectedly quirky behavior - the smaller the model, -the quirkier the responses. +Just when I thought it was a routine test, Claude and it's _thought-partner_ would go off-script with unexpectedly quirky behavior - the smaller the model, +the quirkier the responses. It's happened more times than I can count, but I figured it’s time I start saving a few. Here are some recent ones. @@ -137,4 +137,4 @@ It's happened more times than I can count, but I figured it’s time I start sav … +16 lines (ctrl+r to expand) ⏺ Great! ChooChoo confirmed its name: "My name is ChooChoo." 
-``` \ No newline at end of file +``` diff --git a/docs/configuration.md b/docs/configuration.md index 95d54b05..07295eeb 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -30,7 +30,7 @@ OPENAI_API_KEY=your-openai-key GEMINI_API_KEY=your_gemini_api_key_here # Get from: https://makersuite.google.com/app/apikey -# OpenAI API +# OpenAI API OPENAI_API_KEY=your_openai_api_key_here # Get from: https://platform.openai.com/api-keys @@ -154,7 +154,7 @@ DEFAULT_THINKING_MODE_THINKDEEP=high # Available modes and token consumption: # minimal: 128 tokens - Quick analysis, fastest response -# low: 2,048 tokens - Light reasoning tasks +# low: 2,048 tokens - Light reasoning tasks # medium: 8,192 tokens - Balanced reasoning # high: 16,384 tokens - Complex analysis (recommended for thinkdeep) # max: 32,768 tokens - Maximum reasoning depth @@ -171,7 +171,7 @@ Control which models can be used from each provider for cost control, compliance # OpenAI model restrictions OPENAI_ALLOWED_MODELS=o3-mini,o4-mini,mini -# Gemini model restrictions +# Gemini model restrictions GOOGLE_ALLOWED_MODELS=flash,pro # X.AI GROK model restrictions @@ -218,8 +218,8 @@ CUSTOM_MODELS_CONFIG_PATH=/path/to/custom_models.json **Conversation Settings:** ```env # How long AI-to-AI conversation threads persist in memory (hours) -# Conversations are auto-purged when claude closes its MCP connection or -# when a session is quit / re-launched +# Conversations are auto-purged when claude closes its MCP connection or +# when a session is quit / re-launched CONVERSATION_TIMEOUT_HOURS=5 # Maximum conversation turns (each exchange = 2 turns) diff --git a/docs/context-revival.md b/docs/context-revival.md index 29c36f30..0493f3cf 100644 --- a/docs/context-revival.md +++ b/docs/context-revival.md @@ -2,7 +2,7 @@ ## **The Most Profound Feature: Context Revival After Reset** -**This powerful feature cannot be highlighted enough**: The Zen MCP Server implements a simple continuation system that seemingly transcends Claude's context limitations. +**This powerful feature cannot be highlighted enough**: The Zen MCP Server implements a simple continuation system that seemingly transcends Claude's context limitations. ## How Context Revival Works @@ -75,13 +75,13 @@ Claude: "Ah yes, excellent plan! Based on O3's optimization insights and our ear ## Why This Changes Everything -**Before Zen MCP**: Claude's context resets meant losing entire conversation threads. +**Before Zen MCP**: Claude's context resets meant losing entire conversation threads. Complex multi-step analyses were fragmented and had to restart from scratch. You most likely need to re-prompt Claude or to make it re-read some previously saved document / `CLAUDE.md` etc - no need. Zen remembers. 
**With Zen MCP**: Claude can orchestrate multi-hour, multi-tool workflows where: - **O3** handles logical analysis and debugging -- **Gemini Pro** performs deep architectural reviews +- **Gemini Pro** performs deep architectural reviews - **Flash** provides quick formatting and style checks - **Claude** coordinates everything while maintaining full context @@ -95,7 +95,7 @@ The system is highly configurable: # Maximum conversation turns (default: 20) MAX_CONVERSATION_TURNS=20 -# Thread expiration in hours (default: 3) +# Thread expiration in hours (default: 3) CONVERSATION_TIMEOUT_HOURS=3 ``` diff --git a/docs/contributions.md b/docs/contributions.md index 12147d08..cded03f6 100644 --- a/docs/contributions.md +++ b/docs/contributions.md @@ -7,14 +7,63 @@ Thank you for your interest in contributing to Zen MCP Server! This guide will h 1. **Fork the repository** on GitHub 2. **Clone your fork** locally 3. **Set up the development environment**: + + **Option A: Traditional Setup** ```bash ./run-server.sh ``` + + **Option B: Nix Flake (Recommended for Nix users)** + ```bash + # Automatic environment activation with direnv (one-time setup) + direnv allow + + # Or manually enter development shell + nix develop + + # Build and test the package + nix build + nix run + ``` + 4. **Create a feature branch** from `main`: ```bash git checkout -b feat/your-feature-name ``` +### Nix Development Environment + +The Nix flake provides several advantages for development: + +- **Reproducible Environment**: Identical dependencies across all systems +- **Zero Setup**: No need to install Python, pip, or manage virtual environments +- **Isolated Dependencies**: No conflicts with system packages +- **Cross-Platform**: Works on Linux, macOS, and NixOS + +**Available Nix Commands:** +```bash +# Enter development shell with all dependencies +nix develop + +# Build the package +nix build +./result/bin/zen-mcp-server + +# Run directly without building +nix run + +# Install system-wide +nix profile install . + +# Install from GitHub (for users) +nix profile install github:BeehiveInnovations/zen-mcp-server +``` + +**With direnv** (`.envrc` included): +- Environment automatically activates when entering the directory +- All development tools (Python, pytest, black, ruff, isort) available instantly +- No manual activation needed + ## Development Process ### 1. Code Quality Standards diff --git a/docs/custom_models.md b/docs/custom_models.md index 28c1a850..ceb44a5c 100644 --- a/docs/custom_models.md +++ b/docs/custom_models.md @@ -101,7 +101,7 @@ heuristic described there). OPENROUTER_API_KEY=your-openrouter-api-key ``` -> **Note:** Control which models can be used directly in your OpenRouter dashboard at [openrouter.ai](https://openrouter.ai/). +> **Note:** Control which models can be used directly in your OpenRouter dashboard at [openrouter.ai](https://openrouter.ai/). > This gives you centralized control over model access and spending limits. That's it! The setup script handles all necessary configuration automatically. @@ -198,7 +198,7 @@ CUSTOM_MODEL_NAME=your-loaded-model "Use meta-llama/Llama-2-7b-chat-hf via zen to analyze" ``` -**For OpenRouter:** Check current model pricing at [openrouter.ai/models](https://openrouter.ai/models). +**For OpenRouter:** Check current model pricing at [openrouter.ai/models](https://openrouter.ai/models). **For Local models:** Context window and capabilities are defined in `conf/custom_models.json`. 
## Model Provider Selection diff --git a/docs/docker-deployment.md b/docs/docker-deployment.md index fc94d6fd..fb5397e2 100644 --- a/docs/docker-deployment.md +++ b/docs/docker-deployment.md @@ -20,7 +20,7 @@ This guide covers deploying Zen MCP Server using Docker and Docker Compose for p ```bash # Linux/macOS ./docker/scripts/deploy.sh - + # Windows PowerShell .\docker\scripts\deploy.ps1 ``` diff --git a/docs/gemini-setup.md b/docs/gemini-setup.md index bf694ae8..5dd16275 100644 --- a/docs/gemini-setup.md +++ b/docs/gemini-setup.md @@ -40,4 +40,4 @@ Then make it executable: `chmod +x zen-mcp-server` 4. Restart Gemini CLI. -All 15 Zen tools are now available in your Gemini CLI session. \ No newline at end of file +All 15 Zen tools are now available in your Gemini CLI session. diff --git a/docs/getting-started.md b/docs/getting-started.md index 2f1db835..883197c4 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -99,7 +99,7 @@ Create `.mcp.json` in your project root: { "mcpServers": { "zen": { - "command": "sh", + "command": "sh", "args": [ "-c", "for p in $(which uvx 2>/dev/null) $HOME/.local/bin/uvx /opt/homebrew/bin/uvx /usr/local/bin/uvx uvx; do [ -x \"$p\" ] && exec \"$p\" --from git+https://github.com/BeehiveInnovations/zen-mcp-server.git zen-mcp-server; done; echo 'uvx not found' >&2; exit 1" @@ -261,7 +261,7 @@ cd zen-mcp-server **What the setup script does:** - βœ… Creates Python virtual environment -- βœ… Installs all dependencies +- βœ… Installs all dependencies - βœ… Creates .env file for API keys - βœ… Configures Claude integrations - βœ… Provides copy-paste configuration @@ -285,7 +285,7 @@ nano .env Add your API keys (at least one required): ```env # Choose your providers (at least one required) -GEMINI_API_KEY=your-gemini-api-key-here # For Gemini models +GEMINI_API_KEY=your-gemini-api-key-here # For Gemini models OPENAI_API_KEY=your-openai-api-key-here # For O3, GPT-5 XAI_API_KEY=your-xai-api-key-here # For Grok models OPENROUTER_API_KEY=your-openrouter-key # For multiple models @@ -293,7 +293,7 @@ OPENROUTER_API_KEY=your-openrouter-key # For multiple models # DIAL Platform (optional) DIAL_API_KEY=your-dial-api-key-here DIAL_API_HOST=https://core.dialx.ai # Default host (optional) -DIAL_API_VERSION=2024-12-01-preview # API version (optional) +DIAL_API_VERSION=2024-12-01-preview # API version (optional) DIAL_ALLOWED_MODELS=o3,gemini-2.5-pro # Restrict models (optional) # Custom/Local models (Ollama, vLLM, etc.) @@ -354,7 +354,7 @@ Gemini uses a single `timeout` field per server inside `~/.gemini/settings.json` Versions 0.2.1 and newer currently ignore values above ~60 seconds for some transports due to a known regression; if you still see premature disconnects we recommend breaking work into smaller calls or watching the Gemini CLI release notes for the fix. **Important notes:** -- ⭐ **No restart needed** - Changes take effect immediately +- ⭐ **No restart needed** - Changes take effect immediately - ⭐ If multiple APIs configured, native APIs take priority over OpenRouter - ⭐ Configure model aliases in [`conf/custom_models.json`](../conf/custom_models.json) @@ -367,7 +367,7 @@ Versions 0.2.1 and newer currently ignore values above ~60 seconds for some tran ### For Claude Code CLI: 1. Exit any existing Claude session -2. Run `claude` from your project directory +2. Run `claude` from your project directory 3. 
Try: `"Use zen to chat about Python best practices"` ### For Gemini CLI: @@ -392,7 +392,7 @@ Versions 0.2.1 and newer currently ignore values above ~60 seconds for some tran ``` "Use zen to list available models" "Chat with zen about the best approach for API design" -"Use zen thinkdeep with gemini pro about scaling strategies" +"Use zen thinkdeep with gemini pro about scaling strategies" "Debug this error with o3: [paste error]" ``` @@ -410,7 +410,7 @@ Versions 0.2.1 and newer currently ignore values above ~60 seconds for some tran ``` **Specify the model:** -``` +``` "Use zen with gemini pro to review this complex algorithm" "Debug with o3 using zen for logical analysis" "Get flash to quickly format this code via zen" @@ -419,14 +419,14 @@ Versions 0.2.1 and newer currently ignore values above ~60 seconds for some tran **Multi-model workflows:** ``` "Use zen to get consensus from pro and o3 on this architecture" -"Code review with gemini, then precommit validation with o3" +"Code review with gemini, then precommit validation with o3" "Analyze with flash, then deep dive with pro if issues found" ``` ### Quick Tool Reference: **🀝 Collaboration**: `chat`, `thinkdeep`, `planner`, `consensus` -**πŸ” Code Analysis**: `analyze`, `codereview`, `debug`, `precommit` +**πŸ” Code Analysis**: `analyze`, `codereview`, `debug`, `precommit` **βš’οΈ Development**: `refactor`, `testgen`, `secaudit`, `docgen` **πŸ”§ Utilities**: `challenge`, `tracer`, `listmodels`, `version` @@ -460,7 +460,7 @@ Versions 0.2.1 and newer currently ignore values above ~60 seconds for some tran ### Performance Issues **Slow responses:** -- Use faster models: `flash` instead of `pro` +- Use faster models: `flash` instead of `pro` - Lower thinking modes: `minimal` or `low` instead of `high` - Restrict model access to prevent expensive model selection @@ -501,13 +501,13 @@ OPENAI_ALLOWED_MODELS=o4-mini,o3-mini ``` ### Cost-Optimized Setup -```env +```env DEFAULT_MODEL=flash GEMINI_API_KEY=your-key GOOGLE_ALLOWED_MODELS=flash ``` -### High-Performance Setup +### High-Performance Setup ```env DEFAULT_MODEL=auto GEMINI_API_KEY=your-key diff --git a/docs/logging.md b/docs/logging.md index 312c9c05..ddc229e0 100644 --- a/docs/logging.md +++ b/docs/logging.md @@ -66,4 +66,4 @@ Logs use a standardized format with timestamps: - Use `./run-server.sh -f` for the easiest log monitoring experience - Activity logs show only tool-related events for cleaner output - Main server logs include all operational details -- Logs persist across server restarts \ No newline at end of file +- Logs persist across server restarts diff --git a/docs/testing.md b/docs/testing.md index 4b5f6c68..108b799c 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -42,7 +42,7 @@ To monitor logs during test execution: # Or manually monitor main server logs (includes all tool execution details) tail -f -n 500 logs/mcp_server.log -# Monitor MCP activity logs (tool calls and completions) +# Monitor MCP activity logs (tool calls and completions) tail -f logs/mcp_activity.log # Check log file sizes (logs rotate at 20MB) @@ -149,4 +149,4 @@ python -m pytest -xvs python communication_simulator_test.py ``` -Remember: All tests must pass before submitting a PR. See the [Contributing Guide](./contributions.md) for complete requirements. \ No newline at end of file +Remember: All tests must pass before submitting a PR. See the [Contributing Guide](./contributions.md) for complete requirements. 
diff --git a/docs/tools/challenge.md b/docs/tools/challenge.md index 5101e742..27bd9f2d 100644 --- a/docs/tools/challenge.md +++ b/docs/tools/challenge.md @@ -1,6 +1,6 @@ # challenge - Challenge an approach or validate ideas with confidence -The `challenge` tool encourages thoughtful critical thinking instead of automatic agreement with the dreaded **You're absolutely right!** responses - especially +The `challenge` tool encourages thoughtful critical thinking instead of automatic agreement with the dreaded **You're absolutely right!** responses - especially when you're not. This tool wraps your comment with instructions that prompt critical thinking and honest analysis instead of blind agreement. ## Quick Example @@ -13,8 +13,8 @@ challenge but do we even need all this extra caching because it'll just slow the challenge I don't think this approach solves my original complaint ``` -Normally, your favorite coding agent will enthusiastically reply with **β€œYou’re absolutely right!”**β€”then proceed to -reverse the _correct_ strategy entirely, without stopping to consider that you might actually be wrong, missing the +Normally, your favorite coding agent will enthusiastically reply with **β€œYou’re absolutely right!”**β€”then proceed to +reverse the _correct_ strategy entirely, without stopping to consider that you might actually be wrong, missing the bigger picture or ignoring architectural constraints. `challenge` fixes this. Claude can even _detect_ when you're challenging something and automatically invokes this tool @@ -32,4 +32,4 @@ AI assistants sometimes tend to agree too readily. The challenge tool helps you: - Get genuine critical evaluation of your ideas - Challenge assumptions constructively - Receive honest feedback on proposals -- Validate approaches with thoughtful analysis \ No newline at end of file +- Validate approaches with thoughtful analysis diff --git a/docs/tools/codereview.md b/docs/tools/codereview.md index 0609840f..fb290c13 100644 --- a/docs/tools/codereview.md +++ b/docs/tools/codereview.md @@ -35,7 +35,7 @@ This tool particularly benefits from Gemini Pro or Flash models due to their 1M ``` Perform a codereview with gemini pro and review auth.py for security issues and potential vulnerabilities. -I need an actionable plan but break it down into smaller quick-wins that we can implement and test rapidly +I need an actionable plan but break it down into smaller quick-wins that we can implement and test rapidly ``` ## Pro Tip: Multiple Parallel Reviews @@ -44,7 +44,7 @@ I need an actionable plan but break it down into smaller quick-wins that we can ``` Start separate sub-tasks for codereview one with o3 finding critical issues and one with flash finding low priority issues -and quick-wins and give me the final single combined review highlighting only the critical issues +and quick-wins and give me the final single combined review highlighting only the critical issues ``` The above prompt will simultaneously run two separate `codereview` tools with two separate models and combine the output into a single summary for you to consume. 
@@ -116,7 +116,7 @@ The above prompt will simultaneously run two separate `codereview` tools with tw Issues are categorized and prioritized: - **πŸ”΄ CRITICAL**: Security vulnerabilities, crashes, data corruption -- **🟠 HIGH**: Logic errors, performance issues, reliability problems +- **🟠 HIGH**: Logic errors, performance issues, reliability problems - **🟑 MEDIUM**: Code smells, maintainability issues, minor bugs - **🟒 LOW**: Style issues, documentation, minor improvements diff --git a/docs/tools/consensus.md b/docs/tools/consensus.md index 2307d892..b3b37de4 100644 --- a/docs/tools/consensus.md +++ b/docs/tools/consensus.md @@ -35,24 +35,24 @@ The following is a hypothetical example designed to demonstrate how one consensu **For/Against Analysis:** ``` -Use zen consensus with flash taking a supportive stance and pro being critical to evaluate whether +Use zen consensus with flash taking a supportive stance and pro being critical to evaluate whether we should migrate from REST to GraphQL for our API ``` **Multi-Model Technical Decision:** ``` -Get consensus from o3, flash, and pro on our new authentication architecture. Have o3 focus on +Get consensus from o3, flash, and pro on our new authentication architecture. Have o3 focus on security implications, flash on implementation speed, and pro stay neutral for overall assessment ``` **Natural Language Stance Assignment:** ``` -Use consensus tool with gemini being "for" the proposal and grok being "against" to debate +Use consensus tool with gemini being "for" the proposal and grok being "against" to debate whether we should adopt microservices architecture ``` ``` -I want to work on module X and Y, unsure which is going to be more popular with users of my app. +I want to work on module X and Y, unsure which is going to be more popular with users of my app. Get a consensus from gemini supporting the idea for implementing X, grok opposing it, and flash staying neutral ``` diff --git a/docs/tools/debug.md b/docs/tools/debug.md index 383a309b..c72a90e3 100644 --- a/docs/tools/debug.md +++ b/docs/tools/debug.md @@ -2,8 +2,8 @@ **Step-by-step investigation followed by expert debugging assistance** -The `debug` workflow guides Claude through a systematic investigation process where Claude performs methodical code -examination, evidence collection, and hypothesis formation across multiple steps. Once the investigation is complete, +The `debug` workflow guides Claude through a systematic investigation process where Claude performs methodical code +examination, evidence collection, and hypothesis formation across multiple steps. Once the investigation is complete, the tool provides expert analysis from the selected AI model (optionally) based on all gathered findings. ## Example Prompts @@ -17,7 +17,7 @@ You can also ask it to debug on its own, no external model required (**recommend Use debug tool to find out why the app is crashing, here are some app logs [paste app logs] and a crash trace: [paste crash trace] ``` -## How It Works +## How It Works The debug tool implements a **systematic investigation methodology** where Claude is guided through structured debugging steps: @@ -29,10 +29,10 @@ The debug tool implements a **systematic investigation methodology** where Claud 5. 
**Completion**: Once investigation is thorough, Claude signals completion **Expert Analysis Phase:** -After Claude completes the investigation, it automatically calls the selected AI model with (unless confidence is **certain**, +After Claude completes the investigation, it automatically calls the selected AI model with (unless confidence is **certain**, in which case expert analysis is bypassed): - Complete investigation summary with all steps and findings -- Relevant files and methods identified during investigation +- Relevant files and methods identified during investigation - Final hypothesis and confidence assessment - Error context and supporting evidence - Visual debugging materials if provided diff --git a/docs/tools/docgen.md b/docs/tools/docgen.md index 02ccbec3..25632932 100644 --- a/docs/tools/docgen.md +++ b/docs/tools/docgen.md @@ -161,7 +161,7 @@ Documentation generation excels with analytical models like Gemini Pro or O3, wh - **Objective-C**: /// comments - **Swift**: /// comments - **JavaScript/TypeScript**: /** */ JSDoc style -- **Java**: /** */ Javadoc style +- **Java**: /** */ Javadoc style - **C#**: /// XML documentation comments - **C/C++**: /// for documentation comments - **Go**: // comments above functions/types @@ -201,4 +201,4 @@ Documentation generation excels with analytical models like Gemini Pro or O3, wh - **Use `docgen`** for: Creating comprehensive documentation, adding missing docs, improving existing documentation - **Use `analyze`** for: Understanding code structure without generating documentation - **Use `codereview`** for: Reviewing code quality including documentation completeness -- **Use `refactor`** for: Restructuring code before documentation (cleaner code = better docs) \ No newline at end of file +- **Use `refactor`** for: Restructuring code before documentation (cleaner code = better docs) diff --git a/docs/tools/listmodels.md b/docs/tools/listmodels.md index 59bbb370..37af9245 100644 --- a/docs/tools/listmodels.md +++ b/docs/tools/listmodels.md @@ -49,7 +49,7 @@ The tool displays: β€’ pro (gemini-2.5-pro) - 1M context, thinking modes β€’ flash (gemini-2.0-flash-experimental) - 1M context, ultra-fast -πŸ”Ή OpenAI - βœ… Configured +πŸ”Ή OpenAI - βœ… Configured β€’ o3 (o3) - 200K context, strong reasoning β€’ o3-mini (o3-mini) - 200K context, balanced β€’ o4-mini (o4-mini) - 200K context, latest reasoning @@ -102,4 +102,4 @@ This tool requires no parameters - it simply queries the server configuration an - **Use `listmodels`** for: Understanding available options and model capabilities - **Use `chat`** for: General discussions about which model to use for specific tasks - **Use `version`** for: Server configuration and version information -- **Use other tools** for: Actual analysis, debugging, or development work \ No newline at end of file +- **Use other tools** for: Actual analysis, debugging, or development work diff --git a/docs/tools/planner.md b/docs/tools/planner.md index 548e4c63..45130106 100644 --- a/docs/tools/planner.md +++ b/docs/tools/planner.md @@ -2,7 +2,7 @@ **Break down complex projects into manageable, structured plans through step-by-step thinking** -The `planner` tool helps you break down complex ideas, problems, or projects into multiple manageable steps. Perfect for system design, migration strategies, +The `planner` tool helps you break down complex ideas, problems, or projects into multiple manageable steps. 
Perfect for system design, migration strategies, architectural planning, and feature development with branching and revision capabilities. ## How It Works @@ -10,7 +10,7 @@ architectural planning, and feature development with branching and revision capa The planner tool enables step-by-step thinking with incremental plan building: 1. **Start with step 1**: Describe the task or problem to plan -2. **Continue building**: Add subsequent steps, building the plan piece by piece +2. **Continue building**: Add subsequent steps, building the plan piece by piece 3. **Revise when needed**: Update earlier decisions as new insights emerge 4. **Branch alternatives**: Explore different approaches when multiple options exist 5. **Continue across sessions**: Resume planning later with full context @@ -18,15 +18,15 @@ The planner tool enables step-by-step thinking with incremental plan building: ## Example Prompts #### Pro Tip -Claude supports `sub-tasks` where it will spawn and run separate background tasks. You can ask Claude to +Claude supports `sub-tasks` where it will spawn and run separate background tasks. You can ask Claude to run Zen's planner with two separate ideas. Then when it's done, use Zen's `consensus` tool to pass the entire plan and get expert perspective from two powerful AI models on which one to work on first! Like performing **AB** testing in one-go without the wait! ``` -Create two separate sub-tasks: in one, using planner tool show me how to add natural language support -to my cooking app. In the other sub-task, use planner to plan how to add support for voice notes to my cooking app. -Once done, start a consensus by sharing both plans to o3 and flash to give me the final verdict. Which one do +Create two separate sub-tasks: in one, using planner tool show me how to add natural language support +to my cooking app. In the other sub-task, use planner to plan how to add support for voice notes to my cooking app. +Once done, start a consensus by sharing both plans to o3 and flash to give me the final verdict. Which one do I implement first? ``` @@ -41,7 +41,7 @@ Using the planner tool, show me how to add CoreData sync to my app, include any ## Key Features - **Step-by-step breakdown**: Build plans incrementally with full context awareness -- **Branching support**: Explore alternative approaches when needed +- **Branching support**: Explore alternative approaches when needed - **Revision capabilities**: Update earlier decisions as new insights emerge - **Multi-session continuation**: Resume planning across multiple sessions with context - **Dynamic adjustment**: Modify step count and approach as planning progresses @@ -76,8 +76,8 @@ Develop a plan using zen for implementing CI/CD pipelines across our development Like all other tools in Zen, you can `continue` with a new plan using the output from a previous plan by simply saying ``` -Continue with zen's consensus tool and find out what o3:for and flash:against think of the plan +Continue with zen's consensus tool and find out what o3:for and flash:against think of the plan ``` -You can mix and match and take one output and feed it into another, continuing from where you left off using a different -tool / model combination. \ No newline at end of file +You can mix and match and take one output and feed it into another, continuing from where you left off using a different +tool / model combination. 
diff --git a/docs/tools/precommit.md b/docs/tools/precommit.md index 3e8ed0e6..110e73cd 100644 --- a/docs/tools/precommit.md +++ b/docs/tools/precommit.md @@ -54,9 +54,9 @@ How beautiful is that? Claude used `precommit` twice and `codereview` once and a ### Real-world Example -Here's an example where Claude was made to go through changes *it* was asked to make. Its recommendation -to **Publish all Changes** was made after a rigorous set of prompts where it examined the small set of changes -from different angles (performance, bugs, anti-patterns etc). After confirming that unit tests were +Here's an example where Claude was made to go through changes *it* was asked to make. Its recommendation +to **Publish all Changes** was made after a rigorous set of prompts where it examined the small set of changes +from different angles (performance, bugs, anti-patterns etc). After confirming that unit tests were passing, it went ahead with an initial review with O3; came back clean - all good to go. O3 was happy, everything looked great. Then it shared the changes and relevant code with Gemini 2.5 Pro - the following is the outcome: diff --git a/docs/tools/refactor.md b/docs/tools/refactor.md index d5fda944..0fccfd1f 100644 --- a/docs/tools/refactor.md +++ b/docs/tools/refactor.md @@ -38,10 +38,10 @@ The refactor tool excels with models that have large context windows like Gemini "Using zen's refactor decompose the all_in_one_sync_code.swift into maintainable extensions" ``` -πŸ’‘**Example of a powerful prompt** to get the best out of both Claude + Flash's 1M Context: +πŸ’‘**Example of a powerful prompt** to get the best out of both Claude + Flash's 1M Context: ``` "First, think about how the authentication module works, find related classes and find - any code smells, then using zen's refactor ask flash to confirm your findings but ask + any code smells, then using zen's refactor ask flash to confirm your findings but ask it to find additional code smells and any other quick-wins and then fix these issues" ``` @@ -141,7 +141,7 @@ This results in Claude first performing its own expert analysis, encouraging it **Top-Down Analysis:** 1. **File Level**: Identify oversized files that need splitting -2. **Class Level**: Find classes with too many responsibilities +2. **Class Level**: Find classes with too many responsibilities 3. **Function Level**: Locate functions that are too complex or long 4. 
**Code Quality**: Address smells, modernization, and organization @@ -197,5 +197,5 @@ Analyzes multiple files together to understand: - **Use `refactor`** for: Structural improvements, decomposition, modernization, code organization - **Use `codereview`** for: Finding bugs and security issues with immediate fixes -- **Use `analyze`** for: Understanding code without making change recommendations -- **Use `debug`** for: Solving specific runtime issues rather than structural problems \ No newline at end of file +- **Use `analyze`** for: Understanding code without making change recommendations +- **Use `debug`** for: Solving specific runtime issues rather than structural problems diff --git a/docs/tools/secaudit.md b/docs/tools/secaudit.md index 0e4d2797..a774102f 100644 --- a/docs/tools/secaudit.md +++ b/docs/tools/secaudit.md @@ -2,11 +2,11 @@ **Systematic OWASP-based security assessment with compliance evaluation through workflow-driven investigation** -The `secaudit` tool provides comprehensive security auditing capabilities with systematic OWASP Top 10 assessment, compliance framework evaluation, -and threat modeling. This workflow tool guides Claude through methodical security investigation steps with forced pauses between each step to ensure +The `secaudit` tool provides comprehensive security auditing capabilities with systematic OWASP Top 10 assessment, compliance framework evaluation, +and threat modeling. This workflow tool guides Claude through methodical security investigation steps with forced pauses between each step to ensure thorough vulnerability assessment, security pattern analysis, and compliance verification before providing expert analysis. -**Important**: AI models may not identify all security vulnerabilities. Always perform additional manual security reviews, +**Important**: AI models may not identify all security vulnerabilities. Always perform additional manual security reviews, penetration testing, and verification. 
## How the Workflow Works @@ -41,7 +41,7 @@ Perform a secaudit with o3 on this e-commerce web application focusing on paymen ``` ``` -Use secaudit to conduct a comprehensive security audit of the authentication system, threat level high, focus on enterprise +Use secaudit to conduct a comprehensive security audit of the authentication system, threat level high, focus on enterprise security patterns and HIPAA compliance ``` @@ -50,9 +50,9 @@ security patterns and HIPAA compliance **You can run parallel security audits for different application components:** ``` -Start separate sub-tasks, in one start a secaudit for critical payment processing components focusing on PCI DSS with gemini pro, -and in the other for user management focusing on OWASP authentication vulnerabilities with o4-mini, then combine into a unified -security remediation plan using planner +Start separate sub-tasks, in one start a secaudit for critical payment processing components focusing on PCI DSS with gemini pro, +and in the other for user management focusing on OWASP authentication vulnerabilities with o4-mini, then combine into a unified +security remediation plan using planner ``` ## Key Features @@ -164,31 +164,31 @@ Systematic assessment includes: **Comprehensive E-commerce Security Audit:** ``` -"Conduct a comprehensive secaudit with gemini pro for our Node.js e-commerce platform, threat level high, +"Conduct a comprehensive secaudit with gemini pro for our Node.js e-commerce platform, threat level high, compliance requirements PCI DSS and SOC2, focus on payment processing security" ``` **Authentication System Security Review:** ``` -"Use o3 to perform secaudit on authentication microservice, focus on authentication, +"Use o3 to perform secaudit on authentication microservice, focus on authentication, threat level critical, check for OWASP A07 and multi-factor authentication implementation" ``` **API Security Assessment:** ``` -"Secaudit our REST API gateway with gemini pro, audit focus api_security, +"Secaudit our REST API gateway with gemini pro, audit focus api_security, compliance requirements GDPR, threat level medium" ``` **Infrastructure Security Review:** ``` -"Perform secaudit on Kubernetes deployment manifests with o3, focus infrastructure, +"Perform secaudit on Kubernetes deployment manifests with o3, focus infrastructure, threat level high, include container security and network policies" ``` **Quick Security Scan:** ``` -"Fast secaudit of user registration flow with flash, focus authentication, +"Fast secaudit of user registration flow with flash, focus authentication, severity filter critical and high only" ``` diff --git a/docs/tools/testgen.md b/docs/tools/testgen.md index 7539b217..1edb510d 100644 --- a/docs/tools/testgen.md +++ b/docs/tools/testgen.md @@ -217,4 +217,4 @@ For UI components and visual elements: - **Use `testgen`** for: Creating comprehensive test suites, filling test coverage gaps, testing new features - **Use `debug`** for: Diagnosing specific test failures or runtime issues - **Use `codereview`** for: Reviewing existing test quality and coverage -- **Use `analyze`** for: Understanding existing test structure without generating new tests \ No newline at end of file +- **Use `analyze`** for: Understanding existing test structure without generating new tests diff --git a/docs/tools/thinkdeep.md b/docs/tools/thinkdeep.md index 43c3c9af..4a449650 100644 --- a/docs/tools/thinkdeep.md +++ b/docs/tools/thinkdeep.md @@ -11,7 +11,7 @@ The `thinkdeep` tool provides extended reasoning 
capabilities, offering a second ## Example Prompt ``` -Think deeper about my authentication design with pro using max thinking mode and brainstorm to come up +Think deeper about my authentication design with pro using max thinking mode and brainstorm to come up with the best architecture for my project ``` diff --git a/docs/tools/tracer.md b/docs/tools/tracer.md index 9fffab73..e69fec47 100644 --- a/docs/tools/tracer.md +++ b/docs/tools/tracer.md @@ -11,7 +11,7 @@ The `tracer` tool is a specialized prompt-generation tool that creates structure - Detailed branching analysis and side effects - Shows when and how functions are called throughout the system -**`dependencies` Mode**: For classes/modules/protocols +**`dependencies` Mode**: For classes/modules/protocols - Maps bidirectional dependencies and structural relationships - Identifies coupling and architectural dependencies - Shows how components interact and depend on each other @@ -167,4 +167,4 @@ The `tracer` tool works best when combined with other analysis tools: - **Use `tracer`** for: Creating structured analysis prompts, systematic code exploration planning - **Use `analyze`** for: Direct code analysis without prompt generation - **Use `debug`** for: Specific runtime error investigation -- **Use `chat`** for: Open-ended code discussions and exploration \ No newline at end of file +- **Use `chat`** for: Open-ended code discussions and exploration diff --git a/docs/tools/version.md b/docs/tools/version.md index 51d68dd0..06cebaa6 100644 --- a/docs/tools/version.md +++ b/docs/tools/version.md @@ -63,7 +63,7 @@ The tool provides: πŸ› οΈ Available Tools (12): β€’ chat - General development chat & collaborative thinking -β€’ thinkdeep - Extended reasoning partner +β€’ thinkdeep - Extended reasoning partner β€’ consensus - Multi-model perspective gathering β€’ codereview - Professional code review β€’ precommit - Pre-commit validation @@ -124,4 +124,4 @@ This tool requires no parameters - it provides comprehensive server information - **Use `version`** for: Server diagnostics, configuration verification, troubleshooting - **Use `listmodels`** for: Model availability and capability information - **Use other tools** for: Actual development and analysis tasks -- **Use with support**: Essential information for getting help with issues \ No newline at end of file +- **Use with support**: Essential information for getting help with issues diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index d09ec6b9..25e4eb2e 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -117,4 +117,4 @@ If the problem persists after trying these steps: wsl --install -d Ubuntu ``` -Then follow the standard setup inside WSL2. \ No newline at end of file +Then follow the standard setup inside WSL2. 
diff --git a/docs/vcr-testing.md b/docs/vcr-testing.md index eda9ad12..561f55aa 100644 --- a/docs/vcr-testing.md +++ b/docs/vcr-testing.md @@ -18,7 +18,7 @@ from tests.transport_helpers import inject_transport # Simple one-line setup with automatic transport injection def test_expensive_api_call(monkeypatch): inject_transport(monkeypatch, "tests/openai_cassettes/my_test.json") - + # Make API calls - automatically recorded/replayed with PII sanitization result = await chat_tool.execute({"prompt": "2+2?", "model": "o3-pro"}) ``` @@ -39,7 +39,7 @@ from tests.transport_helpers import inject_transport async def test_with_recording(monkeypatch): # One-line setup - handles all transport injection complexity inject_transport(monkeypatch, "tests/openai_cassettes/my_test.json") - + # Use API normally - recording/replay happens transparently result = await chat_tool.execute({"prompt": "2+2?", "model": "o3-pro"}) ``` @@ -125,4 +125,3 @@ For implementation details, see: - `tests/http_transport_recorder.py` - Core transport implementation - `tests/pii_sanitizer.py` - Sanitization patterns and logic - `tests/transport_helpers.py` - Simplified test integration - diff --git a/docs/wsl-setup.md b/docs/wsl-setup.md index c417ef19..e37d61ad 100644 --- a/docs/wsl-setup.md +++ b/docs/wsl-setup.md @@ -25,7 +25,7 @@ npm install -g @anthropic-ai/claude-code ```bash # Navigate to your home directory or preferred location in WSL cd ~ - + # Clone the repository git clone https://github.com/BeehiveInnovations/zen-mcp-server.git cd zen-mcp-server @@ -42,7 +42,7 @@ npm install -g @anthropic-ai/claude-code ```bash # List configured MCP servers claude mcp list - + # You should see 'zen' listed in the output # If not, the setup script will provide the correct configuration ``` @@ -76,4 +76,4 @@ cat ~/.claude.json | grep -A 10 "zen" ### Performance Tip -For best performance, keep your zen-mcp-server directory in the WSL filesystem (e.g., `~/zen-mcp-server`) rather than in the Windows filesystem (`/mnt/c/...`). \ No newline at end of file +For best performance, keep your zen-mcp-server directory in the WSL filesystem (e.g., `~/zen-mcp-server`) rather than in the Windows filesystem (`/mnt/c/...`). 
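For reference, the cassette-based pattern described in the `docs/vcr-testing.md` changes above can be exercised end-to-end with a test along these lines. This is a minimal sketch, not part of the patch: the `ChatTool` import path is an assumed location, while `inject_transport`, the cassette path, and the `execute` call mirror the snippets shown in that doc; because `pytest.ini` sets `asyncio_mode = auto`, the async test needs no explicit marker.

```python
# Minimal sketch of a cassette-backed test (ChatTool import path is an assumption).
from tests.transport_helpers import inject_transport  # helper shown in docs/vcr-testing.md
from tools.chat import ChatTool  # assumed module layout; adjust to the actual package


async def test_o3_pro_basic_math(monkeypatch):
    # Records the HTTP exchange on the first run, replays it (with PII sanitization)
    # on subsequent runs, so the test is free and deterministic after recording.
    inject_transport(monkeypatch, "tests/openai_cassettes/o3_pro_basic_math.json")

    chat_tool = ChatTool()
    result = await chat_tool.execute({"prompt": "2+2?", "model": "o3-pro"})
    assert result  # replayed response should be non-empty
```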
diff --git a/examples/claude_config_macos.json b/examples/claude_config_macos.json index c1657aff..8bcdd450 100644 --- a/examples/claude_config_macos.json +++ b/examples/claude_config_macos.json @@ -8,4 +8,4 @@ "args": ["/path/to/zen-mcp-server/server.py"] } } -} \ No newline at end of file +} diff --git a/examples/claude_config_wsl.json b/examples/claude_config_wsl.json index b0dae6d2..05d33789 100644 --- a/examples/claude_config_wsl.json +++ b/examples/claude_config_wsl.json @@ -11,4 +11,4 @@ ] } } -} \ No newline at end of file +} diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000..fff8092e --- /dev/null +++ b/flake.lock @@ -0,0 +1,61 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1755027561, + "narHash": "sha256-IVft239Bc8p8Dtvf7UAACMG5P3ZV+3/aO28gXpGtMXI=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "005433b926e16227259a1843015b5b2b7f7d1fc3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000..b8355b55 --- /dev/null +++ b/flake.nix @@ -0,0 +1,64 @@ +{ + description = "Zen MCP Server"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = { nixpkgs, flake-utils, ... }: + flake-utils.lib.eachDefaultSystem (system: let + pkgs = import nixpkgs { inherit system; }; + python = pkgs.python312; + in { + packages.default = python.pkgs.buildPythonApplication { + pname = "zen-mcp-server"; + version = "1.1.0"; + src = ./.; + format = "pyproject"; + + nativeBuildInputs = with python.pkgs; [ + setuptools + setuptools-scm + wheel + ]; + propagatedBuildInputs = with python.pkgs; [ + mcp google-genai openai pydantic python-dotenv + ]; + + # If packages aren't in nixpkgs, override here + # postInstall = "ln -s $out/bin/server.py $out/bin/zen-mcp-server"; + }; + + devShells.default = pkgs.mkShell { + packages = [ + python + python.pkgs.pip + python.pkgs.virtualenv + # Basic tools + pkgs.git + ] ++ (with python.pkgs; [ + # Only basic packages from nixpkgs + pytest pytest-mock black ruff isort setuptools wheel + ]); + + shellHook = '' + if [ ! -d ".nix-venv" ]; then + echo "Setting up Python environment..." + python -m venv .nix-venv --quiet + source .nix-venv/bin/activate + pip install -q --upgrade pip + pip install -q -e . 
+ pip install -q pytest-asyncio python-semantic-release + deactivate + fi + source .nix-venv/bin/activate + ''; + + # Ensure proper shared library paths + LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [ + pkgs.stdenv.cc.cc.lib + ]; + }; + }); +} diff --git a/pytest.ini b/pytest.ini index ce1a4f2b..d90c1eb8 100644 --- a/pytest.ini +++ b/pytest.ini @@ -4,9 +4,9 @@ python_files = test_*.py python_classes = Test* python_functions = test_* asyncio_mode = auto -addopts = +addopts = -v --strict-markers --tb=short markers = - integration: marks tests as integration tests that make real API calls with local-llama (free to run) \ No newline at end of file + integration: marks tests as integration tests that make real API calls with local-llama (free to run) diff --git a/requirements.txt b/requirements.txt index 6e2b7135..d6887cce 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,4 +8,4 @@ importlib-resources>=5.0.0; python_version<"3.9" # Development dependencies (install with pip install -r requirements-dev.txt) # pytest>=7.4.0 # pytest-asyncio>=0.21.0 -# pytest-mock>=3.11.0 \ No newline at end of file +# pytest-mock>=3.11.0 diff --git a/run-server.ps1 b/run-server.ps1 index b22b7c14..fa85524c 100644 --- a/run-server.ps1 +++ b/run-server.ps1 @@ -94,8 +94,8 @@ param( # ============================================================================ # Zen MCP Server Setup Script for Windows -# -# A Windows-compatible setup script that handles environment setup, +# +# A Windows-compatible setup script that handles environment setup, # dependency installation, and configuration. # ============================================================================ @@ -103,7 +103,7 @@ param( $ErrorActionPreference = "Stop" # ---------------------------------------------------------------------------- -# Constants and Configuration +# Constants and Configuration # ---------------------------------------------------------------------------- $script:VENV_PATH = ".zen_venv" @@ -161,11 +161,11 @@ function Test-Command { # Alternative method to force remove locked directories function Remove-LockedDirectory { param([string]$Path) - + if (!(Test-Path $Path)) { return $true } - + try { # Try standard removal first Remove-Item -Recurse -Force $Path -ErrorAction Stop @@ -173,7 +173,7 @@ function Remove-LockedDirectory { } catch { Write-Warning "Standard removal failed, trying alternative methods..." - + # Method 1: Use takeown and icacls to force ownership try { Write-Info "Attempting to take ownership of locked files..." @@ -185,25 +185,25 @@ function Remove-LockedDirectory { catch { Write-Warning "Ownership method failed" } - + # Method 2: Rename and schedule for deletion on reboot try { $tempName = "$Path.delete_$(Get-Random)" Write-Info "Renaming to: $tempName (will be deleted on next reboot)" Rename-Item $Path $tempName -ErrorAction Stop - + # Schedule for deletion on reboot using movefile if (Get-Command "schtasks" -ErrorAction SilentlyContinue) { Write-Info "Scheduling for deletion on next reboot..." 
} - + Write-Warning "Environment renamed to $tempName and will be deleted on next reboot" return $true } catch { Write-Warning "Rename method failed" } - + # If all methods fail, return false return $false } @@ -215,23 +215,23 @@ function Manage-ConfigBackups { [string]$ConfigFilePath, [int]$MaxBackups = 3 ) - + if (!(Test-Path $ConfigFilePath)) { Write-Warning "Configuration file not found: $ConfigFilePath" return $null } - + try { # Create new backup with timestamp $timestamp = Get-Date -Format 'yyyyMMdd_HHmmss' $backupPath = "$ConfigFilePath.backup_$timestamp" Copy-Item $ConfigFilePath $backupPath -ErrorAction Stop - + # Find all existing backups for this config file $configDir = Split-Path $ConfigFilePath -Parent $configFileName = Split-Path $ConfigFilePath -Leaf $backupPattern = "$configFileName.backup_*" - + $existingBackups = Get-ChildItem -Path $configDir -Filter $backupPattern -ErrorAction SilentlyContinue | Sort-Object LastWriteTime -Descending @@ -249,7 +249,7 @@ function Manage-ConfigBackups { } Write-Success "Backup retention: kept $MaxBackups most recent backups" } - + Write-Success "Backup created: $(Split-Path $backupPath -Leaf)" return $backupPath @@ -280,11 +280,11 @@ function Get-Version { # Clear Python cache files function Clear-PythonCache { Write-Info "Clearing Python cache files..." - + try { # Remove .pyc files Get-ChildItem -Path . -Recurse -Filter "*.pyc" -ErrorAction SilentlyContinue | Remove-Item -Force - + # Remove __pycache__ directories Get-ChildItem -Path . -Recurse -Name "__pycache__" -Directory -ErrorAction SilentlyContinue | ForEach-Object { Remove-Item -Path $_ -Recurse -Force } @@ -299,7 +299,7 @@ function Clear-PythonCache { # Get absolute path function Get-AbsolutePath { param([string]$Path) - + if (Test-Path $Path) { # Use Resolve-Path for full resolution return Resolve-Path $Path @@ -330,7 +330,7 @@ function Test-PythonVersion { # Find Python installation function Find-Python { $pythonCandidates = @("python", "python3", "py") - + foreach ($cmd in $pythonCandidates) { if (Test-Command $cmd) { if (Test-PythonVersion $cmd) { @@ -340,7 +340,7 @@ function Find-Python { } } } - + # Try Windows Python Launcher with specific versions $pythonVersions = @("3.12", "3.11", "3.10", "3.9") foreach ($version in $pythonVersions) { @@ -354,7 +354,7 @@ function Find-Python { continue } } - + Write-Error "Python 3.10+ not found. 
Please install Python from https://python.org" return $null } @@ -364,29 +364,29 @@ function Cleanup-Docker { if (Test-Path $DOCKER_CLEANED_FLAG) { return } - + if (!(Test-Command "docker")) { return } - + try { $null = docker info 2>$null } catch { return } - + $foundArtifacts = $false - + # Define containers to remove $containers = @( "gemini-mcp-server", - "gemini-mcp-redis", + "gemini-mcp-redis", "zen-mcp-server", "zen-mcp-redis", "zen-mcp-log-monitor" ) - + # Remove containers foreach ($container in $containers) { try { @@ -405,7 +405,7 @@ function Cleanup-Docker { # Ignore errors } } - + # Remove images $images = @("gemini-mcp-server:latest", "zen-mcp-server:latest") foreach ($image in $images) { @@ -424,7 +424,7 @@ function Cleanup-Docker { # Ignore errors } } - + # Remove volumes $volumes = @("redis_data", "mcp_logs") foreach ($volume in $volumes) { @@ -443,51 +443,51 @@ function Cleanup-Docker { # Ignore errors } } - + if ($foundArtifacts) { Write-Success "Docker cleanup complete" } - + New-Item -Path $DOCKER_CLEANED_FLAG -ItemType File -Force | Out-Null } # Validate API keys function Test-ApiKeys { Write-Step "Validating API Keys" - + if (!(Test-Path ".env")) { Write-Warning "No .env file found. API keys should be configured." return $false } - + $envContent = Get-Content ".env" $hasValidKey = $false - + $keyPatterns = @{ "GEMINI_API_KEY" = "AIza[0-9A-Za-z-_]{35}" "OPENAI_API_KEY" = "sk-[a-zA-Z0-9]{20}T3BlbkFJ[a-zA-Z0-9]{20}" "XAI_API_KEY" = "xai-[a-zA-Z0-9-_]+" "OPENROUTER_API_KEY" = "sk-or-[a-zA-Z0-9-_]+" } - + foreach ($line in $envContent) { if ($line -match '^([^#][^=]*?)=(.*)$') { $key = $matches[1].Trim() $value = $matches[2].Trim() -replace '^["'']|["'']$', '' - + if ($keyPatterns.ContainsKey($key) -and $value -ne "your_${key.ToLower()}_here" -and $value.Length -gt 10) { Write-Success "Found valid $key" $hasValidKey = $true } } } - + if (!$hasValidKey) { Write-Warning "No valid API keys found in .env file" Write-Info "Please edit .env file with your actual API keys" return $false } - + return $true } @@ -499,11 +499,11 @@ function Test-Uv { # Setup environment using uv-first approach function Initialize-Environment { Write-Step "Setting up Python Environment" - + # Try uv first for faster package management if (Test-Uv) { Write-Info "Using uv for faster package management..." - + if (Test-Path $VENV_PATH) { if ($Force) { Write-Warning "Removing existing environment..." @@ -517,7 +517,7 @@ function Initialize-Environment { } } } - + try { Write-Info "Creating virtual environment with uv..." uv venv $VENV_PATH --python 3.12 @@ -530,23 +530,23 @@ function Initialize-Environment { Write-Warning "uv failed, falling back to venv" } } - + # Fallback to standard venv $pythonCmd = Find-Python if (!$pythonCmd) { throw "Python 3.10+ not found" } - + if (Test-Path $VENV_PATH) { if ($Force) { Write-Warning "Removing existing environment..." try { # Stop any Python processes that might be using the venv Get-Process python* -ErrorAction SilentlyContinue | Where-Object { $_.Path -like "*$VENV_PATH*" } | Stop-Process -Force -ErrorAction SilentlyContinue - + # Wait a moment for processes to terminate Start-Sleep -Seconds 2 - + # Use the robust removal function if (Remove-LockedDirectory $VENV_PATH) { Write-Success "Existing environment removed" @@ -572,7 +572,7 @@ function Initialize-Environment { return Get-AbsolutePath "$VENV_PATH\Scripts\python.exe" } } - + Write-Info "Creating virtual environment with $pythonCmd..." 
if ($pythonCmd.StartsWith("py ")) { Invoke-Expression "$pythonCmd -m venv $VENV_PATH" @@ -580,11 +580,11 @@ function Initialize-Environment { else { & $pythonCmd -m venv $VENV_PATH } - + if ($LASTEXITCODE -ne 0) { throw "Failed to create virtual environment" } - + Write-Success "Virtual environment created" return Get-AbsolutePath "$VENV_PATH\Scripts\python.exe" } @@ -592,17 +592,17 @@ function Initialize-Environment { # Setup virtual environment (legacy function for compatibility) function Initialize-VirtualEnvironment { Write-Step "Setting up Python Virtual Environment" - + if (!$SkipVenv -and (Test-Path $VENV_PATH)) { if ($Force) { Write-Warning "Removing existing virtual environment..." try { # Stop any Python processes that might be using the venv Get-Process python* -ErrorAction SilentlyContinue | Where-Object { $_.Path -like "*$VENV_PATH*" } | Stop-Process -Force -ErrorAction SilentlyContinue - + # Wait a moment for processes to terminate Start-Sleep -Seconds 2 - + # Use the robust removal function if (Remove-LockedDirectory $VENV_PATH) { Write-Success "Existing environment removed" @@ -628,21 +628,21 @@ function Initialize-VirtualEnvironment { return } } - + if ($SkipVenv) { Write-Warning "Skipping virtual environment setup" return } - + $pythonCmd = Find-Python if (!$pythonCmd) { Write-Error "Python 3.10+ not found. Please install Python from https://python.org" exit 1 } - + Write-Info "Using Python: $pythonCmd" Write-Info "Creating virtual environment..." - + try { if ($pythonCmd.StartsWith("py ")) { Invoke-Expression "$pythonCmd -m venv $VENV_PATH" @@ -650,11 +650,11 @@ function Initialize-VirtualEnvironment { else { & $pythonCmd -m venv $VENV_PATH } - + if ($LASTEXITCODE -ne 0) { throw "Failed to create virtual environment" } - + Write-Success "Virtual environment created" } catch { @@ -670,7 +670,7 @@ function Install-Dependencies { [string]$PythonPath, [switch]$InstallDevDependencies = $false ) - + Write-Step "Installing Dependencies" # Build requirements files list @@ -713,7 +713,7 @@ function Install-Dependencies { # Fallback to pip Write-Info "Installing dependencies with pip..." $pipCmd = Join-Path (Split-Path $PythonPath -Parent) "pip.exe" - + try { # Upgrade pip first & $pipCmd install --upgrade pip | Out-Null @@ -745,12 +745,12 @@ function Install-Dependencies { # Test Docker availability and requirements function Test-DockerRequirements { Write-Step "Checking Docker Requirements" - + if (!(Test-Command "docker")) { Write-Error "Docker not found. Please install Docker Desktop from https://docker.com" return $false } - + try { $null = docker version 2>$null Write-Success "Docker is installed and running" @@ -759,7 +759,7 @@ function Test-DockerRequirements { Write-Error "Docker is installed but not running. Please start Docker Desktop." return $false } - + if (!(Test-Command "docker-compose")) { Write-Warning "docker-compose not found. Trying docker compose..." try { @@ -781,9 +781,9 @@ function Test-DockerRequirements { # Build Docker image function Build-DockerImage { param([switch]$Force = $false) - + Write-Step "Building Docker Image" - + # Check if image exists try { $imageExists = docker images --format "{{.Repository}}:{{.Tag}}" | Where-Object { $_ -eq "zen-mcp-server:latest" } @@ -795,7 +795,7 @@ function Build-DockerImage { catch { # Continue if command fails } - + if ($Force -and $imageExists) { Write-Info "Forcing rebuild of Docker image..." try { @@ -805,7 +805,7 @@ function Build-DockerImage { Write-Warning "Could not remove existing image, continuing..." 
} } - + Write-Info "Building Docker image from Dockerfile..." try { $buildArgs = @() @@ -813,12 +813,12 @@ function Build-DockerImage { # For development builds, we could add specific build args Write-Info "Building with development support..." } - + docker build -t zen-mcp-server:latest . if ($LASTEXITCODE -ne 0) { throw "Docker build failed" } - + Write-Success "Docker image built successfully" return $true } @@ -831,11 +831,11 @@ function Build-DockerImage { # Prepare Docker environment file function Initialize-DockerEnvironment { Write-Step "Preparing Docker Environment" - + # Ensure .env file exists if (!(Test-Path ".env")) { Write-Warning "No .env file found. Creating default .env file..." - + $defaultEnv = @" # API Keys - Replace with your actual keys GEMINI_API_KEY=your_gemini_api_key_here @@ -863,7 +863,7 @@ DEFAULT_THINKING_MODE_THINKDEEP=high #MAX_MCP_OUTPUT_TOKENS= #TZ=UTC "@ - + $defaultEnv | Out-File -FilePath ".env" -Encoding UTF8 Write-Success "Default .env file created" Write-Warning "Please edit .env file with your actual API keys" @@ -871,25 +871,25 @@ DEFAULT_THINKING_MODE_THINKDEEP=high else { Write-Success ".env file exists" } - + # Create logs directory for volume mount Initialize-Logging - + return $true } # Start Docker services function Start-DockerServices { param([switch]$Follow = $false) - + Write-Step "Starting Docker Services" - + # Check if docker-compose.yml exists if (!(Test-Path "docker-compose.yml")) { Write-Error "docker-compose.yml not found in current directory" return $false } - + try { # Stop any existing services Write-Info "Stopping any existing services..." @@ -899,7 +899,7 @@ function Start-DockerServices { else { docker compose down 2>$null } - + # Start services Write-Info "Starting Zen MCP Server with Docker Compose..." 
if (Test-Command "docker-compose") { @@ -918,11 +918,11 @@ function Start-DockerServices { docker compose up -d --build } } - + if ($LASTEXITCODE -ne 0) { throw "Failed to start Docker services" } - + if (!$Follow) { Write-Success "Docker services started successfully" Write-Info "Container name: zen-mcp-server" @@ -932,7 +932,7 @@ function Start-DockerServices { Write-Host "To stop: " -NoNewline Write-Host "docker-compose down" -ForegroundColor Yellow } - + return $true } catch { @@ -967,7 +967,7 @@ function Get-DockerStatus { # Setup logging directory function Initialize-Logging { Write-Step "Setting up Logging" - + if (!(Test-Path $LOG_DIR)) { New-Item -ItemType Directory -Path $LOG_DIR -Force | Out-Null Write-Success "Logs directory created" @@ -980,17 +980,17 @@ function Initialize-Logging { # Check Docker function Test-Docker { Write-Step "Checking Docker Setup" - + if ($SkipDocker) { Write-Warning "Skipping Docker checks" return } - + if (Test-Command "docker") { try { $null = docker version 2>$null Write-Success "Docker is installed and running" - + if (Test-Command "docker-compose") { Write-Success "Docker Compose is available" } @@ -1070,10 +1070,10 @@ $script:DockerMcpConfig = @{ # Generate Docker MCP configuration using docker run (recommended for all clients) function Get-DockerMcpConfigRun { param([string]$ServerPath) - + $scriptDir = Split-Path $ServerPath -Parent $envFile = Join-Path $scriptDir ".env" - + return @{ command = "docker" args = @("run", "--rm", "-i", "--env-file", $envFile, "zen-mcp-server:latest", "python", "server.py") @@ -1094,7 +1094,7 @@ function Get-PythonMcpConfig { # Check if client uses mcp.json format with servers structure function Test-McpJsonFormat { param([hashtable]$Client) - + $configFileName = Split-Path $Client.ConfigPath -Leaf return $configFileName -eq "mcp.json" } @@ -1102,7 +1102,7 @@ function Test-McpJsonFormat { # Check if client uses the new VS Code Insiders format (servers instead of mcpServers) function Test-VSCodeInsidersFormat { param([hashtable]$Client) - + return $Client.IsVSCodeInsiders -eq $true -and $Client.ConfigJsonPath -eq "servers.zen" } @@ -1114,7 +1114,7 @@ function Get-ExistingMcpConfigType { [Parameter(Mandatory = $true)] [string]$ConfigPath ) - + if (!(Test-Path $ConfigPath)) { return @{ Exists = $false @@ -1122,7 +1122,7 @@ function Get-ExistingMcpConfigType { Details = "No configuration found" } } - + try { $content = Get-Content $ConfigPath -Raw | ConvertFrom-Json -ErrorAction SilentlyContinue if (!$content) { @@ -1132,12 +1132,12 @@ function Get-ExistingMcpConfigType { Details = "Invalid JSON configuration" } } - + # Navigate to zen configuration $pathParts = $Client.ConfigJsonPath.Split('.') $zenKey = $pathParts[-1] $parentPath = $pathParts[0..($pathParts.Length - 2)] - + $targetObject = $content foreach ($key in $parentPath) { if (!$targetObject.PSObject.Properties[$key]) { @@ -1149,7 +1149,7 @@ function Get-ExistingMcpConfigType { } $targetObject = $targetObject.$key } - + if (!$targetObject.PSObject.Properties[$zenKey]) { return @{ Exists = $false @@ -1157,14 +1157,14 @@ function Get-ExistingMcpConfigType { Details = "Zen configuration not found" } } - + $zenConfig = $targetObject.$zenKey - + # Analyze configuration type if ($zenConfig.command -eq "docker") { $dockerType = "Unknown" $details = "Docker configuration" - + if ($zenConfig.args -and $zenConfig.args.Count -gt 0) { if ($zenConfig.args[0] -eq "run") { $dockerType = "Docker Run" @@ -1178,7 +1178,7 @@ function Get-ExistingMcpConfigType { $details = 
"Docker ($($zenConfig.args[0]))" } } - + return @{ Exists = $true Type = "Docker" @@ -1191,7 +1191,7 @@ function Get-ExistingMcpConfigType { elseif ($zenConfig.command -and $zenConfig.command.EndsWith("python.exe")) { $pythonType = "Python" $details = "Python virtual environment" - + if ($zenConfig.command.Contains(".zen_venv")) { $details = "Python (zen virtual environment)" } @@ -1201,7 +1201,7 @@ function Get-ExistingMcpConfigType { else { $details = "Python (system installation)" } - + return @{ Exists = $true Type = "Python" @@ -1267,7 +1267,7 @@ function Configure-McpClient { Write-Warning "$($Client.Name) user directory not found. Skipping." return } - + # Find most recent settings.json (default or profile) $settingsFiles = @() $defaultSettings = $configPath @@ -1277,7 +1277,7 @@ function Configure-McpClient { LastModified = (Get-Item $defaultSettings).LastWriteTime } } - + $profilesPath = Join-Path $userPath "profiles" if (Test-Path $profilesPath) { Get-ChildItem $profilesPath -Directory | ForEach-Object { @@ -1290,7 +1290,7 @@ function Configure-McpClient { } } } - + if ($settingsFiles.Count -gt 0) { $configPath = ($settingsFiles | Sort-Object LastModified -Descending | Select-Object -First 1).Path } @@ -1303,7 +1303,7 @@ function Configure-McpClient { Write-Warning "$($Client.Name) user directory not found. Skipping." return } - + # Find most recent mcp.json (default or profile) $mcpFiles = @() $defaultMcp = $configPath @@ -1313,7 +1313,7 @@ function Configure-McpClient { LastModified = (Get-Item $defaultMcp).LastWriteTime } } - + $profilesPath = Join-Path $userPath "profiles" if (Test-Path $profilesPath) { Get-ChildItem $profilesPath -Directory | ForEach-Object { @@ -1326,7 +1326,7 @@ function Configure-McpClient { } } } - + if ($mcpFiles.Count -gt 0) { $configPath = ($mcpFiles | Sort-Object LastModified -Descending | Select-Object -First 1).Path } @@ -1335,12 +1335,12 @@ function Configure-McpClient { # Check if already configured and analyze existing configuration $existingConfig = Get-ExistingMcpConfigType -Client $Client -ConfigPath $configPath $newConfigType = if ($UseDocker) { "Docker" } else { "Python" } - + if ($existingConfig.Exists) { Write-Info "Found existing Zen MCP configuration in $($Client.Name)" Write-Info " Current: $($existingConfig.Details)" Write-Info " New: $newConfigType configuration" - + if ($existingConfig.Type -eq $newConfigType) { Write-Warning "Same configuration type ($($existingConfig.Type)) already exists" $response = Read-Host "`nOverwrite existing $($existingConfig.Type) configuration? (y/N)" @@ -1350,12 +1350,12 @@ function Configure-McpClient { Write-Info " Replacing: $($existingConfig.Type) β†’ $newConfigType" $response = Read-Host "`nReplace $($existingConfig.Type) with $newConfigType configuration? (y/N)" } - + if ($response -ne 'y' -and $response -ne 'Y') { Write-Info "Keeping existing configuration in $($Client.Name)" return } - + Write-Info "Proceeding with configuration update..." 
} else { @@ -1383,7 +1383,7 @@ function Configure-McpClient { $config = New-Object PSObject $usesMcpJsonFormat = Test-McpJsonFormat -Client $Client $usesVSCodeInsidersFormat = Test-VSCodeInsidersFormat -Client $Client - + if (Test-Path $configPath) { $fileContent = Get-Content $configPath -Raw if ($fileContent.Trim()) { @@ -1391,7 +1391,7 @@ function Configure-McpClient { } if ($null -eq $config) { $config = New-Object PSObject } } - + # Initialize structure for mcp.json format files if they don't exist or are empty if ($usesMcpJsonFormat) { if ($usesVSCodeInsidersFormat) { @@ -1407,7 +1407,7 @@ function Configure-McpClient { } } } - + # Initialize MCP structure for VS Code settings.json if it doesn't exist if ($Client.IsVSCode -and $Client.ConfigJsonPath.StartsWith("mcp.")) { if (!$config.PSObject.Properties["mcp"]) { @@ -1419,7 +1419,7 @@ function Configure-McpClient { } # Generate server config - $serverConfig = if ($UseDocker) { + $serverConfig = if ($UseDocker) { # Use docker run for all clients (more reliable than docker exec) Get-DockerMcpConfigRun $ServerPath } @@ -1431,7 +1431,7 @@ function Configure-McpClient { $pathParts = $Client.ConfigJsonPath.Split('.') $zenKey = $pathParts[-1] $parentPath = $pathParts[0..($pathParts.Length - 2)] - + $targetObject = $config foreach ($key in $parentPath) { if (!$targetObject.PSObject.Properties[$key]) { @@ -1462,14 +1462,14 @@ function Invoke-McpClientConfiguration { [string]$PythonPath = "", [string]$ServerPath = "" ) - + Write-Step "Checking Client Integrations" - + # Configure GUI clients foreach ($client in $script:McpClientDefinitions) { Configure-McpClient -Client $client -UseDocker $UseDocker -PythonPath $PythonPath -ServerPath $ServerPath } - + # Handle CLI tools separately (they don't follow JSON config pattern) if (!$UseDocker) { Test-ClaudeCliIntegration $PythonPath $ServerPath @@ -1481,13 +1481,13 @@ function Invoke-McpClientConfiguration { # Keep existing CLI integration functions function Test-ClaudeCliIntegration { param([string]$PythonPath, [string]$ServerPath) - + if (!(Test-Command "claude")) { return } - + Write-Info "Claude CLI detected - checking configuration..." - + try { $claudeConfig = claude config list 2>$null if ($claudeConfig -match "zen") { @@ -1506,21 +1506,21 @@ function Test-ClaudeCliIntegration { function Test-GeminiCliIntegration { param([string]$ScriptDir) - + $zenWrapper = Join-Path $ScriptDir "zen-mcp-server.cmd" - + # Check if Gemini settings file exists (Windows path) $geminiConfig = "$env:USERPROFILE\.gemini\settings.json" if (!(Test-Path $geminiConfig)) { return } - + # Check if zen is already configured $configContent = Get-Content $geminiConfig -Raw -ErrorAction SilentlyContinue if ($configContent -and $configContent -match '"zen"') { return } - + # Ask user if they want to add Zen to Gemini CLI Write-Host "" $response = Read-Host "Configure Zen for Gemini CLI? (y/N)" @@ -1528,7 +1528,7 @@ function Test-GeminiCliIntegration { Write-Info "Skipping Gemini CLI integration" return } - + # Ensure wrapper script exists if (!(Test-Path $zenWrapper)) { Write-Info "Creating wrapper script for Gemini CLI..." @@ -1541,38 +1541,38 @@ if exist ".zen_venv\Scripts\python.exe" ( python server.py %* ) "@ | Out-File -FilePath $zenWrapper -Encoding ASCII - + Write-Success "Created zen-mcp-server.cmd wrapper script" } - + # Update Gemini settings Write-Info "Updating Gemini CLI configuration..." 
- + try { # Create backup with retention management $backupPath = Manage-ConfigBackups $geminiConfig - + # Read existing config or create new one $config = @{} if (Test-Path $geminiConfig) { $config = Get-Content $geminiConfig -Raw | ConvertFrom-Json } - + # Ensure mcpServers exists if (!$config.mcpServers) { $config | Add-Member -MemberType NoteProperty -Name "mcpServers" -Value @{} -Force } - + # Add zen server $zenConfig = @{ command = $zenWrapper } - + $config.mcpServers | Add-Member -MemberType NoteProperty -Name "zen" -Value $zenConfig -Force - + # Write updated config $config | ConvertTo-Json -Depth 10 | Out-File $geminiConfig -Encoding UTF8 - + Write-Success "Successfully configured Gemini CLI" Write-Host " Config: $geminiConfig" -ForegroundColor Gray Write-Host " Restart Gemini CLI to use Zen MCP Server" -ForegroundColor Gray @@ -1858,9 +1858,9 @@ function Show-ConfigInstructions { [string]$ServerPath = "", [switch]$UseDocker = $false ) - + Write-Step "Configuration Instructions" - + if ($UseDocker) { Write-Host "Docker Configuration:" -ForegroundColor Yellow Write-Host "The MCP clients have been configured to use Docker containers." -ForegroundColor White @@ -1873,10 +1873,10 @@ function Show-ConfigInstructions { Write-Host "Server Path: $ServerPath" -ForegroundColor Cyan Write-Host "" } - + Write-Host "Supported MCP Clients:" -ForegroundColor Green Write-Host "βœ“ Claude Desktop" -ForegroundColor White - Write-Host "βœ“ Claude CLI" -ForegroundColor White + Write-Host "βœ“ Claude CLI" -ForegroundColor White Write-Host "βœ“ VSCode (with MCP extension)" -ForegroundColor White Write-Host "βœ“ VSCode Insiders" -ForegroundColor White Write-Host "βœ“ Cursor" -ForegroundColor White @@ -1896,9 +1896,9 @@ function Show-SetupInstructions { [string]$ServerPath = "", [switch]$UseDocker = $false ) - + Write-Step "Setup Complete" - + if ($UseDocker) { Write-Success "Zen MCP Server is configured for Docker deployment" Write-Host "Docker command: docker exec -i zen-mcp-server python server.py" -ForegroundColor Cyan @@ -1908,7 +1908,7 @@ function Show-SetupInstructions { Write-Host "Python: $PythonPath" -ForegroundColor Cyan Write-Host "Server: $ServerPath" -ForegroundColor Cyan } - + Write-Host "" Write-Host "MCP clients will automatically connect to the server." -ForegroundColor Green Write-Host "For manual configuration, use the paths shown above." -ForegroundColor Gray @@ -1917,19 +1917,19 @@ function Show-SetupInstructions { # Start the server function Start-Server { Write-Step "Starting Zen MCP Server" - + $pythonPath = "$VENV_PATH\Scripts\python.exe" if (!(Test-Path $pythonPath)) { Write-Error "Python virtual environment not found. Please run setup first." return } - + $serverPath = "server.py" if (!(Test-Path $serverPath)) { Write-Error "Server script not found: $serverPath" return } - + try { Write-Info "Launching server..." & $pythonPath $serverPath @@ -1942,16 +1942,16 @@ function Start-Server { # Follow server logs function Follow-Logs { Write-Step "Following Server Logs" - + $logPath = Join-Path $LOG_DIR $LOG_FILE - + if (!(Test-Path $logPath)) { Write-Warning "Log file not found: $logPath" Write-Info "Starting server to generate logs..." 
Start-Server return } - + try { Write-Info "Following logs at: $logPath" Write-Host "Press Ctrl+C to stop following logs" @@ -1970,7 +1970,7 @@ function Follow-Logs { # Initialize .env file if it doesn't exist function Initialize-EnvFile { Write-Step "Setting up Environment File" - + if (!(Test-Path ".env")) { Write-Info "Creating default .env file..." @" @@ -2000,7 +2000,7 @@ DEFAULT_THINKING_MODE_THINKDEEP=high #MAX_MCP_OUTPUT_TOKENS= #TZ=UTC "@ | Out-File -FilePath ".env" -Encoding UTF8 - + Write-Success "Default .env file created" Write-Warning "Please edit .env file with your actual API keys" } @@ -2015,14 +2015,14 @@ function Import-EnvFile { Write-Warning "No .env file found" return } - + try { $envContent = Get-Content ".env" -ErrorAction Stop foreach ($line in $envContent) { if ($line -match '^([^#][^=]*?)=(.*)$') { $key = $matches[1].Trim() $value = $matches[2].Trim() -replace '^["'']|["'']$', '' - + # Set environment variable for the current session [Environment]::SetEnvironmentVariable($key, $value, "Process") } @@ -2043,26 +2043,26 @@ function Invoke-DockerWorkflow { Write-Step "Starting Docker Workflow" Write-Host "Zen MCP Server" -ForegroundColor Green Write-Host "=================" -ForegroundColor Cyan - + $version = Get-Version Write-Host "Version: $version" Write-Host "Mode: Docker Container" -ForegroundColor Yellow Write-Host "" - + # Docker setup and validation if (!(Test-DockerRequirements)) { exit 1 } if (!(Initialize-DockerEnvironment)) { exit 1 } - + Import-EnvFile Test-ApiKeys - + if (!(Build-DockerImage -Force:$Force)) { exit 1 } - + # Configure MCP clients for Docker Invoke-McpClientConfiguration -UseDocker $true - + Show-SetupInstructions -UseDocker - + # Start Docker services Write-Step "Starting Zen MCP Server" if ($Follow) { @@ -2070,13 +2070,13 @@ function Invoke-DockerWorkflow { Start-DockerServices -Follow exit 0 } - + if (!(Start-DockerServices)) { exit 1 } - + Write-Host "" Write-Success "Zen MCP Server is running in Docker!" Write-Host "" - + Write-Host "Next steps:" -ForegroundColor Cyan Write-Host "1. Restart your MCP clients (Claude Desktop, etc.)" -ForegroundColor White Write-Host "2. The server is now ready to use" -ForegroundColor White @@ -2095,22 +2095,22 @@ function Invoke-PythonWorkflow { Write-Step "Starting Python Virtual Environment Workflow" Write-Host "Zen MCP Server" -ForegroundColor Green Write-Host "=================" -ForegroundColor Cyan - + $version = Get-Version Write-Host "Version: $version" Write-Host "" - + if (!(Test-Path $VENV_PATH)) { Write-Info "Setting up Python environment for first time..." 
} - + # Python environment setup Cleanup-Docker Clear-PythonCache Initialize-EnvFile Import-EnvFile Test-ApiKeys - + try { $pythonPath = Initialize-Environment } @@ -2118,7 +2118,7 @@ function Invoke-PythonWorkflow { Write-Error "Failed to setup Python environment: $_" exit 1 } - + try { Install-Dependencies $pythonPath -InstallDevDependencies:$Dev } @@ -2126,19 +2126,19 @@ function Invoke-PythonWorkflow { Write-Error "Failed to install dependencies: $_" exit 1 } - + $serverPath = Get-AbsolutePath "server.py" - + # Configure MCP clients for Python Invoke-McpClientConfiguration -UseDocker $false -PythonPath $pythonPath -ServerPath $serverPath - + Show-SetupInstructions $pythonPath $serverPath Initialize-Logging - + Write-Host "" Write-Host "Logs will be written to: $(Get-AbsolutePath $LOG_DIR)\$LOG_FILE" Write-Host "" - + if ($Follow) { Follow-Logs } @@ -2148,7 +2148,7 @@ function Invoke-PythonWorkflow { Write-Host "To update: git pull, then run .\run-server.ps1 again" -ForegroundColor Yellow Write-Host "" Write-Host "Happy coding! πŸŽ‰" -ForegroundColor Green - + $response = Read-Host "`nStart the server now? (y/N)" if ($response -eq 'y' -or $response -eq 'Y') { Start-Server @@ -2171,12 +2171,12 @@ function Start-MainProcess { Show-Help exit 0 } - + if ($Version) { - Show-Version + Show-Version exit 0 } - + if ($ClearCache) { Clear-PythonCache Write-Success "Cache cleared successfully" @@ -2184,7 +2184,7 @@ function Start-MainProcess { Write-Host "You can now run '.\run-server.ps1' normally" exit 0 } - + if ($Config) { # Setup minimal environment to get paths for config display Write-Info "Setting up environment for configuration display..." diff --git a/run-server.sh b/run-server.sh index 5db53534..7bde5ca1 100755 --- a/run-server.sh +++ b/run-server.sh @@ -81,7 +81,7 @@ clear_python_cache() { # Get cross-platform Python executable path from venv get_venv_python_path() { local venv_path="$1" - + # Convert to absolute path for consistent behavior across shell environments local abs_venv_path abs_venv_path=$(cd "$(dirname "$venv_path")" && pwd)/$(basename "$venv_path") @@ -885,7 +885,7 @@ install_dependencies() { # If pip is still not available after retries, try to bootstrap it if [[ "$pip_available" == false ]]; then print_warning "pip is not available in the Python environment after $max_attempts attempts" - + # Enhanced diagnostic information for debugging print_info "Diagnostic information:" print_info " Python executable: $python_cmd" @@ -893,7 +893,7 @@ install_dependencies() { print_info " Python executable permissions: $(ls -la "$python_cmd" 2>/dev/null || echo "Cannot check")" print_info " Virtual environment path: $VENV_PATH" print_info " Virtual environment exists: $(if [[ -d "$VENV_PATH" ]]; then echo "Yes"; else echo "No"; fi)" - + print_info "Attempting to bootstrap pip..." 
# Extract the base python command for bootstrap (fallback to python3) diff --git a/run_integration_tests.ps1 b/run_integration_tests.ps1 index 539fd0a2..46c94b4a 100644 --- a/run_integration_tests.ps1 +++ b/run_integration_tests.ps1 @@ -79,7 +79,7 @@ $activateScript = if ($IsWindows -or $env:OS -eq "Windows_NT") { if (Test-Path $venvPath) { Write-Emoji "βœ…" "Virtual environment found" -Color Green - + # Activate virtual environment (for PowerShell on Windows) if ($IsWindows -or $env:OS -eq "Windows_NT") { if (Test-Path "$venvPath\Scripts\Activate.ps1") { @@ -109,27 +109,27 @@ function Test-ApiKey { param( [string]$KeyName ) - + # Check environment variable $envValue = [Environment]::GetEnvironmentVariable($KeyName) if (![string]::IsNullOrWhiteSpace($envValue)) { return $true } - + # Check .env file if (Test-Path ".env") { $envContent = Get-Content ".env" -ErrorAction SilentlyContinue $found = $envContent | Where-Object { $_ -match "^$KeyName\s*=" -and $_ -notmatch "^$KeyName\s*=\s*$" } return $found.Count -gt 0 } - + return $false } # Check API keys $apiKeys = @( "GEMINI_API_KEY", - "OPENAI_API_KEY", + "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY", "CUSTOM_API_URL" @@ -169,18 +169,18 @@ Write-ColorText "------------------------------" -Color Cyan try { # Build pytest command $pytestArgs = @("tests/", "-v", "-m", "integration", "--tb=short") - + if ($VerboseOutput) { $pytestArgs += "--verbose" } - + # Run pytest python -m pytest @pytestArgs - + if ($LASTEXITCODE -ne 0) { throw "Integration tests failed" } - + Write-Host "" Write-Emoji "βœ…" "Integration tests completed!" -Color Green } catch { @@ -195,14 +195,14 @@ if ($WithSimulator) { Write-Host "" Write-Emoji "πŸ€–" "Running simulator tests..." -Color Cyan Write-ColorText "----------------------------" -Color Cyan - + try { if ($VerboseOutput) { python communication_simulator_test.py --verbose } else { python communication_simulator_test.py } - + if ($LASTEXITCODE -ne 0) { Write-Host "" Write-Emoji "❌" "Simulator tests failed!" -Color Red diff --git a/run_integration_tests.sh b/run_integration_tests.sh index 1733367f..996be94b 100755 --- a/run_integration_tests.sh +++ b/run_integration_tests.sh @@ -11,13 +11,15 @@ echo "==============================================" echo "These tests use real API calls with your configured keys" echo "" -# Activate virtual environment -if [[ -f ".zen_venv/bin/activate" ]]; then +# Activate virtual environment (skip if in Nix environment) +if [[ -n "$NIX_BUILD_TOP" ]] || [[ -n "$IN_NIX_SHELL" ]]; then + echo "βœ… Using Nix development environment" +elif [[ -f ".zen_venv/bin/activate" ]]; then source .zen_venv/bin/activate echo "βœ… Using virtual environment" else echo "❌ No virtual environment found!" 
- echo "Please run: ./run-server.sh first" + echo "Please run: ./run-server.sh first or use 'nix develop'" exit 1 fi @@ -87,4 +89,4 @@ echo "πŸ’‘ Tips:" echo "- Run './run_integration_tests.sh' for integration tests only" echo "- Run './run_integration_tests.sh --with-simulator' to also run simulator tests" echo "- Run './code_quality_checks.sh' for unit tests and linting" -echo "- Check logs in logs/mcp_server.log if tests fail" \ No newline at end of file +echo "- Check logs in logs/mcp_server.log if tests fail" diff --git a/tests/openai_cassettes/o3_pro_basic_math.json b/tests/openai_cassettes/o3_pro_basic_math.json index 4ccd4dff..4b9c97b3 100644 --- a/tests/openai_cassettes/o3_pro_basic_math.json +++ b/tests/openai_cassettes/o3_pro_basic_math.json @@ -87,4 +87,4 @@ } } ] -} \ No newline at end of file +} diff --git a/zen-mcp-server b/zen-mcp-server index 93753078..3b8ac3b8 100755 --- a/zen-mcp-server +++ b/zen-mcp-server @@ -8,4 +8,4 @@ DIR="$(cd "$(dirname "$0")" && pwd)" cd "$DIR" # Execute the Python server with all arguments passed through -exec .zen_venv/bin/python server.py "$@" \ No newline at end of file +exec .zen_venv/bin/python server.py "$@"