60 changes: 60 additions & 0 deletions .github/workflows/agno.yaml
@@ -0,0 +1,60 @@
name: Agno CI

on:
push:
paths:
- '.github/workflows/agno.yaml'
- 'agno/**'
branches:
- main
pull_request:
paths:
- '.github/workflows/agno.yaml'
- 'agno/**'

permissions:
contents: read

jobs:
check:
name: Format & Type Check
runs-on: ubuntu-latest

defaults:
run:
working-directory: agno/agent

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Install uv
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH # Make uv available

- name: Check format
run: uv run ruff format --check

- name: Check lint
run: uv run ruff check

- name: Check types
run: uv run pyright

build:
name: Build
runs-on: ubuntu-latest
defaults:
run:
working-directory: agno

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Docker Compose
uses: ./.github/actions/setup-compose

- name: Build Docker images
run: docker compose build
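
The build job relies on a local composite action, `./.github/actions/setup-compose`, that is not included in this diff. As a rough sketch, such an action might look like the following (hypothetical; the real action may instead install or pin a specific Compose version):

```
# Hypothetical .github/actions/setup-compose/action.yaml; not part of this
# diff. Assumes the ubuntu-latest runner already ships the Compose plugin
# and only needs it verified.
name: Setup Compose
description: Ensure the docker compose plugin is available
runs:
  using: composite
  steps:
    - name: Verify docker compose
      shell: bash
      run: docker compose version
```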
3 changes: 2 additions & 1 deletion agno/.gitignore
@@ -1 +1,2 @@
.mcp.env
/secret.*
/.mcp.env
19 changes: 19 additions & 0 deletions agno/README.md
@@ -40,6 +40,24 @@ docker compose -f compose.yaml -f compose.offload.yaml up --build

That's all! The agents will spin up automatically. Open **http://localhost:3000** in your browser to interact with the multi-agent system.

# 🧠 Inference Options

By default, this project uses [Docker Model Runner] to handle LLM inference locally — no internet connection or external API key is required.

If you’d prefer to use OpenAI instead:

1. Create a `secret.openai-api-key` file with your OpenAI API key:

```
sk-...
```

2. Restart the project with the OpenAI configuration:

```
docker compose down -v
docker compose -f compose.yaml -f compose.openai.yaml up
```

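For orientation, a hypothetical sketch of what the `compose.openai.yaml` override might contain follows. The real file ships with the project; the service name and model value are assumptions, but the secret name matches the `/run/secrets/openai-api-key` path the agent entrypoint reads.

```
# Hypothetical sketch; the real compose.openai.yaml ships with the project.
services:
  agent:                               # assumed service name
    secrets:
      - openai-api-key                 # mounted at /run/secrets/openai-api-key
    environment:
      OPENAI_MODEL_NAME: gpt-4o-mini   # assumed model
secrets:
  openai-api-key:
    file: ./secret.openai-api-key
```
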
# ❓ What Can It Do?

Give it any public GitHub repository and watch the agents collaborate to deliver a comprehensive analysis:
@@ -134,3 +152,4 @@ docker compose down -v
[GitHub MCP Server]: https://github.com/modelcontextprotocol/servers
[Docker Compose]: https://github.com/docker/compose
[Docker Desktop]: https://www.docker.com/products/docker-desktop/
[Docker Model Runner]: https://docs.docker.com/ai/model-runner/
6 changes: 4 additions & 2 deletions agno/agent/.dockerignore
@@ -1,2 +1,4 @@
.venv
.mypy_cache
*
!pyproject.toml
!uv.lock
!playground.py
3 changes: 3 additions & 0 deletions agno/agent/.gitignore
@@ -0,0 +1,3 @@
/.venv
/.ruff_cache
/.mypy_cache
5 changes: 5 additions & 0 deletions agno/agent/.ruff.toml
@@ -0,0 +1,5 @@
[lint]
select = ["E4", "E7", "E9", "F", "I", "RUF022"]

[lint.isort]
force-sort-within-sections = true
36 changes: 28 additions & 8 deletions agno/agent/Dockerfile
@@ -1,11 +1,31 @@
FROM python

RUN apt-get update && apt-get install -y socat netcat-openbsd && \
ln -sf /bin/nc.openbsd /usr/bin/nc
FROM python:3.13-slim
ENV PYTHONUNBUFFERED=1
RUN pip install uv

WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
COPY pyproject.toml uv.lock ./
RUN --mount=type=cache,target=/root/.cache/uv \
UV_COMPILE_BYTECODE=1 uv pip install --system .
COPY playground.py .
RUN python -m compileall -q .
COPY <<EOF entrypoint.sh
#!/bin/sh

if test -f /run/secrets/openai-api-key; then
export OPENAI_API_KEY=$(cat /run/secrets/openai-api-key)
fi

ENTRYPOINT ["python", "playground.py"]
if test -n "\${OPENAI_API_KEY}"; then
echo "Using OpenAI with \${OPENAI_MODEL_NAME}"
export MODEL_PROVIDER=openai
export MODEL_NAME=\${OPENAI_MODEL_NAME}
else
echo "Using Docker Model Runner with \${MODEL_RUNNER_MODEL}"
export MODEL_PROVIDER=docker
export MODEL_NAME=\${MODEL_RUNNER_MODEL}
export OPENAI_API_KEY=cannot_be_empty
fi
exec python playground.py
EOF
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
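
The entrypoint branches on `OPENAI_API_KEY`: if the secret was mounted it selects OpenAI, otherwise it falls back to Docker Model Runner and expects `MODEL_RUNNER_MODEL` (and, in `playground.py`, `MODEL_RUNNER_URL`) to be set. A hypothetical compose fragment that could supply those variables, assuming Compose's model-binding support (the shipped `compose.yaml` is not part of this diff):

```
# Hypothetical compose fragment; the shipped compose.yaml is not shown here.
services:
  agent:                                # assumed service name
    models:
      llm:
        endpoint_var: MODEL_RUNNER_URL  # Compose injects the runner endpoint
        model_var: MODEL_RUNNER_MODEL   # Compose injects the model name
models:
  llm:
    model: ai/qwen3                     # assumed model
```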
137 changes: 47 additions & 90 deletions agno/agent/playground.py
@@ -1,69 +1,47 @@
import asyncio
import os
import socket
import sys
from typing import Optional
from urllib.parse import urlparse

import nest_asyncio
import yaml

from agno import agent, team
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.playground import Playground, serve_playground_app
from agno.tools.mcp import MCPTools, Toolkit
from agno.tools.reasoning import ReasoningTools
from agno.team import Team
from agno.tools import Toolkit
from agno.tools.mcp import MCPTools
from fastapi.middleware.cors import CORSMiddleware
import nest_asyncio
import yaml

# Allow nested event loops
nest_asyncio.apply()

DOCKER_MODEL_PROVIDER = "docker"

class Agent(agent.Agent):
@property
def is_streamable(self) -> bool:
if self.stream is not None:
return self.stream
return super().is_streamable


class Team(team.Team):
@property
def is_streamable(self) -> bool:
stream = getattr(self, "stream")
if stream is not None:
return stream
return super().is_streamable


def should_stream(model_provider: str, tools: list[Toolkit]) -> Optional[bool]:
"""Returns whether a model with the given provider and tools can stream"""
if model_provider == DOCKER_MODEL_PROVIDER and len(tools) > 0:
# DMR doesn't yet support tools with streaming
return True
# Let the model/options decide
return None


def create_model_from_config(entity_data: dict, entity_id: str) -> tuple[OpenAIChat, str]:
def create_model_from_config(entity_data: dict, entity_id: str) -> OpenAIChat:
"""Create a model instance from entity configuration data."""
model_name = entity_data.get("model")
if not model_name:
model_name = os.getenv("MODEL_RUNNER_MODEL")
temperature = entity_data.get("temperature", None)
provider = entity_data.get("model_provider", "docker")
model = create_model(model_name, provider, temperature)
return model, provider
model = entity_data.get("model", {})
name = model.get("name")
if not name:
raise ValueError(
f"Model name not specified for {entity_id}. Please set 'model.name' in the configuration."
)
provider = model.get("provider", "")
temperature = entity_data.get("temperature")
return create_model(name, provider, temperature)


def create_model(model_name: str, provider: str, temperature: float) -> OpenAIChat:
def create_model(
model_name: str, provider: str, temperature: float | None
) -> OpenAIChat:
"""Create a model instance based on the model name and provider."""
print(f"creating model {model_name} with provider {provider} and temperature {temperature}")
if provider == DOCKER_MODEL_PROVIDER:
print(
f"creating model {model_name} with provider {provider} and temperature {temperature}"
)
if provider == "docker":
base_url = os.getenv("MODEL_RUNNER_URL")
if base_url is None:
base_url = "http://model-runner.docker.internal/engines/llama.cpp/v1"
raise ValueError(
f"MODEL_RUNNER_URL environment variable not set for {model_name}."
)
model = OpenAIChat(id=model_name, base_url=base_url, temperature=temperature)
model.role_map = {
"system": "system",
@@ -91,36 +69,16 @@ async def create_mcp_tools(tools_list: list[str], entity_type: str) -> list[Tool

tool_names = [name.split(":", 1)[1] for name in tools_list]

# Always use socat, but the endpoint can be different (mock vs real gateway)
endpoint = os.environ['MCPGATEWAY_ENDPOINT']
print(f"DEBUG: {entity_type} connecting to MCP gateway at {endpoint}")

# Parse endpoint to extract host and port
try:
# Handle both URL format (http://host:port/path) and host:port format
if endpoint.startswith('http://') or endpoint.startswith('https://'):
parsed = urlparse(endpoint)
host = parsed.hostname
port = parsed.port
tcp_endpoint = f"{host}:{port}"
else:
# Legacy host:port format
host, port = endpoint.split(':')
port = int(port)
tcp_endpoint = endpoint

# Test TCP connection first
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((host, port))
sock.close()
print(f"DEBUG: TCP connection to {host}:{port} successful")
except Exception as e:
print(f"ERROR: TCP connection to {endpoint} failed: {e}")
raise
url = os.environ.get("MCPGATEWAY_URL")
if not url:
raise ValueError(
f"MCPGATEWAY_URL environment variable not set for {entity_type} tools"
)
print(f"DEBUG: {entity_type} connecting to MCP gateway at {url}")

t = MCPTools(
command=f"socat STDIO TCP:{tcp_endpoint}",
url=url,
transport="sse",
include_tools=tool_names,
)
mcp_tools = await t.__aenter__()
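
The rewritten `create_mcp_tools` drops the socat bridge and the manual TCP probe in favor of `MCPTools`' built-in SSE transport, so the gateway is addressed by a single URL. A hypothetical compose fragment showing where `MCPGATEWAY_URL` might come from (the gateway service name, port, and path are assumptions):

```
# Hypothetical wiring; gateway host, port, and /sse path are assumptions.
services:
  agent:
    environment:
      MCPGATEWAY_URL: http://mcp-gateway:8811/sse
```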
@@ -130,9 +88,9 @@ async def create_mcp_tools(tools_list: list[str], entity_type: str) -> list[Tool
def get_common_config(entity_data: dict) -> dict:
"""Extract common configuration options."""
return {
'markdown': entity_data.get("markdown", False),
'add_datetime_to_instructions': True,
'debug_mode': True,
"markdown": entity_data.get("markdown", False),
"add_datetime_to_instructions": True,
"debug_mode": True,
}


@@ -145,11 +103,11 @@ async def run_server(config) -> None:
teams_by_id = {}

for agent_id, agent_data in config.get("agents", {}).items():
model, provider = create_model_from_config(agent_data, agent_id)
model = create_model_from_config(agent_data, agent_id)
common_config = get_common_config(agent_data)

tools: list[Toolkit] = [
# ReasoningTools(think=True, analyze=True)
# ReasoningTools(think=True, analyze=True)
]
tools_list = agent_data.get("tools", [])
mcp_tools = await create_mcp_tools(tools_list, "Agent")
@@ -160,10 +118,9 @@
role=agent_data.get("role", ""),
description=agent_data.get("description"),
instructions=agent_data.get("instructions"),
tools=tools,
tools=tools, # type: ignore
model=model,
show_tool_calls=True,
stream=should_stream(provider, tools),
**common_config,
)
agents_by_id[agent_id] = agent
@@ -172,7 +129,7 @@
agents.append(agent)

for team_id, team_data in config.get("teams", {}).items():
model, provider = create_model_from_config(team_data, team_id)
model = create_model_from_config(team_data, team_id)
common_config = get_common_config(team_data)

team_agents: list[Agent | Team] = []
@@ -184,7 +141,7 @@
team_agents.append(agent)

team_tools: list[Toolkit] = [
# ReasoningTools(think=True, analyze=True)
# ReasoningTools(think=True, analyze=True)
]
tools_list = team_data.get("tools", [])
mcp_tools = await create_mcp_tools(tools_list, "Team")
@@ -193,21 +150,20 @@
team = Team(
name=team_data.get("name", ""),
mode=team_data.get("mode", "coordinate"),
members=team_agents, # type: ignore
members=team_agents,
description=team_data.get("description"),
instructions=team_data.get("instructions"),
tools=team_tools, # type: ignore,
tools=team_tools, # type: ignore
model=model,
# show_members_responses=True,
# show_tool_calls=True,
**common_config,
)
team.stream = should_stream(provider, team_tools)
teams_by_id[team_id] = team
if team_data.get("chat", True):
teams.append(team)

playground = Playground(agents=agents, teams=teams) # type: ignore
playground = Playground(agents=agents, teams=teams)

app = playground.get_app()
app.add_middleware(
@@ -225,7 +181,8 @@
def main():
config_filename = sys.argv[1] if len(sys.argv) > 1 else "/agents.yaml"
with open(config_filename, "r") as f:
config = yaml.safe_load(f)
expanded = os.path.expandvars(f.read())
config = yaml.safe_load(expanded)

asyncio.run(run_server(config))

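This ties back to the Dockerfile entrypoint: because the config file is passed through `os.path.expandvars` before YAML parsing, it can reference the exported `MODEL_PROVIDER` and `MODEL_NAME` variables directly. A hypothetical `agents.yaml` fragment under that assumption (the shipped config is not part of this diff):

```
# Hypothetical agents.yaml fragment; the shipped config is not shown here.
agents:
  analyst:                          # assumed agent id
    name: Analyst
    model:
      provider: ${MODEL_PROVIDER}   # "openai" or "docker", set by entrypoint
      name: ${MODEL_NAME}
    temperature: 0.2
    tools:
      - mcp:get_file_contents       # "mcp:" prefix is stripped before include_tools
```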