Commit 2f099d2

feat(agno): Add OpenAI support and CI
- Add support for OpenAI using the OPENAI_API_KEY environment variable
- Update README to document the different inference options
- Add code formatting and type checking
- Add CI for formatting, type checking and building images
1 parent 89b743d

File tree

14 files changed: +950, -182 lines

.github/workflows/agno.yaml

Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
+name: Agno CI
+
+on:
+  push:
+    paths:
+      - '.github/workflows/agno.yaml'
+      - 'agno/**'
+    branches:
+      - main
+  pull_request:
+    paths:
+      - '.github/workflows/agno.yaml'
+      - 'agno/**'
+
+permissions:
+  contents: read
+
+jobs:
+  check:
+    name: Format & Type Check
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        working-directory: agno/agent
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Install uv
+        run: |
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          echo "$HOME/.cargo/bin" >> $GITHUB_PATH # Make uv available
+
+      - name: Check format
+        run: uv run ruff format --check
+
+      - name: Check lint
+        run: uv run ruff check
+
+      - name: Check types
+        run: uv run pyright
+
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: agno
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Docker Compose
+        uses: ./.github/actions/setup-compose
+
+      - name: Build Docker images
+        run: docker compose build
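
For a rough local equivalent of the check job, the same three commands can be chained from `agno/agent`; a minimal sketch (assuming uv, ruff, and pyright resolve through the project's dev environment, which this diff does not show):

```python
# Local stand-in for the "Format & Type Check" CI job (a sketch, not part
# of this commit). Runs each check from agno/agent and fails fast, like
# separate workflow steps.
import subprocess
import sys

CHECKS = [
    ["uv", "run", "ruff", "format", "--check"],
    ["uv", "run", "ruff", "check"],
    ["uv", "run", "pyright"],
]

for cmd in CHECKS:
    result = subprocess.run(cmd, cwd="agno/agent")
    if result.returncode != 0:
        sys.exit(result.returncode)
```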

agno/.env.example

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+OPENAI_API_KEY=<your-api-key>

agno/.gitignore

Lines changed: 2 additions & 1 deletion
@@ -1 +1,2 @@
-.mcp.env
+/.env
+/.mcp.env

agno/README.md

Lines changed: 25 additions & 0 deletions
@@ -40,6 +40,31 @@ docker compose -f compose.yaml -f compose.offload.yaml up --build
 
 That's all! The agents will spin up automatically. Open **http://localhost:3000** in your browser to interact with the multi-agent system.
 
+# 🧠 Inference Options
+
+By default, this project uses [Docker Model Runner] to handle LLM inference locally — no internet connection or external API key is required.
+
+If you’d prefer to use OpenAI instead:
+
+1. Copy the example environment file:
+
+   ```sh
+   cp .env.example .env
+   ```
+
+2. Edit `.env` and set your OpenAI API key:
+
+   ```
+   OPENAI_API_KEY=sk-...
+   ```
+
+3. Restart the project:
+
+   ```
+   docker compose down -v
+   docker compose up
+   ```
+
 # ❓ What Can It Do?
 
 Give it any public GitHub repository and watch the agents collaborate to deliver a comprehensive analysis:
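
For reference, the default local path speaks the same OpenAI-compatible API through Docker Model Runner. A minimal direct request might look like the sketch below; the host port and model tag are assumptions about a typical DMR setup, not values taken from this commit:

```python
# Direct call to Docker Model Runner's OpenAI-compatible endpoint
# (a sketch; the URL and model tag below are assumed, not from this diff).
import json
import urllib.request

url = "http://localhost:12434/engines/llama.cpp/v1/chat/completions"
payload = {
    "model": "ai/qwen2.5",  # hypothetical model tag
    "messages": [{"role": "user", "content": "Say hello"}],
}
req = urllib.request.Request(
    url,
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["choices"][0]["message"]["content"])
```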

agno/agent/.dockerignore

Lines changed: 4 additions & 2 deletions
@@ -1,2 +1,4 @@
-.venv
-.mypy_cache
+*
+!pyproject.toml
+!uv.lock
+!playground.py

agno/agent/.gitignore

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+/.venv
+/.ruff_cache
+/.mypy_cache

agno/agent/.ruff.toml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+[lint]
+select = ["E4", "E7", "E9", "F", "I", "RUF022"]
+
+[lint.isort]
+force-sort-within-sections = true
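
Here `I` enables isort-style import ordering and RUF022 keeps `__all__` sorted; `force-sort-within-sections` interleaves `import x` and `from x import y` statements alphabetically instead of grouping them, which is what reorders the imports in the playground.py diff below. A small illustration of an ordering this setting accepts:

```python
# With force-sort-within-sections = true, plain and "from" imports are
# sorted together by module name within a section, so nest_asyncio and
# yaml land after fastapi rather than before all the "from" imports.
from agno.agent import Agent
from fastapi.middleware.cors import CORSMiddleware
import nest_asyncio
import yaml
```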

agno/agent/Dockerfile

Lines changed: 24 additions & 9 deletions
@@ -1,11 +1,26 @@
-FROM python
-
-RUN apt-get update && apt-get install -y socat netcat-openbsd && \
-    ln -sf /bin/nc.openbsd /usr/bin/nc
+FROM python:3.13-slim
+ENV PYTHONUNBUFFERED=1
+RUN pip install uv
 
 WORKDIR /app
-COPY requirements.txt .
-RUN pip install -r requirements.txt
-COPY . .
-
-ENTRYPOINT ["python", "playground.py"]
+COPY pyproject.toml uv.lock ./
+RUN --mount=type=cache,target=/root/.cache/uv \
+    UV_COMPILE_BYTECODE=1 uv pip install --system .
+COPY playground.py .
+RUN python -m compileall -q .
+COPY <<EOF entrypoint.sh
+#!/bin/sh
+if test -n "\${OPENAI_API_KEY}"; then
+  echo "OPENAI_API_KEY is set, using OpenAI with \${OPENAI_MODEL_NAME}"
+  export MODEL_PROVIDER=openai
+  export MODEL_NAME=\${OPENAI_MODEL_NAME}
+else
+  echo "OPENAI_API_KEY is not set, using Docker Model Runner with \${MODEL_RUNNER_MODEL}"
+  export MODEL_PROVIDER=docker
+  export MODEL_NAME=\${MODEL_RUNNER_MODEL}
+  export OPENAI_API_KEY=cannot_be_empty
+fi
+exec python playground.py
+EOF
+RUN chmod +x entrypoint.sh
+ENTRYPOINT ["./entrypoint.sh"]
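
The heredoc entrypoint picks the provider at container start: any non-empty OPENAI_API_KEY selects OpenAI, otherwise Docker Model Runner. The same selection logic expressed in Python, purely as an illustration (the image runs the shell script above, not this):

```python
# Python rendering of entrypoint.sh's provider selection (illustration
# only, not part of the commit).
import os

if os.environ.get("OPENAI_API_KEY"):
    os.environ["MODEL_PROVIDER"] = "openai"
    os.environ["MODEL_NAME"] = os.environ.get("OPENAI_MODEL_NAME", "")
else:
    os.environ["MODEL_PROVIDER"] = "docker"
    os.environ["MODEL_NAME"] = os.environ.get("MODEL_RUNNER_MODEL", "")
    # The OpenAI client rejects an empty key even against a local
    # OpenAI-compatible server, hence the placeholder value.
    os.environ["OPENAI_API_KEY"] = "cannot_be_empty"

print(f"using {os.environ['MODEL_PROVIDER']} with {os.environ['MODEL_NAME']}")
```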

agno/agent/playground.py

Lines changed: 47 additions & 90 deletions
@@ -1,69 +1,47 @@
 import asyncio
 import os
-import socket
 import sys
-from typing import Optional
-from urllib.parse import urlparse
 
-import nest_asyncio
-import yaml
-
-from agno import agent, team
+from agno.agent import Agent
 from agno.models.openai import OpenAIChat
 from agno.playground import Playground, serve_playground_app
-from agno.tools.mcp import MCPTools, Toolkit
-from agno.tools.reasoning import ReasoningTools
+from agno.team import Team
+from agno.tools import Toolkit
+from agno.tools.mcp import MCPTools
 from fastapi.middleware.cors import CORSMiddleware
+import nest_asyncio
+import yaml
 
 # Allow nested event loops
 nest_asyncio.apply()
 
-DOCKER_MODEL_PROVIDER = "docker"
-
-class Agent(agent.Agent):
-    @property
-    def is_streamable(self) -> bool:
-        if self.stream is not None:
-            return self.stream
-        return super().is_streamable
-
-
-class Team(team.Team):
-    @property
-    def is_streamable(self) -> bool:
-        stream = getattr(self, "stream")
-        if stream is not None:
-            return stream
-        return super().is_streamable
 
-
-def should_stream(model_provider: str, tools: list[Toolkit]) -> Optional[bool]:
-    """Returns whether a model with the given provider and tools can stream"""
-    if model_provider == DOCKER_MODEL_PROVIDER and len(tools) > 0:
-        # DMR doesn't yet support tools with streaming
-        return True
-    # Let the model/options decide
-    return None
-
-
-def create_model_from_config(entity_data: dict, entity_id: str) -> tuple[OpenAIChat, str]:
+def create_model_from_config(entity_data: dict, entity_id: str) -> OpenAIChat:
     """Create a model instance from entity configuration data."""
-    model_name = entity_data.get("model")
-    if not model_name:
-        model_name = os.getenv("MODEL_RUNNER_MODEL")
-    temperature = entity_data.get("temperature", None)
-    provider = entity_data.get("model_provider", "docker")
-    model = create_model(model_name, provider, temperature)
-    return model, provider
+    model = entity_data.get("model", {})
+    name = model.get("name")
+    if not name:
+        raise ValueError(
+            f"Model name not specified for {entity_id}. Please set 'model.name' in the configuration."
+        )
+    provider = model.get("provider", "")
+    temperature = entity_data.get("temperature")
+    return create_model(name, provider, temperature)
 
 
-def create_model(model_name: str, provider: str, temperature: float) -> OpenAIChat:
+def create_model(
+    model_name: str, provider: str, temperature: float | None
+) -> OpenAIChat:
     """Create a model instance based on the model name and provider."""
-    print(f"creating model {model_name} with provider {provider} and temperature {temperature}")
-    if provider == DOCKER_MODEL_PROVIDER:
+    print(
+        f"creating model {model_name} with provider {provider} and temperature {temperature}"
+    )
+    if provider == "docker":
         base_url = os.getenv("MODEL_RUNNER_URL")
         if base_url is None:
-            base_url = "http://model-runner.docker.internal/engines/llama.cpp/v1"
+            raise ValueError(
+                f"MODEL_RUNNER_URL environment variable not set for {model_name}."
+            )
         model = OpenAIChat(id=model_name, base_url=base_url, temperature=temperature)
         model.role_map = {
             "system": "system",
@@ -91,36 +69,16 @@ async def create_mcp_tools(tools_list: list[str], entity_type: str) -> list[Tool
 
     tool_names = [name.split(":", 1)[1] for name in tools_list]
 
-    # Always use socat, but the endpoint can be different (mock vs real gateway)
-    endpoint = os.environ['MCPGATEWAY_ENDPOINT']
-    print(f"DEBUG: {entity_type} connecting to MCP gateway at {endpoint}")
-
-    # Parse endpoint to extract host and port
-    try:
-        # Handle both URL format (http://host:port/path) and host:port format
-        if endpoint.startswith('http://') or endpoint.startswith('https://'):
-            parsed = urlparse(endpoint)
-            host = parsed.hostname
-            port = parsed.port
-            tcp_endpoint = f"{host}:{port}"
-        else:
-            # Legacy host:port format
-            host, port = endpoint.split(':')
-            port = int(port)
-            tcp_endpoint = endpoint
-
-        # Test TCP connection first
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        sock.settimeout(5)
-        sock.connect((host, port))
-        sock.close()
-        print(f"DEBUG: TCP connection to {host}:{port} successful")
-    except Exception as e:
-        print(f"ERROR: TCP connection to {endpoint} failed: {e}")
-        raise
+    url = os.environ.get("MCPGATEWAY_URL")
+    if not url:
+        raise ValueError(
+            f"MCPGATEWAY_URL environment variable not set for {entity_type} tools"
+        )
+    print(f"DEBUG: {entity_type} connecting to MCP gateway at {url}")
 
     t = MCPTools(
-        command=f"socat STDIO TCP:{tcp_endpoint}",
+        url=url,
+        transport="sse",
         include_tools=tool_names,
     )
     mcp_tools = await t.__aenter__()
@@ -130,9 +88,9 @@ async def create_mcp_tools(tools_list: list[str], entity_type: str) -> list[Tool
 def get_common_config(entity_data: dict) -> dict:
     """Extract common configuration options."""
     return {
-        'markdown': entity_data.get("markdown", False),
-        'add_datetime_to_instructions': True,
-        'debug_mode': True,
+        "markdown": entity_data.get("markdown", False),
+        "add_datetime_to_instructions": True,
+        "debug_mode": True,
     }
 
 
@@ -145,11 +103,11 @@ async def run_server(config) -> None:
     teams_by_id = {}
 
     for agent_id, agent_data in config.get("agents", {}).items():
-        model, provider = create_model_from_config(agent_data, agent_id)
+        model = create_model_from_config(agent_data, agent_id)
         common_config = get_common_config(agent_data)
 
         tools: list[Toolkit] = [
-            # ReasoningTools(think=True, analyze=True)
+            # ReasoningTools(think=True, analyze=True)
        ]
         tools_list = agent_data.get("tools", [])
         mcp_tools = await create_mcp_tools(tools_list, "Agent")
@@ -160,10 +118,9 @@ async def run_server(config) -> None:
             role=agent_data.get("role", ""),
             description=agent_data.get("description"),
             instructions=agent_data.get("instructions"),
-            tools=tools,
+            tools=tools,  # type: ignore
             model=model,
             show_tool_calls=True,
-            stream=should_stream(provider, tools),
             **common_config,
         )
         agents_by_id[agent_id] = agent
@@ -172,7 +129,7 @@ async def run_server(config) -> None:
         agents.append(agent)
 
     for team_id, team_data in config.get("teams", {}).items():
-        model, provider = create_model_from_config(team_data, team_id)
+        model = create_model_from_config(team_data, team_id)
         common_config = get_common_config(team_data)
 
         team_agents: list[Agent | Team] = []
@@ -184,7 +141,7 @@ async def run_server(config) -> None:
             team_agents.append(agent)
 
         team_tools: list[Toolkit] = [
-            # ReasoningTools(think=True, analyze=True)
+            # ReasoningTools(think=True, analyze=True)
        ]
         tools_list = team_data.get("tools", [])
         mcp_tools = await create_mcp_tools(tools_list, "Team")
@@ -193,21 +150,20 @@ async def run_server(config) -> None:
         team = Team(
             name=team_data.get("name", ""),
             mode=team_data.get("mode", "coordinate"),
-            members=team_agents,  # type: ignore
+            members=team_agents,
             description=team_data.get("description"),
             instructions=team_data.get("instructions"),
-            tools=team_tools,  # type: ignore,
+            tools=team_tools,  # type: ignore
             model=model,
             # show_members_responses=True,
             # show_tool_calls=True,
             **common_config,
         )
-        team.stream = should_stream(provider, team_tools)
         teams_by_id[team_id] = team
         if team_data.get("chat", True):
             teams.append(team)
 
-    playground = Playground(agents=agents, teams=teams)  # type: ignore
+    playground = Playground(agents=agents, teams=teams)
 
     app = playground.get_app()
     app.add_middleware(
@@ -225,7 +181,8 @@ async def run_server(config) -> None:
 def main():
     config_filename = sys.argv[1] if len(sys.argv) > 1 else "/agents.yaml"
     with open(config_filename, "r") as f:
-        config = yaml.safe_load(f)
+        expanded = os.path.expandvars(f.read())
+        config = yaml.safe_load(expanded)
 
     asyncio.run(run_server(config))
 
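Note how the pieces fit together: entrypoint.sh exports MODEL_PROVIDER and MODEL_NAME, and main() now runs os.path.expandvars() over the raw YAML before parsing, so the config can reference those variables. A sketch of the model schema that create_model_from_config() expects (the agent id and values are hypothetical; the real agents.yaml is not part of this diff):

```python
# Sketch of the new "model" config shape plus the expandvars step in
# main(). Agent id and model values here are hypothetical.
import os

import yaml

RAW = """
agents:
  analyst:
    model:
      name: ${MODEL_NAME}
      provider: ${MODEL_PROVIDER}
    temperature: 0.2
"""

os.environ.setdefault("MODEL_PROVIDER", "docker")
os.environ.setdefault("MODEL_NAME", "ai/qwen2.5")

config = yaml.safe_load(os.path.expandvars(RAW))
model = config["agents"]["analyst"]["model"]
print(model["name"], model["provider"])  # -> ai/qwen2.5 docker
```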