# OpenAI Agents SDK Integration

Anvil integrates with OpenAI’s Agents SDK through the to_openai_agents() adapter.

Install the integration extra:

pip install "anvil-agent[openai-agents]"
from anvil import Anvil
from agents import Agent, Runner
# Create Anvil tools
anvil = Anvil()
search = anvil.use_tool(
name="web_search",
intent="Search the web for current information"
)
weather = anvil.use_tool(
name="get_weather",
intent="Get current weather for a location"
)
# Convert to OpenAI Agents tools
oai_tools = [
search.to_openai_agents(),
weather.to_openai_agents(),
]
# Create agent
agent = Agent(
name="assistant",
instructions="You are a helpful assistant with access to search and weather tools.",
tools=oai_tools,
)
# Run
runner = Runner()
result = runner.run(agent, "What's the weather like in Paris today?")
print(result.final_output)

The to_openai_agents() method:

  1. Creates an OpenAI Agents FunctionTool
  2. Generates a JSON Schema for parameters
  3. Wraps the execution with proper JSON argument parsing
  4. Returns results as strings for the agent
# Anvil tool
tool = anvil.use_tool(
name="search_docs",
intent="Search documentation",
inputs=[
InputParam(name="query", param_type="str", required=True),
InputParam(name="limit", param_type="int", default=10),
]
)
# Converts to approximately:
def search_docs_wrapper(ctx, args: str) -> str:
parsed = json.loads(args)
result = anvil_tool.run(**parsed)
return json.dumps(result) if isinstance(result, dict) else str(result)
oai_tool = FunctionTool(
name="search_docs",
description="Search documentation",
params_json_schema={
"type": "object",
"properties": {
"query": {"type": "string"},
"limit": {"type": "integer", "default": 10}
},
"required": ["query"]
},
on_invoke_tool=search_docs_wrapper,
)

Anvil automatically generates JSON schemas from your input parameters:

from anvil import InputParam
api_tool = anvil.use_tool(
name="api_request",
intent="Make an HTTP API request",
inputs=[
InputParam(name="url", param_type="str", required=True),
InputParam(name="method", param_type="str", default="GET"),
InputParam(name="headers", param_type="dict", default={}),
InputParam(name="timeout", param_type="int", default=30),
]
)
oai_tool = api_tool.to_openai_agents()
# Generated schema:
# {
# "type": "object",
# "properties": {
# "url": {"type": "string"},
# "method": {"type": "string", "default": "GET"},
# "headers": {"type": "object", "default": {}},
# "timeout": {"type": "integer", "default": 30}
# },
# "required": ["url"]
# }

Anvil types map to JSON Schema types:

| Anvil Type | JSON Schema Type |
| ---------- | ---------------- |
| str        | string           |
| int        | integer          |
| float      | number           |
| bool       | boolean          |
| list       | array            |
| dict       | object           |

Use Anvil tools with multiple agents:

from agents import Agent, Runner, handoff
# Create specialized tools
research_tool = anvil.use_tool(
name="research",
intent="Research a topic thoroughly"
).to_openai_agents()
write_tool = anvil.use_tool(
name="write",
intent="Write polished content"
).to_openai_agents()
# Create specialized agents
researcher = Agent(
name="researcher",
instructions="You research topics and gather information.",
tools=[research_tool],
)
writer = Agent(
name="writer",
instructions="You write clear, engaging content based on research.",
tools=[write_tool],
)
# Main agent that can hand off
main_agent = Agent(
name="coordinator",
instructions="Coordinate research and writing tasks.",
handoffs=[researcher, writer],
)
# Run
runner = Runner()
result = runner.run(main_agent, "Write an article about quantum computing")

OpenAI Agents SDK supports streaming:

from agents import Agent, Runner
agent = Agent(
name="assistant",
tools=[search.to_openai_agents()],
)
runner = Runner()
# Stream responses
for event in runner.run_streamed(agent, "Search for AI news"):
if event.type == "text":
print(event.text, end="", flush=True)

Access run context in your tools:

from anvil.adapters.openai_agents import to_openai_agents_tool_class
# Use the class variant for context access
tool_class = to_openai_agents_tool_class(anvil_tool)
# The tool receives RunContextWrapper with:
# - context.run_id
# - context.agent_name
# - context.model

Errors are returned as tool results:

# If a tool fails, the error message is returned to the agent
# The agent can then decide how to handle it
agent = Agent(
name="assistant",
instructions="""
You have access to various tools. If a tool fails,
try an alternative approach or ask for clarification.
""",
tools=[search.to_openai_agents()],
)

With self-healing enabled, Anvil attempts to fix errors before returning them.

Mix Anvil tools with OpenAI’s built-in capabilities:

from agents import Agent, CodeInterpreterTool, FileSearchTool
# Built-in tools
code_tool = CodeInterpreterTool()
file_tool = FileSearchTool()
# Anvil tools
custom_tool = anvil.use_tool(
name="custom_api",
intent="Call custom API"
).to_openai_agents()
# Use together
agent = Agent(
name="developer",
tools=[code_tool, file_tool, custom_tool],
)

The SDK supports async operations:

import asyncio
from agents import Agent, Runner
async def main():
agent = Agent(
name="assistant",
tools=[search.to_openai_agents()],
)
runner = Runner()
result = await runner.run_async(agent, "Search for something")
print(result.final_output)
asyncio.run(main())