Skip to main content
This comprehensive guide shows you how to build an AI agent that can execute custom Python functions using the OpenAI Responses API.

Prerequisites

  • cycls installed
  • OpenAI API key

Install cycls with:
pip install cycls

Step 1: Define a Tool

Create a standard Python function. In this example, we’ll create a mock weather function.
import json

def get_weather(location):
    """Mock weather lookup: returns a fixed JSON payload for any city."""
    weather = {"temp": "24", "unit": "celsius"}
    return json.dumps(weather)

Step 2: Define the Schema

OpenAI needs to know what your tool does. You define this in a JSON schema format.
# JSON-schema description of get_weather, passed to the Responses API so the
# model knows the tool exists, when to use it, and what arguments it takes.
tools = [{
    "type": "function",
    "name": "get_weather",  # must match the Python function you dispatch on
    "description": "Get current temperature",  # tells the model WHEN to call it
    "parameters": {
        "type": "object",
        "properties": {
            "location": {"type": "string"}
        },
        "required": ["location"]
    }
}]
Note: The description fields are crucial—they tell the LLM when to use your tool.

Step 3: Create the Agent

import cycls
import json

@cycls.app(pip=["openai"], copy=[".env"])
async def app(context):
    """Agent that answers via GPT-4o, executing local tools on request.

    Yields the model's final text response. `context.messages` is the
    running conversation history (assumed list of Responses-API items —
    provided by cycls).
    """
    from openai import OpenAI

    client = OpenAI()

    # Tool logic: a mock weather lookup. The location argument is
    # accepted (the schema requires it) but ignored by this stub.
    def get_weather(location):
        return json.dumps({"temp": "24", "unit": "celsius"})

    # Dispatch table: adding a tool means one entry here plus one schema below.
    available_tools = {"get_weather": get_weather}

    # Tool schema: tells the model what the tool does and how to call it.
    tools = [{
        "type": "function",
        "name": "get_weather",
        "description": "Get current temperature",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string"}
            },
            "required": ["location"]
        }
    }]

    # First call: the model either answers directly or emits function_call items.
    response = client.responses.create(
        model="gpt-4o",
        input=context.messages,
        tools=tools
    )

    # Keep the model output (including any function_call items) in history.
    context.messages.extend(response.output)

    # Execute every requested tool call. Every function_call must get a
    # matching function_call_output (same call_id) or the follow-up request
    # is rejected — so an unknown tool name yields an error payload instead
    # of being silently skipped.
    tool_called = False
    for item in response.output:
        if item.type != "function_call":
            continue
        tool_called = True
        handler = available_tools.get(item.name)
        if handler is None:
            output = json.dumps({"error": f"unknown tool: {item.name}"})
        else:
            args = json.loads(item.arguments)
            output = handler(**args)
        context.messages.append({
            "type": "function_call_output",
            "call_id": item.call_id,
            "output": output
        })

    # Second call: let the model turn the tool results into a final answer.
    if tool_called:
        final = client.responses.create(
            model="gpt-4o",
            input=context.messages
        )
        yield final.output_text
    else:
        yield response.output_text

app.local()

Step 4: Set Up Environment

Create a .env file with your OpenAI API key:
OPENAI_API_KEY=sk-proj-...

Step 5: Run the Agent

python app.py

Full Code

Here is the complete app.py:
import cycls
import json

@cycls.app(pip=["openai"], copy=[".env"])
async def app(context):
    """Agent that answers via GPT-4o, executing local tools on request.

    Yields the model's final text response. `context.messages` is the
    running conversation history (assumed list of Responses-API items —
    provided by cycls).
    """
    from openai import OpenAI

    client = OpenAI()

    # Tool logic: a mock weather lookup. The location argument is
    # accepted (the schema requires it) but ignored by this stub.
    def get_weather(location):
        return json.dumps({"temp": "24", "unit": "celsius"})

    # Dispatch table: adding a tool means one entry here plus one schema below.
    available_tools = {"get_weather": get_weather}

    # Tool schema: tells the model what the tool does and how to call it.
    tools = [{
        "type": "function",
        "name": "get_weather",
        "description": "Get current temperature",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string"}
            },
            "required": ["location"]
        }
    }]

    # First call: the model either answers directly or emits function_call items.
    response = client.responses.create(
        model="gpt-4o",
        input=context.messages,
        tools=tools
    )

    # Keep the model output (including any function_call items) in history.
    context.messages.extend(response.output)

    # Execute every requested tool call. Every function_call must get a
    # matching function_call_output (same call_id) or the follow-up request
    # is rejected — so an unknown tool name yields an error payload instead
    # of being silently skipped.
    tool_called = False
    for item in response.output:
        if item.type != "function_call":
            continue
        tool_called = True
        handler = available_tools.get(item.name)
        if handler is None:
            output = json.dumps({"error": f"unknown tool: {item.name}"})
        else:
            args = json.loads(item.arguments)
            output = handler(**args)
        context.messages.append({
            "type": "function_call_output",
            "call_id": item.call_id,
            "output": output
        })

    # Second call: let the model turn the tool results into a final answer.
    if tool_called:
        final = client.responses.create(
            model="gpt-4o",
            input=context.messages
        )
        yield final.output_text
    else:
        yield response.output_text

app.local()