Skip to content

docs: Improve clarity of LitellmModel and InternalChatCompletionMessage #1343

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 19 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ The [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) enables
- **Handoffs**, which allow agents to delegate to other agents for specific tasks
- **Guardrails**, which enable the inputs to agents to be validated
- **Sessions**, which automatically maintain conversation history across agent runs
- **Tracing**, which lets you visualize and debug the flow of an agent's actions


In combination with Python, these primitives are powerful enough to express complex relationships between tools and agents, and allow you to build real-world applications without a steep learning curve. In addition, the SDK comes with built-in **tracing** that lets you visualize and debug your agentic flows, as well as evaluate them and even fine-tune models for your application.

Expand Down
53 changes: 53 additions & 0 deletions examples/exceptions/agents_exception.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
from __future__ import annotations

import asyncio

from agents import Agent, Runner, function_tool
from agents.exceptions import AgentsException

"""
This example demonstrates the use of the OpenAI Agents SDK with tools and comprehensive error handling.

The agent, 'Triage Agent', is configured to handle two tasks:
- Fetching weather information for a specified city using the `get_weather` tool.
- Adding two numbers using the `sum_numbers` tool.

The agent is instructed to use only one tool per execution cycle and can switch to another tool in subsequent cycles.
The example sets a `max_turns=1` limit to intentionally restrict the agent to a single turn, which may trigger a `MaxTurnsExceeded` error.

All exceptions are caught via `AgentsException`, the base class for SDK errors.
"""

# Define tools

@function_tool
async def get_weather(city: str) -> str:
"""Returns weather info for the specified city."""
return f"The weather in {city} is sunny."


@function_tool
async def sum_numbers(a: int, b: int) -> str:
"""Adds two numbers."""
result = a + b
return f"The sum of {a} and {b} is {result}."


agent = Agent(
name="Triage Agent",
instructions="Get weather or sum numbers. Use only one tool per turn.",
tools=[get_weather, sum_numbers],
)


async def main():
try:
user_input = input("Enter a message: ")
result = await Runner.run(agent, user_input, max_turns=1)
print("✅ Final Output:", result.final_output)
except AgentsException as e:
print(f"❌ Caught {e.__class__.__name__}: {e}")


if __name__ == "__main__":
asyncio.run(main())
72 changes: 72 additions & 0 deletions examples/exceptions/input_guardrail_tripwire_triggered.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
from __future__ import annotations

import asyncio
from typing import Any

from pydantic import BaseModel

from agents import (
Agent,
GuardrailFunctionOutput,
InputGuardrailTripwireTriggered,
RunContextWrapper,
Runner,
input_guardrail,
)

"""
This example demonstrates an OpenAI Agents SDK agent with an input guardrail to block math homework queries.

If the user asks a math question, the input guardrail blocks it by raising InputGuardrailTripwireTriggered.
"""


# Step 1: Define the output type of the guardrail agent
class MathHomeworkOutput(BaseModel):
is_math_homework: bool


# Step 2: Agent that checks if the input is math homework
guardrail_agent = Agent(
name="GuardrailAgent",
instructions="Return is_math_homework: true if the input is a math question.",
output_type=MathHomeworkOutput,
)


# Step 3: Define the async input guardrail function
@input_guardrail
async def my_input_guardrail(
context: RunContextWrapper[Any],
agent: Agent[Any],
inputs: str | list[Any],
) -> GuardrailFunctionOutput:
input_str = inputs if isinstance(inputs, str) else " ".join(str(i) for i in inputs)
result = await Runner.run(guardrail_agent, input_str)
output = result.final_output_as(MathHomeworkOutput)

return GuardrailFunctionOutput(
output_info=output,
tripwire_triggered=output.is_math_homework,
)


# Step 4: Main agent that responds to queries
async def main():
agent = Agent(
name="CustomerSupportAgent",
instructions="Answer user queries. Avoid math homework.",
input_guardrails=[my_input_guardrail],
tools=[],
)

user_input = "What is 2 + 2?"
try:
result = await Runner.run(agent, user_input)
print(result.final_output)
except InputGuardrailTripwireTriggered:
print("Sorry, I can't help with math homework.")


if __name__ == "__main__":
asyncio.run(main())
43 changes: 43 additions & 0 deletions examples/exceptions/max_turns_exceeded.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
from __future__ import annotations

import asyncio

from agents import Agent, Runner, function_tool
from agents.exceptions import MaxTurnsExceeded

"""
This example demonstrates an OpenAI Agents SDK agent that triggers a MaxTurnsExceeded error.

The 'TriageAgent' handles user queries using tools for fetching weather (`get_weather`) or adding numbers (`sum_numbers`). The instructions direct the agent to process both tasks in a single turn, but with `max_turns=1`, this causes a `MaxTurnsExceeded` error. The interactive loop processes user queries as direct string inputs, catching and displaying the `MaxTurnsExceeded` error message.
"""

@function_tool
def get_weather(city: str) -> str:
"""Returns weather info for the specified city."""
return f"The weather in {city} is sunny"


@function_tool
def sum_numbers(a: int, b: int) -> int:
"""Adds two numbers."""
return a + b


async def main():
agent = Agent(
name="TriageAgent",
instructions="Process both get_weather and sum_numbers in a single turn when asked for both.",
tools=[sum_numbers, get_weather],
)

user_input = "What is US Weather and sum 2 + 2."
try:
result = await Runner.run(agent, user_input, max_turns=1)
print(result.final_output)
except MaxTurnsExceeded as e:
print(f"Caught MaxTurnsExceeded: {e}")



if __name__ == "__main__":
asyncio.run(main())
37 changes: 37 additions & 0 deletions examples/exceptions/model_behavior_error.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
from __future__ import annotations

import asyncio
from typing import Literal

from pydantic import BaseModel

from agents import Agent, Runner
from agents.exceptions import ModelBehaviorError

"""
This example demonstrates an OpenAI Agents SDK agent that triggers a ModelBehaviorError due to invalid model output.

The 'MiniErrorBot' agent uses a Pydantic model (`Output`) requiring a `value` field with the literal 'EXPECTED_VALUE'. The instructions tell the model to return 'Hello', causing a `ModelBehaviorError` when the output fails validation. The interactive loop processes user queries as direct string inputs, catching and displaying the `ModelBehaviorError` message.
"""

class Output(BaseModel):
value: Literal["EXPECTED_VALUE"]


async def main():
agent = Agent(
name="MiniErrorBot",
instructions="Just say: Hello",
output_type=Output,
)

user_input = "hello"
try:
result = await Runner.run(agent, user_input)
print(result.final_output)
except ModelBehaviorError as e:
print(f"ModelBehaviorError: {e}")


if __name__ == "__main__":
asyncio.run(main())
62 changes: 62 additions & 0 deletions examples/exceptions/output_guardrail_tripwire_triggered.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
from __future__ import annotations

import asyncio

from pydantic import BaseModel

from agents import (
Agent,
GuardrailFunctionOutput,
OutputGuardrailTripwireTriggered,
Runner,
output_guardrail,
)

"""
This example demonstrates an OpenAI Agents SDK agent with an output guardrail to block math homework responses.

The 'Assistant' agent processes user queries provided as direct string inputs in an interactive loop. An output guardrail, using a Pydantic model (`MathHomeworkOutput`) and a guardrail agent, checks if the response is a math homework answer. If detected, the guardrail raises `OutputGuardrailTripwireTriggered`, and a refusal message is printed. The loop continues to prompt for new inputs, handling each independently.
"""


class MathHomeworkOutput(BaseModel):
is_math_homework: bool


guardrail_agent = Agent(
name="GuardrailAgent",
instructions="Check if the output is a math homework answer.",
output_type=MathHomeworkOutput,
)


@output_guardrail
async def math_guardrail(context, agent: Agent, output: str) -> GuardrailFunctionOutput:
result = await Runner.run(guardrail_agent, output)
output_data = result.final_output_as(MathHomeworkOutput)
return GuardrailFunctionOutput(
output_info=output_data,
tripwire_triggered=output_data.is_math_homework,
)


async def main():
agent = Agent(
name="Assistant",
instructions="Answer user queries.",
output_guardrails=[math_guardrail],
)

user_input = "What is 2 + 2"

try:
result = await Runner.run(agent, user_input)
print(result.final_output)
except OutputGuardrailTripwireTriggered:
print(
"OutputGuardrailTripwireTriggered, I can't provide math homework answers."
)


if __name__ == "__main__":
asyncio.run(main())
37 changes: 37 additions & 0 deletions examples/exceptions/user_error.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
from __future__ import annotations

import asyncio

from agents import Agent, Runner, function_tool
from agents.exceptions import UserError

"""
This example demonstrates raising a `UserError` manually during tool execution.
This avoids mypy errors but simulates incorrect SDK usage or logic issues.
"""


@function_tool
def invalid_tool() -> str:
# Simulate misuse or invalid condition
raise UserError("This tool was misused and raised a UserError intentionally.")


async def main():
agent = Agent(
name="Assistant",
instructions="Use the tool to demonstrate a manual UserError.",
tools=[invalid_tool],
tool_use_behavior="run_llm_again", # ✅ valid, passes mypy
)

user_input = "Trigger the error"
try:
result = await Runner.run(agent, user_input)
print(result.final_output)
except UserError as e:
print(f"UserError caught as expected: {e}")


if __name__ == "__main__":
asyncio.run(main())
19 changes: 14 additions & 5 deletions src/agents/extensions/models/litellm_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,17 +46,26 @@


class InternalChatCompletionMessage(ChatCompletionMessage):
    """An internal subclass used by the SDK to carry the agent's reasoning content.

    This separates the agent's internal thought process from the final message
    sent to the user, which is crucial for debugging and understanding
    how the agent arrived at its conclusion without modifying the standard model output.
    """

    # The model's reasoning text associated with this message; not part of the
    # standard ChatCompletionMessage schema.
    reasoning_content: str


class LitellmModel(Model):
"""This class enables using any model via LiteLLM. LiteLLM allows you to acess OpenAPI,
Anthropic, Gemini, Mistral, and many other models.
See supported models here: [litellm models](https://docs.litellm.ai/docs/providers).
"""This class acts as a flexible bridge, allowing you to use a wide variety
of LLM providers with the Agents SDK via LiteLLM.

By using LiteLLM, you can easily switch between different models and providers
(like OpenAI, Anthropic, Gemini, Mistral, and many others) without changing
your core agent code.

See a list of all supported models here: [litellm models](https://docs.litellm.ai/docs/providers).
"""

def __init__(
Expand Down