autogen_agentchat API Reference
The autogen_agentchat package provides high-level abstractions for building conversational agents and teams.

Agent Classes

AI-powered assistant agent with tool use and handoff capabilities.
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient

assistant = AssistantAgent(
    name="assistant",
    model_client=OpenAIChatCompletionClient(model="gpt-4"),
    tools=[calculator_tool],
    handoffs=["supervisor"],
    system_message="You are a helpful assistant.",
    description="General purpose assistant"
)

# Run the agent
result = await assistant.run(task="Calculate 25 * 4")
print(result.messages)
name
str
required
Unique name for the agent
model_client
ChatCompletionClient
required
LLM client for generating responses
tools
List[Tool] | None
Tools the agent can use
handoffs
List[Handoff | str] | None
Other agents this agent can transfer control to
system_message
str | None
System prompt for the agent
description
str
required
Description of the agent’s capabilities
model_context
ChatCompletionContext | None
Context manager for conversation history
memory
List[Memory] | None
Memory modules for the agent
reflect_on_tool_use
bool
Whether to reflect on tool execution results (default: False)
max_tool_iterations
int
Maximum tool call iterations per turn (default: 1)

Methods

on_messages
async method
Process messages and return a response
response = await assistant.on_messages(
    messages=[TextMessage(content="Hello", source="user")],
    cancellation_token=token
)
messages
Sequence[ChatMessage]
required
Conversation messages
cancellation_token
CancellationToken | None
Token to cancel the operation
Returns: Response with the agent’s reply
on_messages_stream
async generator
Stream messages and events as they’re generated
async for message in assistant.on_messages_stream(messages):
    if isinstance(message, TextMessage):
        print(message.content)
run
async method
Run the agent with a task
result = await assistant.run(
    task="Write a poem",
    termination_condition=MaxMessageTermination(10)
)
task
str | ChatMessage
required
Initial task or message
termination_condition
TerminationCondition | None
Condition to stop execution
Returns: TaskResult with conversation history
Agent representing a human user with input capabilities.
from autogen_agentchat.agents import UserProxyAgent

user = UserProxyAgent(
    name="user",
    description="Human user"
)

# Agent will prompt for input during execution
result = await user.run(task="Enter your message")
name
str
required
Agent name
description
str
required
Agent description
input_func
Callable | None
Custom function for getting user input
Agent that executes code in a sandboxed environment.
from autogen_agentchat.agents import CodeExecutorAgent
from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor

executor_agent = CodeExecutorAgent(
    name="executor",
    code_executor=DockerCommandLineCodeExecutor(),
    description="Executes Python code"
)
name
str
required
Agent name
code_executor
CodeExecutor
required
Code execution backend
approval_func
ApprovalFuncType | None
Function to approve code before execution
description
str
required
Agent description
Meta-agent that manages an internal team of agents.
from autogen_agentchat.agents import SocietyOfMindAgent
from autogen_agentchat.teams import RoundRobinGroupChat

inner_team = RoundRobinGroupChat([agent1, agent2])

som_agent = SocietyOfMindAgent(
    name="team",
    group_chat=inner_team,
    description="Coordinated team of specialists"
)
name
str
required
Agent name
group_chat
BaseGroupChat
required
Internal team configuration
description
str
required
Agent description
Agent that filters messages based on configurable criteria.
from autogen_agentchat.agents import MessageFilterAgent, PerSourceFilter

filter_agent = MessageFilterAgent(
    name="filter",
    filter_config=PerSourceFilter(max_messages_per_source=3),
    description="Limits messages per source"
)
name
str
required
Agent name
filter_config
MessageFilterConfig
required
Filter configuration
description
str
required
Agent description
Base class for all chat agents in the framework.
from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import ChatMessage

class CustomAgent(BaseChatAgent):
    async def on_messages(
        self,
        messages: Sequence[ChatMessage],
        cancellation_token: CancellationToken | None = None
    ) -> Response:
        # Custom message handling
        return Response(
            chat_message=TextMessage(content="Response", source=self.name)
        )
name
str
required
Agent name
description
str
required
Agent description

Team Classes

Team where agents take turns in a fixed order.
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import MaxMessageTermination

team = RoundRobinGroupChat(
    participants=[agent1, agent2, agent3],
    termination_condition=MaxMessageTermination(20)
)

result = await team.run(task="Solve this problem collaboratively")
participants
List[BaseChatAgent]
required
List of agents in the team
termination_condition
TerminationCondition | None
Condition to stop the conversation
Team with dynamic agent selection using an LLM.
from autogen_agentchat.teams import SelectorGroupChat
from autogen_ext.models.openai import OpenAIChatCompletionClient

team = SelectorGroupChat(
    participants=[researcher, writer, reviewer],
    model_client=OpenAIChatCompletionClient(model="gpt-4"),
    termination_condition=MaxMessageTermination(30)
)

result = await team.run(task="Research and write an article")
participants
List[BaseChatAgent]
required
Agents available for selection
model_client
ChatCompletionClient
required
LLM for selecting the next speaker
selector_prompt
str | None
Custom prompt for agent selection
allow_repeated_speaker
bool
Whether the same agent can speak twice in a row (default: False)
Dynamic team with agent handoffs and context transfer.
from autogen_agentchat.teams import Swarm
from autogen_agentchat.conditions import TextMentionTermination

team = Swarm(
    participants=[triage_agent, specialist1, specialist2],
    termination_condition=TextMentionTermination("TERMINATE")
)

# Agents can transfer control using handoffs
result = await team.run(task="Handle customer request")
participants
List[BaseChatAgent]
required
Agents in the swarm
termination_condition
TerminationCondition | None
Condition to stop execution
Specialized team for the Magentic-One architecture.
from autogen_agentchat.teams import MagenticOneGroupChat

team = MagenticOneGroupChat(
    participants=[orchestrator, web_surfer, file_surfer, coder, executor],
    max_turns=50
)
participants
List[BaseChatAgent]
required
Specialized agents for the Magentic-One pattern
max_turns
int
Maximum conversation turns
Graph-based team with custom agent flow.
from autogen_agentchat.teams import DiGraphBuilder, GraphFlow

# Build agent graph
builder = DiGraphBuilder()
builder.add_node("start", agent1)
builder.add_node("process", agent2)
builder.add_node("end", agent3)
builder.add_edge("start", "process")
builder.add_edge("process", "end")

graph = builder.build()
team = GraphFlow(graph=graph, entry_point="start")

result = await team.run(task="Process through pipeline")
graph
DiGraph
required
Directed graph of agents
entry_point
str
required
Starting node identifier
Base class for all team implementations.
from autogen_agentchat.teams import BaseGroupChat

class CustomTeam(BaseGroupChat):
    async def run(
        self,
        task: str | ChatMessage,
        termination_condition: TerminationCondition | None = None
    ) -> TaskResult:
        # Custom team logic
        pass

Message Types

Plain text message.
from autogen_agentchat.messages import TextMessage

message = TextMessage(
    content="Hello, how can I help?",
    source="assistant"
)
content
str
required
Message text content
source
str
required
Name of the agent that created the message
Message indicating agent handoff.
from autogen_agentchat.messages import HandoffMessage

handoff = HandoffMessage(
    target="specialist",
    content="Transferring to specialist for details",
    source="triage"
)
target
str
required
Name of the agent to transfer to
content
str
required
Context for the handoff
Summary of tool execution results.
from autogen_agentchat.messages import ToolCallSummaryMessage

summary = ToolCallSummaryMessage(
    content="Executed calculator: 25 * 4 = 100",
    source="assistant"
)
Message with structured data.
from autogen_agentchat.messages import StructuredMessage
from pydantic import BaseModel

class Report(BaseModel):
    title: str
    summary: str
    score: float

message = StructuredMessage[Report](
    content=Report(title="Analysis", summary="...", score=0.95),
    source="analyst"
)

Events

Event emitted when a tool is called.
from autogen_agentchat.messages import ToolCallRequestEvent

# Emitted during streaming
event = ToolCallRequestEvent(
    source="assistant",
    content=[FunctionCall(id="1", name="calc", arguments="{}")]
)
Event emitted after tool execution.
from autogen_agentchat.messages import ToolCallExecutionEvent

event = ToolCallExecutionEvent(
    source="assistant",
    tool_call_id="call_123",
    result="100"
)
Streaming chunk from the model.
from autogen_agentchat.messages import ModelClientStreamingChunkEvent

# Emitted during streaming responses
event = ModelClientStreamingChunkEvent(
    source="assistant",
    content="Hello"
)
Agent’s internal reasoning.
from autogen_agentchat.messages import ThoughtEvent

event = ThoughtEvent(
    source="assistant",
    thought="I should use the calculator tool for this"
)

Response Types

Agent response to messages.
from autogen_agentchat.base import Response

response = Response(
    chat_message=TextMessage(content="Done", source="assistant"),
    inner_messages=[event1, event2]
)
chat_message
ChatMessage
The final response message
inner_messages
List[BaseAgentEvent]
Intermediate events and messages
Result of running an agent or team.
result = await agent.run(task="Write code")

print(result.messages)  # All messages
print(result.stop_reason)  # Why it stopped
messages
List[ChatMessage]
Complete conversation history
stop_reason
str | None
Reason for termination

Termination Conditions

Stop after a maximum number of messages.
from autogen_agentchat.conditions import MaxMessageTermination

condition = MaxMessageTermination(max_messages=20)
Stop when specific text is mentioned.
from autogen_agentchat.conditions import TextMentionTermination

condition = TextMentionTermination("TERMINATE")
Stop on a specific message type.
from autogen_agentchat.conditions import StopMessageTermination

condition = StopMessageTermination()
Stop after a time limit.
from autogen_agentchat.conditions import TimeoutTermination

condition = TimeoutTermination(timeout_seconds=300)
Stop after token budget is exhausted.
from autogen_agentchat.conditions import TokenUsageTermination

condition = TokenUsageTermination(max_tokens=10000)

State Management

Serializable state for assistant agents.
# Save state
state = await assistant.save_state()

# Load state
await assistant.load_state(state)

Logging

from autogen_agentchat import TRACE_LOGGER_NAME, EVENT_LOGGER_NAME
import logging

# Configure logging
logging.getLogger(TRACE_LOGGER_NAME).setLevel(logging.DEBUG)
logging.getLogger(EVENT_LOGGER_NAME).setLevel(logging.INFO)
TRACE_LOGGER_NAME
str
Logger name: "autogen_agentchat"
EVENT_LOGGER_NAME
str
Logger name: "autogen_agentchat.events"

See Also

Build docs developers (and LLMs) love