Skip to main content

Overview

FnCallAgent is a widely applicable agent that integrates LLM with tool use capabilities. It can automatically call tools based on the LLM’s function calling responses and iterate until task completion.

Class Signature

from qwen_agent.agents import FnCallAgent

class FnCallAgent(Agent):
    def __init__(
        self,
        function_list: Optional[List[Union[str, Dict, BaseTool]]] = None,
        llm: Optional[Union[Dict, BaseChatModel]] = None,
        system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
        name: Optional[str] = None,
        description: Optional[str] = None,
        files: Optional[List[str]] = None,
        **kwargs
    )

Constructor Parameters

function_list
List[Union[str, Dict, BaseTool]]
List of tools available to the agent:
  • Tool name: 'code_interpreter'
  • Tool config: {'name': 'code_interpreter', 'timeout': 30}
  • Tool object: CodeInterpreter()
llm
Union[Dict, BaseChatModel]
LLM configuration or instance:
{
    'model': 'qwen-max',
    'api_key': 'your-api-key',
    'model_type': 'qwen_dashscope'
}
system_message
str
default: DEFAULT_SYSTEM_MESSAGE (a module-level constant, not a string literal)
System message for LLM
name
str
Agent name
description
str
Agent description for multi-agent scenarios
files
List[str]
Files for tools that require file access

Methods

run

def run(
    self,
    messages: List[Union[Dict, Message]],
    lang: str = 'en',
    **kwargs
) -> Iterator[List[Message]]
Runs the agent with automatic function calling.
messages
List[Union[Dict, Message]]
required
Conversation messages
lang
str
default:"en"
Language: 'en' or 'zh'
return
Iterator[List[Message]]
Streaming response with function calls and results

Workflow

The FnCallAgent follows this execution pattern:
  1. Sends messages to LLM with available functions
  2. LLM responds (may include function calls)
  3. If function call detected:
    • Execute the tool
    • Append result to messages
    • Go back to step 1
  4. If no function call, return final response
Max iterations: Controlled by MAX_LLM_CALL_PER_RUN setting

Usage Examples

Basic Function Calling

from qwen_agent.agents import FnCallAgent
from qwen_agent.llm.schema import Message

# Create agent with tools
# Build an agent that can run Python code via the code-interpreter tool.
agent = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max', 'api_key': 'your-api-key'},
)

messages = [Message(role='user', content='Calculate the factorial of 10')]

# run() streams batches of messages; inspect each message as it arrives.
for batch in agent.run(messages):
    for message in batch:
        if message.content:
            print(f"Content: {message.content}")
        if message.function_call:
            print(f"Tool: {message.function_call.name}")
            print(f"Args: {message.function_call.arguments}")

Multiple Tools

# Tools may be given as names, config dicts, or instances — mixed freely.
tools = [
    'code_interpreter',
    'web_extractor',
    {'name': 'retrieval', 'max_ref_token': 4000},
]

agent = FnCallAgent(function_list=tools, llm={'model': 'qwen-max'})

messages = [
    Message(
        role='user',
        content='Search for Python tutorials and create a study plan',
    ),
]

# Print only the latest message of each streamed batch.
for batch in agent.run(messages):
    print(batch[-1].content)

With File Access

# files= makes local paths and URLs available to tools that need file access.
agent = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max'},
    files=['data.csv', 'https://example.com/dataset.xlsx'],
)

messages = [
    Message(
        role='user',
        content='Analyze the data.csv file and create visualizations',
    ),
]

for batch in agent.run(messages):
    for message in batch:
        print(message.content)

Custom Tool

from qwen_agent.tools.base import BaseTool, register_tool

@register_tool('weather_tool')
class WeatherTool(BaseTool):
    """Example custom tool that returns a canned weather report for a city."""

    description = 'Get weather information for a city'
    # JSON Schema for the tool's arguments: a single required 'city' string.
    parameters = {
        'type': 'object',
        'properties': {
            'city': {
                'type': 'string',
                'description': 'City name'
            }
        },
        'required': ['city']
    }

    def call(self, params, **kwargs):
        # Arguments may arrive as a JSON string; validate/normalize them first.
        args = self._verify_json_format_args(params)
        # Simulate weather API call
        return f"Weather in {args['city']}: Sunny, 22°C"

# Use the custom tool by passing an instance directly to function_list.
agent = FnCallAgent(
    function_list=[WeatherTool()],
    llm={'model': 'qwen-max'},
)

messages = [Message(role='user', content='What is the weather in Paris?')]

for batch in agent.run(messages):
    print(batch[-1].content)

Multi-turn with Function Calling

agent = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max'}
)


def _drain(agent, messages):
    """Consume the streaming run and return the final response batch.

    Avoids relying on the for-loop variable leaking after the loop, which
    would raise NameError if the generator yielded nothing.
    """
    response = []
    for response in agent.run(messages):
        pass
    return response


messages = []

# First turn
messages.append(Message(role='user', content='Create a list of prime numbers up to 20'))
messages.extend(_drain(agent, messages))

print(f"Assistant: {messages[-1].content}")

# Second turn - uses previous context
messages.append(Message(role='user', content='Now calculate their sum'))
messages.extend(_drain(agent, messages))

print(f"Assistant: {messages[-1].content}")

Handling Tool Errors

agent = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max'},
)

messages = [
    Message(
        role='user',
        content='Run this code: print(undefined_variable)',
    ),
]

for batch in agent.run(messages):
    for message in batch:
        if message.role == 'function':
            # Tool result (may contain error message)
            print(f"Tool result: {message.content}")
        elif message.content:
            # Agent's response after seeing tool result
            print(f"Agent: {message.content}")

Language-Specific Usage

# Agent configured with a Chinese system message.
agent_zh = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max'},
    system_message='你是一个有用的AI助手。',
)

messages = [Message(role='user', content='计算1到100的和')]

# lang='zh' selects Chinese for the run.
for batch in agent_zh.run(messages, lang='zh'):
    print(batch[-1].content)

Non-streaming Mode

agent = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max'},
)

messages = [Message(role='user', content='Generate random numbers')]

# run_nonstream returns the complete response at once; iterate it directly.
for message in agent.run_nonstream(messages):
    if message.function_call:
        print(f"Called: {message.function_call.name}")
    elif message.role == 'function':
        print(f"Result: {message.content}")
    elif message.content:
        print(f"Final: {message.content}")

Function Call Flow Example

# User asks a question
messages = [Message(role='user', content='What is 15 factorial?')]

# Agent execution (the run() loop described under "Workflow" above):
# 1. LLM receives: [user message] + [available functions]
# 2. LLM responds with function call:
#    Message(role='assistant', function_call={
#        'name': 'code_interpreter',
#        'arguments': '{"code": "import math\nprint(math.factorial(15))"}'
#    })
# 3. Agent executes tool:
#    result = code_interpreter.call(...)
# 4. Tool result added:
#    Message(role='function', name='code_interpreter', content='1307674368000')
# 5. LLM receives updated messages
# 6. LLM responds with final answer:
#    Message(role='assistant', content='The factorial of 15 is 1,307,674,368,000')

Memory Management

from qwen_agent.memory import Memory

# Files supplied at construction time become available via the agent's memory.
agent = FnCallAgent(
    function_list=['code_interpreter'],
    llm={'model': 'qwen-max'},
    files=['document.pdf'],
)

# agent.mem exposes the registered files — presumably populated from files=;
# verify against the FnCallAgent constructor.
print(f"System files: {agent.mem.system_files}")

Configuration

Max Iterations

from qwen_agent.settings import MAX_LLM_CALL_PER_RUN

# Upper bound on LLM calls within a single run() (default is typically 10).
# Modify in settings if needed.
print(f"Max LLM calls per run: {MAX_LLM_CALL_PER_RUN}")

See Also

Build docs developers (and LLMs) love