AWS Strands is a comprehensive Python framework for building production-grade AI agents with built-in session management, multi-agent orchestration, and enterprise features like observability and guardrails.
# For LiteLLM (most common)
pip install litellm

# For observability
pip install opentelemetry-api opentelemetry-sdk

# For specific providers
pip install anthropic  # For Claude
pip install openai     # For OpenAI
Strands provides built-in tools and supports custom tools:
# Equip an agent with built-in Strands tools.
# NOTE: only import the tools you actually register below — the original
# snippet also imported `brave_search` without ever passing it to the agent.
from strands_tools import http_request, python_repl

# `Agent` and `model` are defined earlier in the tutorial.
agent = Agent(
    system_prompt="You can make HTTP requests and run Python code",
    tools=[http_request, python_repl],  # tools the model may invoke
    model=model,
)
"""Weather assistant example: a Strands agent that queries the NWS API."""
import os

from dotenv import load_dotenv
from strands import Agent
from strands.models.litellm import LiteLLMModel
from strands_tools import http_request

load_dotenv()

WEATHER_PROMPT = """You are a weather assistant with HTTP capabilities.
Use the National Weather Service API:
1. Get coordinates: https://api.weather.gov/points/{latitude},{longitude}
2. Use returned forecast URL to get weather data
3. Present information in a clear, user-friendly format"""

# DeepSeek-V3 served through Nebius via the LiteLLM provider.
model = LiteLLMModel(
    client_args={"api_key": os.getenv("NEBIUS_API_KEY")},
    model_id="nebius/deepseek-ai/DeepSeek-V3-0324",
    params={"max_tokens": 1500, "temperature": 0.7},
)

agent = Agent(
    system_prompt=WEATHER_PROMPT,
    tools=[http_request],
    model=model,
)

response = agent("Compare temperature in New York and Chicago")
print(response)
# Attach a session manager so the agent keeps memory across turns.
# `model` and `session_manager` are created earlier in the tutorial.
agent = Agent(
    model=model,
    session_manager=session_manager,
    system_prompt="You are a friendly assistant",
)
4. Use Across Sessions
# First session
print("User: My name is Alice")
response1 = agent("My name is Alice")
print(f"Agent: {response1}")

# Later session (same session_id)
print("\nUser: What's my name?")
response2 = agent("What's my name?")
print(f"Agent: {response2}")  # Remembers "Alice"
"""Human-in-the-loop example: tools that pause the agent for user input/approval."""
# FIX: the original snippet called os.getenv() without importing os,
# which raises NameError at runtime.
import os

from strands import Agent, tool
from strands.models.litellm import LiteLLMModel


@tool
def request_human_input(question: str) -> str:
    """Ask the human user for input.

    Args:
        question: What to ask the user

    Returns:
        User's response
    """
    print(f"\n🤖 Agent asks: {question}")
    return input("👤 Your answer: ")


@tool
def request_approval(action: str, details: str) -> bool:
    """Request human approval before taking an action.

    Args:
        action: Action to perform
        details: Details about the action

    Returns:
        True if approved, False otherwise
    """
    print(f"\n🤖 Agent requests approval:")
    print(f"  Action: {action}")
    print(f"  Details: {details}")
    response = input("👤 Approve? (yes/no): ")
    return response.lower() in ['yes', 'y']


model = LiteLLMModel(
    client_args={"api_key": os.getenv("NEBIUS_API_KEY")},
    model_id="nebius/deepseek-ai/DeepSeek-V3-0324",
)

agent = Agent(
    system_prompt="""
    You help users complete tasks, but must:
    1. Ask for clarification when needed using request_human_input
    2. Get approval before sensitive actions using request_approval
    """,
    tools=[request_human_input, request_approval],
    model=model,
)

response = agent("Send an email to the team about the project update")
# .env file
NEBIUS_API_KEY=your_nebius_api_key
OPENAI_API_KEY=your_openai_api_key        # If using OpenAI
ANTHROPIC_API_KEY=your_anthropic_api_key  # If using Claude

# For MCP servers
GITHUB_PERSONAL_ACCESS_TOKEN=your_github_token
Always use session managers for multi-turn conversations:
# ✓ Good: Persistent memory
session_manager = FileSessionManager(
    session_id=f"user_{user_id}",
    storage_dir="./sessions",
)
agent = Agent(model=model, session_manager=session_manager)

# ✗ Bad: No memory between calls
agent = Agent(model=model)  # Forgets after each call
Clear System Prompts
Write detailed system prompts with:
Agent’s role and capabilities
How to use tools
Output format expectations
Error handling guidance
# A detailed system prompt: role, tool-usage steps, output format,
# and error-handling guidance.
system_prompt = """You are a weather assistant with HTTP capabilities.
When retrieving weather:
1. First get coordinates: https://api.weather.gov/points/{lat},{lon}
2. Use the forecast URL from the response
3. Format weather data in a clear, readable way
4. Handle errors gracefully
Always explain weather conditions in user-friendly terms."""