Get started with Memori in just a few minutes. This guide shows you how to add persistent memory to your LLM applications using either Memori Cloud or your own database.
from memori import Memori
from openai import OpenAI

# Initialize OpenAI and Memori.
# Registering the client lets Memori intercept chat calls so it can
# record conversations and inject recalled context automatically.
client = OpenAI()
mem = Memori().llm.register(client)

# Set attribution to identify the user and process; memories are
# stored and recalled per (entity_id, process_id) pair.
mem.attribution(entity_id="user_123", process_id="support_agent")

# First conversation - teach the AI
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "My favorite color is blue."}],
)
print(response.choices[0].message.content)

# Second conversation - Memori recalls automatically
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What's my favorite color?"}],
)
print(response.choices[0].message.content)
# Output: Your favorite color is blue.
Memori Cloud is free for developers with generous rate limits. Advanced Augmentation runs in the background with zero latency impact.
import os

from openai import OpenAI
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from memori import Memori

# Setup OpenAI
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Setup SQLite database; Memori takes a SQLAlchemy session factory
engine = create_engine("sqlite:///memori.db")
Session = sessionmaker(bind=engine)

# Setup Memori with SQLite
mem = Memori(conn=Session).llm.register(client)
mem.attribution(entity_id="user-123", process_id="my-app")
# Creates the required tables on first run (idempotent thereafter).
mem.config.storage.build()

# First conversation - teach the AI two facts
print("You: My favorite color is blue and I live in Paris")
response1 = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{
        "role": "user",
        "content": "My favorite color is blue and I live in Paris"
    }],
)
print(f"AI: {response1.choices[0].message.content}\n")

# Second conversation - Memori recalls context
print("You: What's my favorite color?")
response2 = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What's my favorite color?"}],
)
print(f"AI: {response2.choices[0].message.content}\n")

# Third conversation - recalls the second fact too
print("You: What city do I live in?")
response3 = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What city do I live in?"}],
)
print(f"AI: {response3.choices[0].message.content}")

# Wait for background augmentation to complete before the script exits
mem.augmentation.wait()
import os

from openai import OpenAI
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from memori import Memori

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Connect to PostgreSQL; the connection string comes from the environment
# so credentials stay out of the source.
database_connection_string = os.getenv("DATABASE_CONNECTION_STRING")
engine = create_engine(database_connection_string)
Session = sessionmaker(bind=engine)

# Setup Memori with PostgreSQL
mem = Memori(conn=Session).llm.register(client)
mem.attribution(entity_id="user-123", process_id="my-app")
# Creates the required tables on first run (idempotent thereafter).
mem.config.storage.build()

# Use as normal - memory capture and recall are automatic
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{
        "role": "user",
        "content": "My favorite color is blue and I live in Paris"
    }],
)
print(response.choices[0].message.content)

# Wait for background augmentation to complete before the script exits
mem.augmentation.wait()
import os

from openai import OpenAI
from pymongo import MongoClient

from memori import Memori

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Connect to MongoDB
mongo_client = MongoClient(os.getenv("MONGODB_CONNECTION_STRING"))
db = mongo_client["memori"]

# Setup Memori with MongoDB; conn takes a callable that returns
# the database handle (unlike the SQLAlchemy session factory above).
mem = Memori(conn=lambda: db).llm.register(client)
mem.attribution(entity_id="user-123", process_id="my-app")
# Creates the required collections on first run (idempotent thereafter).
mem.config.storage.build()

# Use as normal - memory capture and recall are automatic
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{
        "role": "user",
        "content": "My favorite color is blue and I live in Paris"
    }],
)
print(response.choices[0].message.content)

# Wait for background augmentation to complete before the script exits
mem.augmentation.wait()
Calling `mem.config.storage.build()` creates the necessary database tables or collections the first time it runs.
import os

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from memori import Memori

# Setup database
engine = create_engine("sqlite:///memori_agno.db")
Session = sessionmaker(bind=engine)

# Setup Agno with OpenAI
model = OpenAIChat(id="gpt-4o-mini")

# Register Memori with Agno - note the keyword argument for Agno models,
# rather than passing a raw OpenAI client.
mem = Memori(conn=Session).llm.register(openai_chat=model)
mem.attribution(entity_id="customer-456", process_id="support-agent")
# Creates the required tables on first run (idempotent thereafter).
mem.config.storage.build()

# Create agent
agent = Agent(
    model=model,
    instructions=[
        "You are a helpful customer support agent.",
        "Remember customer preferences and history.",
    ],
    markdown=True,
)

# Use the agent - memory is automatic
response = agent.run(
    "Hi, I'd like to order a large pepperoni pizza with extra cheese"
)
print(response.content)

response = agent.run("Can you remind me what I just ordered?")
print(response.content)

# Wait for background augmentation to complete before the script exits
mem.augmentation.wait()