You can manually generate embeddings for custom use cases.
Python
TypeScript
from memori import Memori

mem = Memori()

# Embed one piece of text; the result is a list containing a single vector.
single_text = "Machine learning is transforming industries"
single_result = mem.embed_texts(single_text)
print(f"Generated embedding with {len(single_result[0])} dimensions")
print(f"First few values: {single_result[0][:5]}")

# Embed several texts in one call; one vector comes back per input text.
batch = [
    "Artificial intelligence is advancing rapidly",
    "Neural networks power modern AI systems",
    "Deep learning requires large datasets",
]
batch_result = mem.embed_texts(batch)
print(f"Generated {len(batch_result)} embeddings")
for index, vector in enumerate(batch_result):
    print(f"Text {index+1}: {len(vector)} dimensions")
import { Memori } from 'memori';async function generateEmbeddings() { const mem = new Memori(); // Generate embeddings for a single text const text = 'Machine learning is transforming industries'; const embedding = await mem.embed_texts(text); console.log(`Generated embedding with ${embedding[0].length} dimensions`); console.log(`First few values: ${embedding[0].slice(0, 5)}`); // Generate embeddings for multiple texts const texts = [ 'Artificial intelligence is advancing rapidly', 'Neural networks power modern AI systems', 'Deep learning requires large datasets', ]; const embeddings = await mem.embed_texts(texts); console.log(`Generated ${embeddings.length} embeddings`); embeddings.forEach((emb, i) => { console.log(`Text ${i + 1}: ${emb.length} dimensions`); });}generateEmbeddings().catch(console.error);
Generate embeddings asynchronously for better performance.
Python
TypeScript
import asyncio

from memori import Memori


async def generate_async_embeddings():
    """Start embedding generation in the background, then await the result."""
    mem = Memori()
    docs = [
        "First document about AI",
        "Second document about ML",
        "Third document about DL",
    ]
    # With async_=True the call returns an awaitable that runs in a thread pool.
    pending = mem.embed_texts(docs, async_=True)
    # Free to do other work while the embeddings are being computed.
    print("Generating embeddings in background...")
    vectors = await pending
    print(f"Generated {len(vectors)} embeddings")
    return vectors


asyncio.run(generate_async_embeddings())
import { Memori } from 'memori';async function generateAsyncEmbeddings() { const mem = new Memori(); const texts = [ 'First document about AI', 'Second document about ML', 'Third document about DL', ]; // Generate embeddings asynchronously (always async in TS) console.log('Generating embeddings...'); const embeddings = await mem.embed_texts(texts); console.log(`Generated ${embeddings.length} embeddings`); return embeddings;}generateAsyncEmbeddings().catch(console.error);
For self-hosted deployments, you can configure the embedding model.
Python
TypeScript
from memori import Memori
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Database setup: a local SQLite file and a session factory bound to it.
engine = create_engine("sqlite:///memori.db")
Session = sessionmaker(bind=engine)

# Memori instance backed by the session factory.
mem = Memori(conn=Session)

# Point embeddings at a custom model.
mem.config.embeddings.model = "sentence-transformers/all-MiniLM-L6-v2"

# Create the storage tables.
mem.config.storage.build()

# Normal usage — the configured model is applied transparently.
sample = "This will be embedded with the custom model"
result = mem.embed_texts(sample)
print(f"Embedding dimension: {len(result[0])}")
// TypeScript SDK uses Memori Cloud embeddings automatically// Custom models are configured server-sideimport { Memori } from 'memori';const mem = new Memori();mem.attribution('user-123');// Embeddings are generated automatically during recallconst facts = await mem.recall('What are my preferences?');console.log(`Found ${facts.length} relevant facts`);
import numpy as np
from memori import Memori


def cosine_similarity(a, b):
    """Return the cosine of the angle between vectors a and b."""
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


mem = Memori()

# Document corpus to search over.
documents = [
    "Python is a programming language",
    "JavaScript runs in browsers",
    "Machine learning uses neural networks",
    "Databases store structured data",
    "APIs connect different services",
]

# One embedding per document.
doc_embeddings = mem.embed_texts(documents)

# Embed the user query (take the single vector out of the batch result).
query = "How do I connect to a web service?"
query_embedding = mem.embed_texts(query)[0]

# Score every document against the query, then rank by similarity descending.
ranked = sorted(
    (
        (position, cosine_similarity(query_embedding, vector), documents[position])
        for position, vector in enumerate(doc_embeddings)
    ),
    key=lambda entry: entry[1],
    reverse=True,
)

print("Most relevant documents:")
for idx, score, doc in ranked[:3]:
    print(f" [{score:.3f}] {doc}")
from memori import Memori
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Postgres-backed session factory for Memori storage.
engine = create_engine("postgresql://user:pass@localhost/memori")
Session = sessionmaker(bind=engine)

mem = Memori(conn=Session)

# Embedding and recall tuning.
mem.config.embeddings.model = "sentence-transformers/all-mpnet-base-v2"
mem.config.recall_facts_limit = 10  # Return top 10 facts
mem.config.recall_embeddings_limit = 50  # Search top 50 embeddings
mem.config.recall_relevance_threshold = 0.7  # Min relevance score

# Create the storage tables.
mem.config.storage.build()
# Set embedding configuration via environment
export MEMORI_EMBEDDING_MODEL="sentence-transformers/all-mpnet-base-v2"
export MEMORI_RECALL_FACTS_LIMIT=10
export MEMORI_RECALL_EMBEDDINGS_LIMIT=50