Resources
Examples
Code examples for common use cases
Practical code examples for common CortexaDB use cases.
Basic Memory Storage & Retrieval
from cortexadb import CortexaDB
from cortexadb.providers.openai import OpenAIEmbedder
db = CortexaDB.open("agent.mem", embedder=OpenAIEmbedder())
# Store memories
mid1 = db.add("The user prefers dark mode.")
mid2 = db.add("User works at Stripe.")
mid3 = db.add("User's favorite language is Python.")
# Search
hits = db.search("What programming language does the user like?")
for hit in hits:
    mem = db.get(hit.id)
    print(f"[{hit.score:.3f}] {mem.content.decode()}")

Knowledge Graph
db = CortexaDB.open("knowledge.mem", embedder=embedder)
# Store entities
alice = db.add("Alice is a software engineer at Acme Corp")
bob = db.add("Bob is Alice's manager")
acme = db.add("Acme Corp builds developer tools")
# Create relationships
db.connect(alice, bob, "reports_to")
db.connect(alice, acme, "works_at")
db.connect(bob, acme, "works_at")
# Query with graph expansion
hits = db.search("Who works at Acme?", use_graph=True)

Multi-Agent System
db = CortexaDB.open("agents.mem", embedder=embedder)
# Each agent has isolated memory
planner = db.collection("planner")
researcher = db.collection("researcher")
writer = db.collection("writer")
# Agents store memories independently
planner.add("Task: Write a blog post about vector databases")
researcher.add("Found: CortexaDB supports HNSW indexing")
researcher.add("Found: Typical recall is 95% with HNSW")
writer.add("Draft intro: Vector databases are transforming AI...")
# Each agent queries only its own memories
research = researcher.query("What did I find about indexing?").execute()
# Shared knowledge base (readonly for agents)
shared = db.collection("shared")
shared.add("Company style guide: Use active voice")
agent_view = db.collection("shared", readonly=True)
guidelines = agent_view.query("What is the writing style?").execute()

Document Ingestion Pipeline
db = CortexaDB.open("docs.mem", embedder=embedder, index_mode="hnsw")
# Load different file types
db.load("README.md", strategy="markdown")
db.load("api_docs.pdf", strategy="recursive", chunk_size=1024)
db.load("config.json", strategy="json")
# Ingest raw text
article = """
Long article about machine learning...
"""
ids = db.ingest(article, strategy="semantic", chunk_size=2048)
# Query across all ingested documents
hits = db.search("How do I configure the API?", limit=10)
for hit in hits:
    mem = db.get(hit.id)
    print(f"[{hit.score:.3f}] {mem.content.decode()[:100]}...")

Session Recording & Replay
# Record a session
db = CortexaDB.open("agent.mem", embedder=embedder, record="session.log")
searched = db.add("User searched about pricing")
shown = db.add("Showed enterprise plan")
db.connect(searched, shown, "led_to")
# Later: replay the session for debugging
db2 = CortexaDB.replay("session.log", "/tmp/debug.mem")
report = db2.last_replay_report
print(f"Replayed {report['applied']}/{report['total_ops']} operations")
# Export current state as a migration snapshot
db.export_replay("snapshot.log")

HNSW with Custom Tuning
# High recall setup
db = CortexaDB.open("precise.mem", dimension=384, index_mode={
    "type": "hnsw",
    "m": 32,
    "ef_search": 200,
    "ef_construction": 400,
    "metric": "cos"
})
# Low latency setup
db = CortexaDB.open("fast.mem", dimension=384, index_mode={
    "type": "hnsw",
    "m": 8,
    "ef_search": 20,
    "metric": "cos"
})
# L2 metric for image embeddings
db = CortexaDB.open("images.mem", dimension=512, index_mode={
    "type": "hnsw",
    "metric": "l2"
})

Capacity-Managed Database
# Auto-evict when exceeding limits
db = CortexaDB.open(
    "bounded.mem",
    embedder=embedder,
    max_entries=10000,      # Keep at most 10K memories
    max_bytes=50_000_000,   # Keep under 50MB
)
# Old, low-importance memories are automatically evicted
for i in range(20000):
    db.add(f"Memory #{i}")
stats = db.stats()
print(f"Entries: {stats.entries}")  # ~10000 (eviction kicked in)

Metadata Filtering
db = CortexaDB.open("agent.mem", embedder=embedder)
# Store with metadata
db.add("Dark mode enabled", metadata={"category": "preference"})
db.add("Meeting at 3pm", metadata={"category": "schedule"})
db.add("Likes Python", metadata={"category": "preference"})
# Filter by metadata (if supported by your query)
hits = db.search("What are the user's preferences?")
for hit in hits:
    mem = db.get(hit.id)
    print(f"{mem.content.decode()} [{mem.metadata}]")

Chunking Without Storage
from cortexadb import chunk
text = open("large_document.txt").read()
# Try different strategies
for strategy in ["fixed", "recursive", "semantic", "markdown"]:
    chunks = chunk(text, strategy=strategy, chunk_size=512, overlap=50)
    print(f"{strategy}: {len(chunks)} chunks")
    print(f" First: {chunks[0].text[:80]}...")

Rust Basic Usage
use cortexadb_core::CortexaDB;
use std::collections::HashMap;
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let db = CortexaDB::open("/tmp/agent.mem", 128)?;
    // Store memories
    let emb = vec![0.1_f32; 128];
    let id1 = db.add(emb.clone(), None)?;
    let mut meta = HashMap::new();
    meta.insert("source".into(), "test".into());
    let id2 = db.add(vec![0.2; 128], Some(meta))?;
    // Query
    let hits = db.search(emb, 5, None)?;
    println!("Found {} results", hits.len());
    // Graph
    db.connect(id1, id2, "related_to")?;
    let neighbors = db.get_neighbors(id1)?;
    println!("Neighbors: {:?}", neighbors);
    // Maintenance
    db.checkpoint()?;
    db.compact()?;
    let stats = db.stats()?;
    println!("Total entries: {}", stats.entries);
    Ok(())
}

Next Steps
- Python API - Full API reference
- Rust API - Rust crate reference
- Configuration - All tuning options
