Store and recall context across interactions using Kubiya’s cognitive memory system
Cognitive Memory enables agents to remember context across conversations and sessions. Store operational knowledge, incident history, and domain expertise that can be recalled later using natural language queries.
from kubiya import ControlPlaneClient

client = ControlPlaneClient(api_key="your-api-key")

# Store operational knowledge
memory = client.graph.store_memory(
    dataset_id="ops-knowledge",
    context="""
    Incident Resolution: Database slow query issue in production.
    Symptoms: API response times increased from 200ms to 3000ms.
    Root Cause: Missing index on users table for email lookups.
    Resolution: Added index `idx_users_email` which reduced query time to 50ms.
    Impact: 45 minutes of degraded performance.
    """,
    metadata={
        "incident_id": "INC-2024-001",
        "severity": "high",
        "resolved_by": "ops-team",
        "timestamp": "2024-12-10T14:30:00Z"
    }
)

print(f"Memory ID: {memory['memory_id']}")
print(f"Dataset: {memory['dataset_id']}")
print(f"Status: {memory['status']}")
from kubiya import ControlPlaneClient

client = ControlPlaneClient(api_key="your-api-key")

# Store large context asynchronously
job = client.graph.store_memory_async(
    dataset_id="logs-dataset",
    context="Large batch of application logs...",  # Could be MB of data
    metadata={"source": "app-logs", "date": "2024-12-10"}
)

print(f"Job ID: {job['job_id']}")
print(f"Status: {job['status']}")  # "processing"

# Continue with other operations while storage completes
Use store_memory_async() for large content or when you don’t need to wait for completion. The memory will be available for recall once processing completes.
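If a later step depends on the memory being recallable, you can poll the job until it finishes before querying. The sketch below assumes a get_job_status() method on client.graph that returns the same job dict; the actual job-status API may differ in your SDK version, so treat this as an illustration of the pattern rather than a definitive call.

import time

from kubiya import ControlPlaneClient

client = ControlPlaneClient(api_key="your-api-key")

job = client.graph.store_memory_async(
    dataset_id="logs-dataset",
    context="Large batch of application logs...",
    metadata={"source": "app-logs", "date": "2024-12-10"}
)

# Poll until processing finishes before recalling the memory.
# NOTE: get_job_status() is an assumed method name for illustration;
# check your SDK version for the actual job-status API.
while job["status"] == "processing":
    time.sleep(2)  # back off between polls
    job = client.graph.get_job_status(job["job_id"])

print(f"Final status: {job['status']}")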
To retrieve memories, use recall_memory() with a query:
from kubiya import ControlPlaneClient

client = ControlPlaneClient(api_key="your-api-key")

# Recall memories using a query (returns most relevant matches)
memories = client.graph.recall_memory(
    query="incident OR deployment OR error",  # Broad query to retrieve various memories
    limit=20
)

print(f"Found {len(memories)} memories\n")

for memory in memories:
    print(f"Memory ID: {memory['memory_id']}")
    print(f"Relevance: {memory['relevance_score']:.2f}")
    print(f"Created: {memory['created_at']}")
    print(f"Preview: {memory['content'][:80]}...")
    print("---")
The SDK does not provide a list_memories() method. Use recall_memory() with appropriate queries to retrieve memories.
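One way to approximate a listing is to recall with a broad query and filter client-side on the metadata you stored. The helper below is a minimal sketch that uses only recall_memory(); the severity and incident_id keys are simply the metadata fields from the earlier storage example.

from kubiya import ControlPlaneClient


def recall_filtered(client: ControlPlaneClient, query: str, limit: int = 50,
                    **metadata_filters):
    """Recall memories, then filter client-side on stored metadata."""
    memories = client.graph.recall_memory(query=query, limit=limit)
    return [
        m for m in memories
        if all(m.get('metadata', {}).get(k) == v
               for k, v in metadata_filters.items())
    ]


client = ControlPlaneClient(api_key="your-api-key")

# e.g. only high-severity incident memories
high_severity = recall_filtered(client, query="incident", severity="high")
for m in high_severity:
    print(m['memory_id'], m.get('metadata', {}).get('incident_id'))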
Find similar historical incidents for current issues:
from kubiya import ControlPlaneClient

def find_similar_incidents(
    client: ControlPlaneClient,
    current_issue: str,
    limit: int = 3
):
    """Find similar historical incidents."""
    memories = client.graph.recall_memory(
        query=current_issue,
        limit=limit
    )

    if not memories:
        print("No similar incidents found in history")
        return []

    print("=== Similar Historical Incidents ===\n")
    print(f"Current Issue: {current_issue}\n")

    for i, memory in enumerate(memories, 1):
        print(f"{i}. Relevance Score: {memory['relevance_score']:.2f}")

        # Extract key information from metadata
        metadata = memory.get('metadata', {})
        print(f"   Incident ID: {metadata.get('incident_id', 'N/A')}")
        print(f"   Severity: {metadata.get('severity', 'N/A')}")
        print(f"   Date: {memory['created_at']}")

        # Show an excerpt: pull the root cause and resolution lines,
        # matching the labels used when the incident was stored
        lines = memory['content'].split('\n')
        root_cause = next((l for l in lines if l.strip().startswith('Root Cause:')), '')
        resolution = next((l for l in lines if l.strip().startswith('Resolution:')), '')
        if root_cause:
            print(f"   {root_cause.strip()}")
        if resolution:
            print(f"   {resolution.strip()}")
        print()

    return memories

# Usage
client = ControlPlaneClient(api_key="your-api-key")

# Find similar incidents for the current problem
similar = find_similar_incidents(
    client,
    "API returning 500 errors under high load",
    limit=5
)
# ❌ BAD - Too brief
client.graph.store_memory(
    dataset_id="ops",
    context="Fixed bug"
)

# ✅ GOOD - Detailed and searchable
client.graph.store_memory(
    dataset_id="ops",
    context="""
    Bug Fix: Authentication service returning 401 for valid tokens
    Issue: JWT validation failing due to clock skew between services
    Solution: Increased token validation tolerance to 30 seconds
    Impact: Resolved intermittent auth failures for 2% of requests
    """
)
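To keep entries consistently detailed, you can assemble the context from a template before storing it. format_incident_context() below is a hypothetical helper, not part of the SDK; it simply builds the structured fields shown above.

def format_incident_context(title: str, issue: str, solution: str,
                            impact: str) -> str:
    """Hypothetical helper: assemble a detailed, searchable memory entry."""
    return (
        f"Bug Fix: {title}\n"
        f"Issue: {issue}\n"
        f"Solution: {solution}\n"
        f"Impact: {impact}\n"
    )


client.graph.store_memory(
    dataset_id="ops",
    context=format_incident_context(
        title="Authentication service returning 401 for valid tokens",
        issue="JWT validation failing due to clock skew between services",
        solution="Increased token validation tolerance to 30 seconds",
        impact="Resolved intermittent auth failures for 2% of requests",
    )
)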
# Separate datasets for different purposes
client.graph.store_memory(dataset_id="incident-history", context="...")
client.graph.store_memory(dataset_id="deployment-logs", context="...")
client.graph.store_memory(dataset_id="team-knowledge", context="...")
client.graph.store_memory(dataset_id="customer-feedback", context="...")
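When many call sites write memories, a small routing map helps keep dataset names consistent. This is a sketch: the DATASETS mapping and the kind argument are conventions invented for illustration, not SDK features.

# Hypothetical routing map using the dataset names from the example above
DATASETS = {
    "incident": "incident-history",
    "deployment": "deployment-logs",
    "knowledge": "team-knowledge",
    "feedback": "customer-feedback",
}


def store_record(client, kind: str, context: str, **metadata):
    """Route a record to the dataset configured for its kind."""
    return client.graph.store_memory(
        dataset_id=DATASETS[kind],
        context=context,
        metadata=metadata,
    )


store_record(client, "incident", "Disk pressure on node pool...", severity="low")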
# Store large batches asynchronously
for log_entry in large_log_batch:
    client.graph.store_memory_async(
        dataset_id="logs",
        context=log_entry['message'],
        metadata=log_entry['metadata']
    )

# Continue with other work - storage happens in background
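For very high-volume sources, you might also combine several entries into one memory per request to cut call overhead. A minimal sketch, assuming each log entry is a dict with a 'message' key as in the loop above:

def store_logs_in_chunks(client, log_batch, chunk_size=100):
    """Store logs in chunks of `chunk_size` messages per memory (sketch)."""
    for start in range(0, len(log_batch), chunk_size):
        chunk = log_batch[start:start + chunk_size]
        client.graph.store_memory_async(
            dataset_id="logs",
            context="\n".join(entry['message'] for entry in chunk),
            metadata={"chunk_start": start, "chunk_size": len(chunk)},
        )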