15+ production-ready agent patterns. Each recipe uses real Slopshop APIs. Copy, paste, ship.
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")


def research_and_remember(topic):
    """Answer *topic* from stored research, falling back to a live search.

    Cached results live in the "research" namespace, so repeat queries
    are served from memory instead of re-running the search tool.
    """
    # Search existing knowledge first
    existing = slop.memory.search(topic, namespace="research")
    if existing.results:
        return existing.results[0].value

    # Run research tools
    result = slop.tools.run("llm-search", {"query": topic})

    # Store for future queries
    slop.memory.set(
        f"research:{topic}",
        result.output,
        namespace="research",
    )
    return result.output


# Each run builds on previous knowledge
answer = research_and_remember(
    "quantum computing breakthroughs 2026"
)
slop = slopshop.Client(api_key="YOUR_KEY")


def support_swarm(ticket_id, user_message):
    """Route a support message: escalate to a human or auto-resolve.

    Returns a dict with an "action" key ("escalate" or "resolve").
    Resolve responses also carry the classified "intent" and the user's
    prior interaction as "history".
    """
    # Check user history.
    # Fix: the original fetched this and never used it (a dead remote
    # call) — it is now returned to the caller as context.
    history = slop.memory.get(
        f"user:{ticket_id}", namespace="support"
    )

    # Classify intent
    intent = slop.tools.run("llm-classify", {
        "text": user_message,
        "categories": ["billing", "technical",
                       "general", "escalate"]
    })

    if intent.output == "escalate":
        slop.memory.set(
            f"escalated:{ticket_id}",
            user_message,
            namespace="support"
        )
        return {"action": "escalate", "to": "human_agent"}

    # Store interaction
    slop.memory.set(
        f"user:{ticket_id}",
        f"Last: {user_message}",
        namespace="support"
    )
    return {
        "action": "resolve",
        "intent": intent.output,
        # New, backward-compatible key: prior interaction for context.
        "history": history,
    }
import schedule
import time

slop = slopshop.Client(api_key="YOUR_KEY")


def store_market_signal(symbol, price, sentiment):
    """Persist one timestamped market data point in the "trading" namespace."""
    slop.memory.set(
        f"signal:{symbol}:{int(time.time())}",
        f"price={price} sentiment={sentiment}",
        namespace="trading",
    )


def nightly_analysis():
    """Kick off an overnight Dream synthesis and return its scenarios."""
    # Dream Engine synthesizes overnight
    session = slop.dream.start(
        namespace="trading",
        strategy="forecast",  # Monte Carlo scenarios
        model="claude-opus-4-5",
    )
    report = slop.dream.wait(session.dream_id)
    print(f"Intelligence Score: {report.intelligence_score}")
    return report.entries  # tomorrow's scenarios


# Store signals throughout the day
store_market_signal("BTC", 95000, "bullish")

# Schedule nightly synthesis.
# NOTE(review): `schedule` only registers the job here — a
# `while True: schedule.run_pending(); time.sleep(...)` loop must run
# somewhere for the job to actually fire.
schedule.every().day.at("02:00").do(nightly_analysis)
slop = slopshop.Client(api_key="YOUR_KEY")


def review_pr(diff_content, pr_id):
    """Score a PR diff's complexity and recommend approve/request_changes.

    Past reviews with similar complexity are looked up, and this review
    is stored so future reviews can learn from it.
    """
    # Check complexity
    complexity = slop.tools.run(
        "code-complexity-score",
        {"code": diff_content},
    )

    # Find similar past issues
    similar = slop.memory.search(
        f"complexity:{complexity.output}",
        namespace="reviews",
    )

    # Store patterns for future learning
    slop.memory.set(
        f"pr:{pr_id}",
        f"complexity={complexity.output} "
        f"issues={len(similar.results)}",
        namespace="reviews",
    )

    # Threshold of 50 separates approve from request_changes.
    verdict = (
        "request_changes"
        if int(complexity.output) > 50
        else "approve"
    )
    return {
        "complexity": complexity.output,
        "similar_issues": similar.results[:3],
        "recommendation": verdict,
    }
from datetime import date

slop = slopshop.Client(api_key="YOUR_KEY")


def summarize_channel(channel_id, messages):
    """Summarize a list of channel messages and archive the summary.

    The summary is stored under a per-channel, per-day key so the
    history stays searchable.
    """
    # Summarize with LLM
    summary = slop.tools.run("llm-summarize", {
        "text": "\n".join(messages),
        "max_length": 200,
    })

    # Store for searchable history
    slop.memory.set(
        f"summary:{channel_id}:{date.today()}",
        summary.output,
        namespace="slack",
    )
    return summary.output


# Example usage
# NOTE(review): fetch_slack_messages is not defined in this file —
# presumably the reader's own Slack helper; supply your own.
msgs = fetch_slack_messages("#eng-general", limit=200)
daily_summary = summarize_channel("eng-general", msgs)
print(daily_summary)
slop = slopshop.Client(api_key="YOUR_KEY")


def morning_brief(namespace="default"):
    """Summarize the most recent dream report for *namespace*.

    Returns a dict of headline metrics, or a plain string when no dream
    has run yet — callers must handle both shapes.
    """
    # Check yesterday's dream report
    sessions = slop.dream.list(namespace=namespace, limit=1)
    if not sessions:
        return "No dream data yet. Run a dream cycle first."
    report = slop.dream.report(sessions[0].dream_id)
    return {
        "intelligence_score": report.intelligence_score,
        "top_insights": report.entries[:5],
        "compression_ratio":
            report.compression_metrics["ratio"],
        "themes":
            report.hierarchy_metadata["themes_generated"],
    }


brief = morning_brief(namespace="work")
# Fix: the original indexed `brief` unconditionally, which raises
# TypeError when morning_brief returns its "no data" string.
if isinstance(brief, dict):
    print(
        f"IS: {brief['intelligence_score']} | "
        f"{brief['themes']} themes synthesized"
    )
else:
    print(brief)
slop = slopshop.Client(api_key="YOUR_KEY")


def rag_with_memory(query, docs, namespace="rag"):
    """Index *docs* into memory, then answer *query* over the top matches.

    docs: iterable of dicts, each with a "content" key.
    """
    # Store docs in memory
    for idx, doc in enumerate(docs):
        slop.memory.set(
            f"doc:{idx}", doc["content"],
            namespace=namespace,
        )

    # Search with semantic similarity
    hits = slop.memory.search(
        query, namespace=namespace, limit=5
    )

    # Build context from top results
    context = "\n---\n".join(
        hit.value for hit in hits.results
    )

    # Generate answer with context
    answer = slop.tools.run("llm-think", {
        "prompt":
            f"Context:\n{context}\n\nQuestion: {query}"
    })
    return answer.output
class SelfImprovingAgent:
    """Agent that logs every outcome, then "dreams" to refine itself."""

    def __init__(self, namespace="agent"):
        self.slop = slopshop.Client(api_key="YOUR_KEY")
        self.namespace = namespace
        # Monotonic counter used to key stored outcomes.
        self.session_count = 0

    def act(self, task):
        """Run *task* through the LLM and record the outcome for learning."""
        result = self.slop.tools.run(
            "llm-think", {"prompt": task}
        )
        # Store outcome for learning
        # NOTE(review): result_quality=0.8 is a hard-coded placeholder.
        self.slop.memory.set(
            f"outcome:{self.session_count}",
            f"task={task[:50]} result_quality=0.8",
            namespace=self.namespace,
        )
        self.session_count += 1
        return result.output

    def evolve(self):
        """Dream with evolve strategy — Bayesian belief updates."""
        session = self.slop.dream.start(
            namespace=self.namespace,
            strategy="evolve",
        )
        return self.slop.dream.wait(session.dream_id)


agent = SelfImprovingAgent()
agent.act("Summarize AI trends for Q2 2026")
report = agent.evolve()  # Agent gets smarter overnight
slop = slopshop.Client(api_key="YOUR_KEY")

SHARED_NS = "debate-room-001"


def agent_argue(position, agent_id, topic):
    """Produce one debate turn for *agent_id*, grounded in shared memory.

    Reads prior arguments on *topic* from the shared namespace,
    generates a rebuttal-aware argument, and writes it back so opposing
    agents can respond.
    """
    # Read opposing arguments from shared memory
    counter = slop.memory.search(
        f"argument:{topic}", namespace=SHARED_NS
    )

    # Generate argument considering counter-positions.
    # NOTE(review): [-3:] takes the *last* three results — if results
    # are relevance-ranked these are the weakest matches; confirm intent.
    context = "\n".join(
        item.value for item in counter.results[-3:]
    )
    arg = slop.tools.run("llm-think", {
        "prompt":
            f"Argue {position} on '{topic}'. "
            f"Counter-arguments: {context}"
    })

    # Store in shared namespace
    slop.memory.set(
        f"argument:{topic}:{agent_id}",
        arg.output,
        namespace=SHARED_NS,
    )
    return arg.output


# Two agents debate
arg1 = agent_argue("for", "agent_alpha",
                   "AGI will arrive by 2027")
arg2 = agent_argue("against", "agent_beta",
                   "AGI will arrive by 2027")
slop = slopshop.Client(api_key="YOUR_KEY")


def ingest_pdf_and_dream(pdf_text, namespace="kb"):
    """Chunk *pdf_text* into memory, then run an insight-synthesis Dream."""
    # Chunk and store: fixed 500-char windows, no overlap.
    chunks = [
        pdf_text[i:i + 500]
        for i in range(0, len(pdf_text), 500)
    ]
    for idx, chunk in enumerate(chunks):
        slop.memory.set(
            f"chunk:{idx}", chunk, namespace=namespace
        )
    print(f"Stored {len(chunks)} chunks")

    # Run synthesis Dream
    session = slop.dream.start(
        namespace=namespace,
        strategy="insight_generate",  # Cross-domain discovery
        model="claude-sonnet-4-6",
    )
    report = slop.dream.wait(session.dream_id)
    print(f"Synthesized {report.insights_generated} insights")
    print(
        f"Compression: "
        f"{report.compression_metrics['ratio']}x"
    )
    return report


from langchain.agents import AgentExecutor
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")


# Use Slopshop as the memory layer for LangChain agents
class SlopMemory:
    """Drop-in memory upgrade for any LangChain agent."""

    def save_context(self, inputs, outputs):
        """Persist one exchange, keyed by a hash of the inputs."""
        slop.memory.set(
            f"ctx:{hash(str(inputs))}",
            str(outputs),
            namespace="langchain",
        )

    def load_memory_variables(self, inputs):
        """Return up to three semantically similar past exchanges."""
        hits = slop.memory.search(
            str(inputs), namespace="langchain"
        )
        return {
            "history": [
                hit.value for hit in hits.results[:3]
            ]
        }


memory = SlopMemory()
# Pass to your existing AgentExecutor
# agent = AgentExecutor(agent=..., memory=memory)
slop = slopshop.Client(api_key="YOUR_KEY")


def replay_agent_session(namespace, from_timestamp):
    """Print a chronological replay of memory events and dream reports.

    Everything in *namespace* created at or after *from_timestamp* is
    printed in order; dream sessions started in the window are
    summarized afterwards.
    """
    # List all memories since timestamp.
    # NOTE(review): assumes each entry in .keys exposes .created_at and
    # .key attributes — confirm against the SDK's list() return type.
    memories = slop.memory.list(namespace=namespace)

    # Filter and sort
    timeline = sorted(
        (
            entry for entry in memories.keys
            if entry.created_at >= from_timestamp
        ),
        key=lambda entry: entry.created_at,
    )

    print(f"Replaying {len(timeline)} memory events...")
    for event in timeline:
        val = slop.memory.get(
            event.key, namespace=namespace
        )
        print(
            f"[{event.created_at}] "
            f"{event.key}: {val.value[:80]}..."
        )

    # Fetch any dream reports in this window
    sessions = slop.dream.list(namespace=namespace)
    for session in sessions:
        if session.started_at >= from_timestamp:
            report = slop.dream.report(session.dream_id)
            print(
                f"[DREAM] IS={report.intelligence_score} "
                f"strategy={report.strategy}"
            )
import time

slop = slopshop.Client(api_key="YOUR_KEY")

HIVE_NS = "team-hive-v1"


def contribute_to_hive(agent_id, insight):
    """Write one timestamped insight into the shared hive namespace."""
    slop.memory.set(
        f"insight:{agent_id}:{int(time.time())}",
        insight,
        namespace=HIVE_NS,
    )


def run_collective_dream():
    """Synthesize all team agents' memories together in one Dream."""
    session = slop.dream.start(
        namespace=HIVE_NS,
        strategy="full_cycle",
        model="claude-opus-4-6",
    )
    report = slop.dream.wait(session.dream_id)
    print(f"Collective IS: {report.intelligence_score}")
    print(
        f"Themes: "
        f"{report.hierarchy_metadata['themes_generated']}"
    )
    return report


# Agents contribute throughout the day
contribute_to_hive(
    "agent_sales",
    "Q2 pipeline looking strong, 3 enterprise deals"
)
contribute_to_hive(
    "agent_eng",
    "Latency improved 40% after caching layer"
)
run_collective_dream()  # Collective intelligence synthesized
slop = slopshop.Client(api_key="YOUR_KEY")


def track_portfolio_risk(portfolio_id, positions):
    """Store current positions, then forecast risk scenarios via Dream.

    positions: mapping of symbol -> dict with 'qty', 'price' and 'beta'.
    """
    for symbol, data in positions.items():
        slop.memory.set(
            f"position:{symbol}",
            f"qty={data['qty']} price={data['price']} "
            f"risk={data['beta']}",
            namespace=f"portfolio:{portfolio_id}",
        )

    # Dream with forecast strategy for risk scenarios
    session = slop.dream.start(
        namespace=f"portfolio:{portfolio_id}",
        strategy="forecast",  # Monte Carlo scenarios
        model="claude-sonnet-4-6",
    )
    report = slop.dream.wait(session.dream_id)

    # Extract risk scenarios from forecast entries
    risk_scenarios = [
        entry for entry in report.entries
        if entry.get("type") == "forecast"
    ]
    return {
        "intelligence_score": report.intelligence_score,
        "risk_scenarios": risk_scenarios[:5],
        "causal_edges":
            report.hierarchy_metadata["causal_edges_added"],
    }
import json

slop = slopshop.Client(api_key="YOUR_KEY")


def backup_namespace(namespace, output_file):
    """Dump every memory in *namespace* (up to 1000) to a JSON file."""
    all_memories = slop.memory.list(
        namespace=namespace, limit=1000
    )
    backup = []
    for key in all_memories.keys:
        val = slop.memory.get(key, namespace=namespace)
        backup.append({
            "key": key,
            "value": val.value,
            "namespace": namespace,
        })
    with open(output_file, "w") as f:
        json.dump(backup, f, indent=2)
    print(f"Backed up {len(backup)} memories to {output_file}")


def restore_namespace(input_file, target_namespace):
    """Load a JSON backup and write each entry into *target_namespace*."""
    with open(input_file) as f:
        backup = json.load(f)
    for item in backup:
        slop.memory.set(
            item["key"], item["value"],
            namespace=target_namespace,
        )
    print(
        f"Restored {len(backup)} memories "
        f"to {target_namespace}"
    )


# Usage
backup_namespace("production", "backup_2026_04_01.json")
# restore_namespace("backup_2026_04_01.json", "production_v2")

Get an API key, install the SDK, and your first recipe will be running in under 5 minutes.