AGENT COOKBOOK

Copy-Paste Agent Recipes

15+ production-ready agent patterns. Each recipe uses real Slopshop APIs. Copy, paste, ship.

15+
Recipes
Python
Language
6
Categories
Real APIs
No mocks
Filter
Showing 15 recipes
Recipes
Research Starter
Research Agent with Persistent Memory
Searches existing knowledge before running tools, then stores results for future queries — each run builds on the last.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def research_and_remember(topic):
    """Answer *topic*, reusing stored research when available.

    Checks the "research" namespace first; on a miss, runs the
    llm-search tool and persists the result so later calls for the
    same topic are served from memory.
    """
    # Prior knowledge wins: return the best cached hit if one exists.
    cached = slop.memory.search(topic, namespace="research")
    if cached.results:
        return cached.results[0].value

    # Cache miss — do the actual research.
    found = slop.tools.run("llm-search", {"query": topic})

    # Persist so the next run builds on this one.
    slop.memory.set(
        f"research:{topic}",
        found.output,
        namespace="research"
    )
    return found.output

# Each run builds on previous knowledge:
# the first call hits the llm-search tool; repeat calls for the same
# topic are answered from the "research" memory namespace.
answer = research_and_remember(
    "quantum computing breakthroughs 2026"
)
Support Intermediate
Customer Support Swarm with Escalation
Classifies intent, resolves autonomously, and escalates to a human agent when confidence is low — with full interaction history.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def support_swarm(ticket_id, user_message):
    """Route a support message: auto-resolve or escalate to a human.

    Classifies the message intent, escalates when the classifier says
    so, and records the interaction so future calls see the ticket's
    history.

    Returns a dict with an "action" key ("escalate" or "resolve"),
    plus the prior interaction history for the ticket. (The original
    fetched `history` but never used it — it is now surfaced in the
    return value, matching the recipe's "full interaction history"
    promise; extra key is backward-compatible.)
    """
    # Prior interaction for this ticket — miss behavior (None vs empty)
    # depends on the memory.get API; confirm before relying on it.
    history = slop.memory.get(
        f"user:{ticket_id}", namespace="support"
    )

    # Classify intent so we know which path handles the message.
    intent = slop.tools.run("llm-classify", {
        "text": user_message,
        "categories": ["billing", "technical",
                        "general", "escalate"]
    })

    if intent.output == "escalate":
        # Park the raw message for the human agent before handing off.
        slop.memory.set(
            f"escalated:{ticket_id}",
            user_message,
            namespace="support"
        )
        return {
            "action": "escalate",
            "to": "human_agent",
            "history": history,
        }

    # Record this interaction for the next call's history lookup.
    slop.memory.set(
        f"user:{ticket_id}",
        f"Last: {user_message}",
        namespace="support"
    )
    return {
        "action": "resolve",
        "intent": intent.output,
        "history": history,
    }
Finance Advanced
Trading Bot with Dream Cycles
Stores market signals throughout the day, then runs nightly Dream Engine synthesis to generate tomorrow's forecast scenarios.
Python
import slopshop
import schedule
import time

slop = slopshop.Client(api_key="YOUR_KEY")

def store_market_signal(symbol, price, sentiment):
    """Persist one time-stamped market observation for *symbol*."""
    # Unix timestamp in the key keeps repeated signals distinct.
    signal_key = f"signal:{symbol}:{int(time.time())}"
    slop.memory.set(
        signal_key,
        f"price={price} sentiment={sentiment}",
        namespace="trading"
    )

def nightly_analysis():
    """Run an overnight forecast Dream and return tomorrow's scenarios."""
    # Kick off Monte Carlo scenario synthesis over today's signals.
    session = slop.dream.start(
        namespace="trading",
        strategy="forecast",  # Monte Carlo scenarios
        model="claude-opus-4-5"
    )
    # Block until the dream cycle finishes, then surface the score.
    report = slop.dream.wait(session.dream_id)
    print(f"Intelligence Score: {report.intelligence_score}")
    return report.entries  # tomorrow's scenarios

# Store signals throughout the day
store_market_signal("BTC", 95000, "bullish")

# Schedule nightly synthesis at 02:00.
# NOTE(review): `schedule` jobs only fire from a loop calling
# schedule.run_pending() — not shown in this snippet; confirm the
# host process runs one.
schedule.every().day.at("02:00").do(nightly_analysis)
Code Intermediate
Code Review Pipeline
Scores PR complexity, surfaces similar past issues from memory, and recommends approve or request-changes automatically.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def review_pr(diff_content, pr_id):
    """Score a diff's complexity, recall similar reviews, recommend.

    Returns a dict with the complexity score, up to three similar
    past issues, and "approve" / "request_changes".
    """
    complexity = slop.tools.run(
        "code-complexity-score",
        {"code": diff_content}
    )
    score = complexity.output

    # Surface past PRs that landed at a similar complexity level.
    similar = slop.memory.search(
        f"complexity:{score}",
        namespace="reviews"
    )

    # Record this PR so future reviews can learn from it.
    slop.memory.set(
        f"pr:{pr_id}",
        f"complexity={score} issues={len(similar.results)}",
        namespace="reviews"
    )

    # Threshold of 50 splits approve vs request-changes.
    if int(score) > 50:
        verdict = "request_changes"
    else:
        verdict = "approve"

    return {
        "complexity": score,
        "similar_issues": similar.results[:3],
        "recommendation": verdict
    }
Support Starter
Auto-summarizing Slack Bot
Summarizes any Slack channel's messages with an LLM and stores the result for searchable channel history.
Python
import slopshop
from datetime import date

slop = slopshop.Client(api_key="YOUR_KEY")

def summarize_channel(channel_id, messages):
    """Summarize *messages* (<=200 chars) and archive under today's date."""
    joined = "\n".join(messages)
    summary = slop.tools.run("llm-summarize", {
        "text": joined,
        "max_length": 200
    })

    # Keyed by channel + date so history is searchable day by day.
    slop.memory.set(
        f"summary:{channel_id}:{date.today()}",
        summary.output,
        namespace="slack"
    )

    return summary.output

# Example usage
# NOTE(review): fetch_slack_messages is not defined in this snippet —
# it is a placeholder for your own Slack client call.
msgs = fetch_slack_messages("#eng-general", limit=200)
daily_summary = summarize_channel("eng-general", msgs)
print(daily_summary)
Research Intermediate
Daily Intelligence Brief Generator
Reads your latest Dream Engine report each morning and formats a structured intelligence brief with themes and compression metrics.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def morning_brief(namespace="default"):
    """Build a structured brief from the most recent Dream report.

    Returns a metrics dict, or a plain string when no dream sessions
    exist yet in *namespace*.
    """
    # Only the latest session matters for the morning brief.
    sessions = slop.dream.list(namespace=namespace, limit=1)
    if not sessions:
        return "No dream data yet. Run a dream cycle first."

    report = slop.dream.report(sessions[0].dream_id)

    brief = {
        "intelligence_score": report.intelligence_score,
        "top_insights": report.entries[:5],
        "compression_ratio": report.compression_metrics["ratio"],
        "themes": report.hierarchy_metadata["themes_generated"],
    }
    return brief

# NOTE(review): morning_brief returns a plain string when no dream
# sessions exist yet — the subscripts below would then raise
# TypeError; guard in real code.
brief = morning_brief(namespace="work")
print(
    f"IS: {brief['intelligence_score']} | "
    f"{brief['themes']} themes synthesized"
)
Research Intermediate
Memory-Augmented RAG Pipeline
Ingests documents into persistent memory, retrieves via semantic search, and generates grounded answers using retrieved context.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def rag_with_memory(query, docs, namespace="rag"):
    """Ingest *docs* into memory, retrieve top matches, and answer *query*."""
    # Index every document under a stable positional key.
    for idx, doc in enumerate(docs):
        slop.memory.set(
            f"doc:{idx}", doc["content"],
            namespace=namespace
        )

    # Semantic retrieval of the five closest stored chunks.
    hits = slop.memory.search(
        query, namespace=namespace, limit=5
    )

    # Concatenate retrieved text into one grounded context block.
    context = "\n---\n".join(hit.value for hit in hits.results)

    # Ask the model to answer using only the retrieved context.
    prompt = f"Context:\n{context}\n\nQuestion: {query}"
    answer = slop.tools.run("llm-think", {"prompt": prompt})

    return answer.output
Research Advanced
Self-Improving Agent (Dream + Evolve)
Agent stores task outcomes during the day, then runs an evolve Dream cycle overnight for Bayesian belief updates and self-improvement.
Python
import slopshop

class SelfImprovingAgent:
    """Agent that logs task outcomes by day and evolves via Dream by night."""

    def __init__(self, namespace="agent"):
        self.slop = slopshop.Client(api_key="YOUR_KEY")
        self.namespace = namespace
        # Monotonic counter used to key stored outcomes.
        self.session_count = 0

    def act(self, task):
        """Run *task* through llm-think and record the outcome for learning."""
        result = self.slop.tools.run(
            "llm-think", {"prompt": task}
        )
        # Compact outcome record consumed by the nightly evolve pass.
        outcome_key = f"outcome:{self.session_count}"
        outcome_val = f"task={task[:50]} result_quality=0.8"
        self.slop.memory.set(
            outcome_key,
            outcome_val,
            namespace=self.namespace
        )
        self.session_count += 1
        return result.output

    def evolve(self):
        """Run an evolve-strategy Dream (Bayesian belief updates); return report."""
        session = self.slop.dream.start(
            namespace=self.namespace,
            strategy="evolve"
        )
        return self.slop.dream.wait(session.dream_id)

# Act during the day, evolve overnight.
agent = SelfImprovingAgent()
agent.act("Summarize AI trends for Q2 2026")
report = agent.evolve()  # Agent gets smarter overnight
Multi-Agent Advanced
Multi-Agent Debate with Shared Memory
Two agents argue opposite sides of a topic, each reading the other's arguments from a shared namespace before responding.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")
SHARED_NS = "debate-room-001"

def agent_argue(position, agent_id, topic):
    """Produce one debate turn, grounded in the opponent's stored arguments."""
    # Pull whatever has already been argued on this topic.
    counter = slop.memory.search(
        f"argument:{topic}", namespace=SHARED_NS
    )

    # Only the three most recent counter-arguments are considered.
    recent = counter.results[-3:]
    context = "\n".join(entry.value for entry in recent)

    prompt = (
        f"Argue {position} on '{topic}'. "
        f"Counter-arguments: {context}"
    )
    arg = slop.tools.run("llm-think", {"prompt": prompt})

    # Publish so the opponent's next turn can rebut this argument.
    slop.memory.set(
        f"argument:{topic}:{agent_id}",
        arg.output,
        namespace=SHARED_NS
    )
    return arg.output

# Two agents debate — each call reads the shared namespace first, so
# the second agent sees (and can rebut) the first agent's argument.
arg1 = agent_argue("for",     "agent_alpha",
                   "AGI will arrive by 2027")
arg2 = agent_argue("against", "agent_beta",
                   "AGI will arrive by 2027")
Research Intermediate
PDF Knowledge Base + Dream Synthesis
Chunks a PDF into memory, then runs insight_generate Dream to surface cross-domain discoveries you'd never find manually.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def ingest_pdf_and_dream(pdf_text, namespace="kb"):
    """Chunk *pdf_text* into memory, then run insight synthesis via Dream."""
    # Fixed 500-character chunks; the final chunk may be shorter.
    chunk_size = 500
    chunks = [
        pdf_text[start:start + chunk_size]
        for start in range(0, len(pdf_text), chunk_size)
    ]
    for idx, piece in enumerate(chunks):
        slop.memory.set(
            f"chunk:{idx}", piece, namespace=namespace
        )

    print(f"Stored {len(chunks)} chunks")

    # Cross-domain insight synthesis over everything just stored.
    session = slop.dream.start(
        namespace=namespace,
        strategy="insight_generate",  # Cross-domain discovery
        model="claude-sonnet-4-6"
    )

    report = slop.dream.wait(session.dream_id)
    print(f"Synthesized {report.insights_generated} insights")
    print(
        f"Compression: "
        f"{report.compression_metrics['ratio']}x"
    )
    return report
Research Advanced
LangChain Agent + Slopshop Memory
Drop-in persistent memory upgrade for any LangChain agent — replaces in-process buffers with durable, searchable Slopshop memory.
Python
from langchain.agents import AgentExecutor
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

# Use Slopshop as the memory layer for LangChain agents
class SlopMemory:
    """Minimal LangChain-style memory object backed by Slopshop storage.

    Duck-types the LangChain memory interface: save_context persists a
    turn, load_memory_variables retrieves relevant history.
    """

    def save_context(self, inputs, outputs):
        """Persist one turn under a key derived from the inputs.

        Fix: the original keyed on the builtin ``hash(str(inputs))``.
        Python salts str hashes per interpreter process
        (PYTHONHASHSEED), so those keys were not reproducible across
        restarts — wrong for *persistent* memory. A SHA-256 digest is
        stable everywhere. Retrieval is unaffected: lookups go through
        semantic search, not exact keys.
        """
        import hashlib

        digest = hashlib.sha256(str(inputs).encode("utf-8")).hexdigest()[:16]
        slop.memory.set(
            f"ctx:{digest}",
            str(outputs),
            namespace="langchain"
        )

    def load_memory_variables(self, inputs):
        """Return up to three stored turns relevant to *inputs*."""
        results = slop.memory.search(
            str(inputs), namespace="langchain"
        )
        return {
            "history": [
                r.value for r in results.results[:3]
            ]
        }

# Drop-in memory upgrade for any LangChain agent
# NOTE(review): SlopMemory duck-types the LangChain memory interface;
# confirm your LangChain version accepts non-BaseMemory objects here.
memory = SlopMemory()

# Pass to your existing AgentExecutor
# agent = AgentExecutor(agent=..., memory=memory)
Code Intermediate
Memory Replay Debugger
Replays a full agent session timeline from a timestamp, printing every memory event and Dream report in chronological order.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def replay_agent_session(namespace, from_timestamp):
    """Print every memory event and Dream report since *from_timestamp*.

    Replays the namespace's keys in chronological order, then lists
    any dream sessions started inside the same window.
    """
    memories = slop.memory.list(namespace=namespace)

    # Keys created in the window, oldest first.
    in_window = [
        m for m in memories.keys
        if m.created_at >= from_timestamp
    ]
    timeline = sorted(in_window, key=lambda m: m.created_at)

    print(f"Replaying {len(timeline)} memory events...")
    for event in timeline:
        val = slop.memory.get(
            event.key, namespace=namespace
        )
        print(
            f"[{event.created_at}] "
            f"{event.key}: {val.value[:80]}..."
        )

    # Dream reports whose cycle started inside this window.
    sessions = slop.dream.list(namespace=namespace)
    for s in sessions:
        if s.started_at < from_timestamp:
            continue
        report = slop.dream.report(s.dream_id)
        print(
            f"[DREAM] IS={report.intelligence_score} "
            f"strategy={report.strategy}"
        )
Multi-Agent Advanced
Collective Dream Namespace (Hive)
Multiple agents contribute insights to a shared Hive namespace throughout the day, then a collective Dream synthesizes all team intelligence overnight.
Python
import slopshop
import time

slop = slopshop.Client(api_key="YOUR_KEY")
HIVE_NS = "team-hive-v1"

def contribute_to_hive(agent_id, insight):
    """Store one agent's insight in the shared Hive namespace."""
    # Timestamped key keeps every contribution distinct.
    insight_key = f"insight:{agent_id}:{int(time.time())}"
    slop.memory.set(insight_key, insight, namespace=HIVE_NS)

def run_collective_dream():
    """Synthesize all agents' Hive contributions in one Dream cycle."""
    # full_cycle runs the complete synthesis pipeline over the
    # shared namespace, combining every contributor's memories.
    session = slop.dream.start(
        namespace=HIVE_NS,
        strategy="full_cycle",
        model="claude-opus-4-6"
    )
    report = slop.dream.wait(session.dream_id)
    print(f"Collective IS: {report.intelligence_score}")
    print(
        f"Themes: "
        f"{report.hierarchy_metadata['themes_generated']}"
    )
    return report

# Agents contribute throughout the day; each write lands in the
# shared HIVE_NS namespace under a timestamped key.
contribute_to_hive(
    "agent_sales",
    "Q2 pipeline looking strong, 3 enterprise deals"
)
contribute_to_hive(
    "agent_eng",
    "Latency improved 40% after caching layer"
)
run_collective_dream()  # Collective intelligence synthesized
Finance Vertical Advanced
Portfolio Risk Memory Agent
Tracks all portfolio positions in persistent memory, then runs a forecast Dream for Monte Carlo risk scenarios and causal edge analysis.
Python
import slopshop

slop = slopshop.Client(api_key="YOUR_KEY")

def track_portfolio_risk(portfolio_id, positions):
    """Record positions, then Dream up Monte Carlo risk scenarios.

    *positions* maps symbol -> dict with 'qty', 'price', 'beta' keys.
    Returns the intelligence score, top risk scenarios, and causal-edge
    count from the forecast report.
    """
    ns = f"portfolio:{portfolio_id}"

    # One memory entry per position, keyed by symbol.
    for symbol, data in positions.items():
        slop.memory.set(
            f"position:{symbol}",
            f"qty={data['qty']} price={data['price']} "
            f"risk={data['beta']}",
            namespace=ns
        )

    # Forecast-strategy Dream over the portfolio namespace.
    session = slop.dream.start(
        namespace=ns,
        strategy="forecast",  # Monte Carlo scenarios
        model="claude-sonnet-4-6"
    )
    report = slop.dream.wait(session.dream_id)

    # Keep only forecast-type entries; those are the risk scenarios.
    risk_scenarios = [
        entry for entry in report.entries
        if entry.get("type") == "forecast"
    ]
    return {
        "intelligence_score": report.intelligence_score,
        "risk_scenarios": risk_scenarios[:5],
        "causal_edges":
            report.hierarchy_metadata["causal_edges_added"]
    }
Code Starter
Memory Backup + Migration CLI Script
Exports an entire namespace to a JSON file and restores it to any target namespace — perfect for migrations, snapshots, and disaster recovery.
Python
import slopshop
import json

slop = slopshop.Client(api_key="YOUR_KEY")

def backup_namespace(namespace, output_file):
    """Export memories from *namespace* to a JSON file.

    NOTE(review): limit=1000 silently truncates larger namespaces —
    confirm whether memory.list paginates before relying on this for
    disaster recovery.
    """
    all_memories = slop.memory.list(
        namespace=namespace, limit=1000
    )

    # One record per key: fetch the value and remember its origin.
    backup = [
        {
            "key": key,
            "value": slop.memory.get(key, namespace=namespace).value,
            "namespace": namespace,
        }
        for key in all_memories.keys
    ]

    with open(output_file, "w") as f:
        json.dump(backup, f, indent=2)

    print(f"Backed up {len(backup)} memories to {output_file}")

def restore_namespace(input_file, target_namespace):
    """Load a backup JSON file into *target_namespace*."""
    with open(input_file) as f:
        backup = json.load(f)

    # Re-create every entry under the target namespace; the source
    # namespace recorded in the file is intentionally ignored so a
    # backup can be restored anywhere.
    for item in backup:
        slop.memory.set(
            item["key"], item["value"],
            namespace=target_namespace
        )

    print(
        f"Restored {len(backup)} memories "
        f"to {target_namespace}"
    )

# Usage — restore call is left commented out so a copy-paste run
# cannot accidentally overwrite a live namespace.
backup_namespace("production", "backup_2026_04_01.json")
# restore_namespace("backup_2026_04_01.json", "production_v2")

Ready to ship your agent?

Get an API key, install the SDK, and your first recipe will be running in under 5 minutes.