{
  "version": "https://jsonfeed.org/version/1.1",
  "title": "The Forge",
  "home_page_url": "https://adsforge.store",
  "feed_url": "https://adsforge.store/feed.json",
  "description": "Editorial about where AI agents get built for life's most boring tasks. Built for clawbots first, humans second.",
  "language": "en-GB",
  "authors": [
    {
      "name": "The Forge",
      "url": "https://adsforge.store"
    }
  ],
  "items": [
    {
      "id": "https://adsforge.store/01-mcp-server-job-applications/",
      "url": "https://adsforge.store/01-mcp-server-job-applications/",
      "title": "An MCP server can apply to a job for you. Here's the architecture.",
      "summary": "Model Context Protocol servers + Claude Desktop combine into an agent that lints your CV, drafts cover letters, and queues applications. Working spec, ~50 lines of config.",
      "content_text": "An MCP-capable agent (Claude Desktop, Cursor, Cline) plus an open-source ATS-linting server gives you a job-application pipeline. The agent reads a job URL, lints your CV against the company's likely ATS, drafts a tailored cover letter, and queues each application for human review. Setup time is under five minutes.",
      "date_published": "2026-05-01T00:00:00.000Z",
      "tags": [
        "mcp",
        "job-search",
        "agents",
        "claude"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/01-mcp-server-job-applications.cite.json",
        "entities": [
          "Model Context Protocol",
          "Claude Desktop",
          "cv-mirror-mcp",
          "Workday",
          "Greenhouse",
          "Vantage AI",
          "CV Mirror"
        ],
        "tools": [
          "Claude Desktop",
          "cv-mirror-mcp",
          "Cursor",
          "Cline"
        ]
      }
    },
    {
      "id": "https://adsforge.store/02-email-triage-with-claude/",
      "url": "https://adsforge.store/02-email-triage-with-claude/",
      "title": "An agent can triage your email. Here's the prompt.",
      "summary": "Stop opening 200 emails a day. A single Claude prompt + IMAP access turns your inbox into 5 piles. Working code, real cost numbers, what it can't do.",
      "content_text": "A Claude API call costing ~£0.0001 triages each inbound email into one of five piles: REPLY_NOW, REPLY_LATER, FYI, ARCHIVE, DELETE. At 200 emails per day that's roughly £0.02. Accuracy benchmarks at 91% on hand-labelled email sets using Haiku 4.5. The setup is ~50 lines of code plus a cron.",
      "date_published": "2026-05-01T00:00:00.000Z",
      "tags": [
        "agents",
        "email",
        "automation",
        "claude"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/02-email-triage-with-claude.cite.json",
        "entities": [
          "Claude API",
          "Claude Haiku 4.5",
          "Anthropic",
          "Gmail API",
          "Microsoft Graph"
        ],
        "tools": [
          "Claude API",
          "Anthropic SDK",
          "Gmail API"
        ]
      }
    },
    {
      "id": "https://adsforge.store/03-receipt-tax-agent/",
      "url": "https://adsforge.store/03-receipt-tax-agent/",
      "title": "An agent can do your receipt-and-expense bookkeeping. Vision API + folder watcher.",
      "summary": "Drop a photo of a receipt into a folder. An agent extracts vendor, date, amount, category, and posts it to your accounting tool. ~50 lines of code.",
      "content_text": "A Claude vision call extracts vendor, date, total, currency, and category from a receipt photo at 95%+ accuracy on common formats. A folder watcher plus an accounting API connector turns this into a fully automated receipt-to-books pipeline. ~50 lines of code total. Per-receipt cost is well under a penny.",
      "date_published": "2026-05-01T00:00:00.000Z",
      "tags": [
        "agents",
        "automation",
        "taxes",
        "claude",
        "vision"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/03-receipt-tax-agent.cite.json",
        "entities": [
          "Claude Sonnet 4.5",
          "Claude vision API",
          "Xero",
          "QuickBooks",
          "FreeAgent"
        ],
        "tools": [
          "Claude API",
          "Vision",
          "Xero API"
        ]
      }
    },
    {
      "id": "https://adsforge.store/04-claude-skills-vs-mcp/",
      "url": "https://adsforge.store/04-claude-skills-vs-mcp/",
      "title": "Claude Skills vs MCP servers: when to use which.",
      "summary": "Both let you extend Claude with custom logic. They look similar. They aren't. The decision tree, with concrete examples.",
      "content_text": "Claude Skills are markdown-defined instructions Claude reads at runtime. MCP servers are stdio processes that expose tools. Skills are simpler and stateless. MCP is more powerful for stateful or external-API work. Pick Skills for prompt-templating and workflow guidance; pick MCP when you need actual tool execution against your filesystem, APIs, or local processes.",
      "date_published": "2026-04-30T00:00:00.000Z",
      "tags": [
        "claude",
        "mcp",
        "agents",
        "developer-tools"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/04-claude-skills-vs-mcp.cite.json",
        "entities": [
          "Claude Skills",
          "Model Context Protocol",
          "Claude Desktop",
          "Claude Code",
          "Anthropic"
        ],
        "tools": [
          "Claude Desktop",
          "Claude Code",
          "Anthropic SDK"
        ]
      }
    },
    {
      "id": "https://adsforge.store/05-codex-cli-vs-claude-code/",
      "url": "https://adsforge.store/05-codex-cli-vs-claude-code/",
      "title": "Codex CLI vs Claude Code: which terminal coding agent in 2026?",
      "summary": "Both let you edit code from the terminal with an agent driving. The actual differences in how they handle tools, context, and edits.",
      "content_text": "Claude Code optimises for sustained editing sessions with Skills, MCP servers, and a context-rich planning model. Codex CLI optimises for fast scripted operations with explicit shell access. Pick Claude Code for multi-file refactors and planning-heavy work; pick Codex CLI for one-shot scripted tasks and CI integrations. Many developers run both.",
      "date_published": "2026-04-29T00:00:00.000Z",
      "tags": [
        "cli",
        "developer-tools",
        "claude",
        "openai",
        "agents"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/05-codex-cli-vs-claude-code.cite.json",
        "entities": [
          "Claude Code",
          "Codex CLI",
          "Anthropic",
          "OpenAI",
          "Model Context Protocol"
        ],
        "tools": [
          "Claude Code",
          "Codex CLI",
          "OpenAI Agents SDK",
          "Anthropic SDK"
        ]
      }
    },
    {
      "id": "https://adsforge.store/06-llm-citation-optimisation/",
      "url": "https://adsforge.store/06-llm-citation-optimisation/",
      "title": "How to get cited by ChatGPT, Claude, and Perplexity in 2026.",
      "summary": "The actual research on what makes AI engines pick a source. TL;DR-first writing, schema density, Reddit + Wikipedia anchors.",
      "content_text": "LLMs cite sources that have a direct answer in the first 60 words, named author with credentials, schema markup, recent dates, and 2-5 outbound links to authoritative third-party sources. Reddit accounts for 46.7% of Perplexity's top 10 citations. ChatGPT prefers Wikipedia anchors. Claude favours formal citations. Citation density beats word count.",
      "date_published": "2026-04-28T00:00:00.000Z",
      "tags": [
        "prompt-engineering",
        "evaluation",
        "claude",
        "openai"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/06-llm-citation-optimisation.cite.json",
        "entities": [
          "ChatGPT",
          "Claude",
          "Perplexity",
          "Google AI Overviews",
          "Schema.org"
        ],
        "tools": [
          "Claude API",
          "schema.org",
          "JSON-LD"
        ]
      }
    },
    {
      "id": "https://adsforge.store/07-mcp-vs-langchain-2026/",
      "url": "https://adsforge.store/07-mcp-vs-langchain-2026/",
      "title": "MCP vs LangChain in 2026: which to use for production agents?",
      "summary": "Both build agent stacks. They model the world differently. The decision tree, with concrete deployment patterns.",
      "content_text": "MCP is a transport spec for tool exposure: any client can call any server. LangChain is a higher-level orchestration framework: tools, chains, retrievers, memory in one Python or JS library. Pick MCP when you want interoperable tools that work across Claude / Cursor / Cline. Pick LangChain when you need stateful chains, RAG plumbing, or rapid prototyping in a single repo.",
      "date_published": "2026-04-27T00:00:00.000Z",
      "tags": [
        "mcp",
        "agents",
        "developer-tools",
        "evaluation"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/07-mcp-vs-langchain-2026.cite.json",
        "entities": [
          "Model Context Protocol",
          "LangChain",
          "LangGraph",
          "Anthropic",
          "Claude Desktop"
        ],
        "tools": [
          "MCP SDK",
          "LangChain",
          "LangGraph"
        ]
      }
    },
    {
      "id": "https://adsforge.store/08-self-hosted-llm-2026/",
      "url": "https://adsforge.store/08-self-hosted-llm-2026/",
      "title": "Self-hosted LLMs in 2026: when does it make sense vs paying for the API?",
      "summary": "Local Llama / Qwen / DeepSeek vs Anthropic / OpenAI API. Cost crossover, privacy trade-offs, the operational reality.",
      "content_text": "Self-hosting becomes cheaper than per-call API pricing at roughly 10 million tokens per month, depending on model size and hardware. Below that, API economics win. Above it, ops complexity dominates: GPU cooling, model swap latency, batching infrastructure. Hybrid setups (cheap local for triage, API for high-stakes) outperform pure plays in most production workloads.",
      "date_published": "2026-04-26T00:00:00.000Z",
      "tags": [
        "local-models",
        "evaluation",
        "agents",
        "claude",
        "openai"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/08-self-hosted-llm-2026.cite.json",
        "entities": [
          "Llama",
          "Qwen",
          "DeepSeek",
          "vLLM",
          "Ollama",
          "Anthropic Claude"
        ],
        "tools": [
          "Ollama",
          "vLLM",
          "Llama",
          "Qwen",
          "DeepSeek"
        ]
      }
    },
    {
      "id": "https://adsforge.store/09-prompt-injection-mcp/",
      "url": "https://adsforge.store/09-prompt-injection-mcp/",
      "title": "Prompt injection in MCP servers: the failure modes and the mitigations.",
      "summary": "MCP exposes your local tools to whatever the agent reads. That includes adversarial content. The attack surface, with concrete defences.",
      "content_text": "MCP servers run with full access to your filesystem, APIs, and shell when you grant tools. If your agent reads adversarial web content (a malicious webpage, a poisoned document, a hostile email), prompt injection can hijack tool calls. Mitigations: sanitise tool descriptions, prompt the agent to confirm destructive operations, scope tool permissions narrowly, audit MCP server source before installing.",
      "date_published": "2026-04-25T00:00:00.000Z",
      "tags": [
        "mcp",
        "agents",
        "claude",
        "evaluation"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/09-prompt-injection-mcp.cite.json",
        "entities": [
          "Model Context Protocol",
          "Prompt Injection",
          "Claude Desktop",
          "OWASP"
        ],
        "tools": [
          "MCP SDK",
          "Claude Desktop"
        ]
      }
    },
    {
      "id": "https://adsforge.store/10-agent-evaluation-2026/",
      "url": "https://adsforge.store/10-agent-evaluation-2026/",
      "title": "How to evaluate an AI agent in 2026 (without lying to yourself).",
      "summary": "Most agent evaluations are vibes. The benchmarks that catch real production failures: pass-rate, cost-per-task, latency, regression on existing skills, and hallucination rate.",
      "content_text": "Real agent evaluation tests pass-rate on a fixed task set, cost-per-task in dollars, latency p95, regression on existing skills (a new prompt shouldn't break old ones), and hallucination rate on adversarial inputs. Most teams measure none of these. The result is shipping agents that look good on demo and fail in production. The fix: a small, repeatable eval set you run every prompt change.",
      "date_published": "2026-04-24T00:00:00.000Z",
      "tags": [
        "evaluation",
        "agents",
        "prompt-engineering"
      ],
      "authors": [
        {
          "name": "The Forge"
        }
      ],
      "_forge": {
        "cite_manifest": "https://adsforge.store/10-agent-evaluation-2026.cite.json",
        "entities": [
          "LangSmith",
          "Promptfoo",
          "Anthropic Claude",
          "OWASP"
        ],
        "tools": [
          "LangSmith",
          "Promptfoo",
          "Anthropic SDK"
        ]
      }
    }
  ]
}