<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
	<channel>
		<title>OpenAI Guardrails Registry — Blog</title>
		<link>https://openaiguardrails.org/blog/</link>
		<atom:link href="https://openaiguardrails.org/rss.xml" rel="self" type="application/rss+xml" />
		<description>Research and engineering notes on AI guardrails, policy-as-code, and safe LLM deployment.</description>
		<language>en-us</language>
		<lastBuildDate>Sun, 10 May 2026 18:55:54 GMT</lastBuildDate>
		<item>
			<title>The Forge: Hardening AI Safety Inside the Noir Sandbox</title>
			<link>https://openaiguardrails.org/blog/the-forge-hardening-ai-safety-in-the-noir-sandbox.html</link>
			<guid isPermaLink="true">https://openaiguardrails.org/blog/the-forge-hardening-ai-safety-in-the-noir-sandbox.html</guid>
			<pubDate>Sun, 10 May 2026 00:00:00 GMT</pubDate>
			<description>The Forge is Noir’s adversarial AI safety sandbox for building, testing, and deploying production-grade guardrails. Replay attacks, validate rail logic, simulate prompt injection scenarios, and promote hardened policies directly into CI/CD and distributed enforcement pipelines.</description>
			<author>noreply@openaiguardrails.org (auth0|69ebedaa1d4731384e69df25)</author>
			<category>DevSecOps</category>
			<category>Noir Guardrails</category>
			<category>Policy Enforcement</category>
		</item>
		<item>
			<title>The Probe Terminal: Exposing the Attack Surface of Agentic AI</title>
			<link>https://openaiguardrails.org/blog/probe-terminal-agentic-ai-attack-surface.html</link>
			<guid isPermaLink="true">https://openaiguardrails.org/blog/probe-terminal-agentic-ai-attack-surface.html</guid>
			<pubDate>Sun, 10 May 2026 00:00:00 GMT</pubDate>
			<description>Agentic AI introduces a new security frontier where prompts, orchestration logic, and runtime behavior become the attack surface. The Probe Terminal enables automated adversarial testing, real-time risk grading, and continuous AI security enforcement without sacrificing performance.</description>
			<author>noreply@openaiguardrails.org (auth0|69ebedaa1d4731384e69df25)</author>
			<category>Agentic AI</category>
			<category>AI Security</category>
			<category>Prompt Injection</category>
			<category>LLM Security</category>
		</item>
	</channel>
</rss>
