<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for redprobe</title>
    <link>https://pypi.org/project/redprobe/</link>
    <description>Recent updates to the Python Package Index for redprobe</description>
    <language>en</language>
    <item>
      <title>0.1.6</title>
      <link>https://pypi.org/project/redprobe/0.1.6/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 09:11:34 GMT</pubDate>
    </item>
    <item>
      <title>0.1.5</title>
      <link>https://pypi.org/project/redprobe/0.1.5/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 09:06:41 GMT</pubDate>
    </item>
    <item>
      <title>0.1.4</title>
      <link>https://pypi.org/project/redprobe/0.1.4/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 09:01:00 GMT</pubDate>
    </item>
    <item>
      <title>0.1.3</title>
      <link>https://pypi.org/project/redprobe/0.1.3/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 08:42:58 GMT</pubDate>
    </item>
    <item>
      <title>0.1.2</title>
      <link>https://pypi.org/project/redprobe/0.1.2/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 08:38:29 GMT</pubDate>
    </item>
    <item>
      <title>0.1.1</title>
      <link>https://pypi.org/project/redprobe/0.1.1/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 08:27:10 GMT</pubDate>
    </item>
    <item>
      <title>0.1.0</title>
      <link>https://pypi.org/project/redprobe/0.1.0/</link>
      <description>A defensive security tool for hardening AI systems. Define YAML-based test cases to systematically probe LLMs for jailbreaks, prompt injections, biases, harmful content generation, data leakage, and policy violations before attackers find them. Compatible with any OpenAI-style API endpoint.</description>
      <author>audrey@feldroy.com</author>
      <pubDate>Tue, 03 Feb 2026 08:22:22 GMT</pubDate>
    </item>
  </channel>
</rss>