<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for evalview</title>
    <link>https://pypi.org/project/evalview/</link>
    <description>Recent updates to the Python Package Index for evalview</description>
    <language>en</language>
    <item>
      <title>0.6.1</title>
      <link>https://pypi.org/project/evalview/0.6.1/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sat, 28 Mar 2026 22:24:54 GMT</pubDate>
    </item>
    <item>
      <title>0.6.0</title>
      <link>https://pypi.org/project/evalview/0.6.0/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Fri, 27 Mar 2026 08:47:37 GMT</pubDate>
    </item>
    <item>
      <title>0.5.5</title>
      <link>https://pypi.org/project/evalview/0.5.5/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Wed, 25 Mar 2026 09:27:28 GMT</pubDate>
    </item>
    <item>
      <title>0.5.4</title>
      <link>https://pypi.org/project/evalview/0.5.4/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Mon, 23 Mar 2026 15:34:24 GMT</pubDate>
    </item>
    <item>
      <title>0.5.3</title>
      <link>https://pypi.org/project/evalview/0.5.3/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Wed, 18 Mar 2026 22:15:47 GMT</pubDate>
    </item>
    <item>
      <title>0.5.2</title>
      <link>https://pypi.org/project/evalview/0.5.2/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Mon, 16 Mar 2026 16:56:01 GMT</pubDate>
    </item>
    <item>
      <title>0.5.1</title>
      <link>https://pypi.org/project/evalview/0.5.1/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Fri, 13 Mar 2026 20:14:53 GMT</pubDate>
    </item>
    <item>
      <title>0.5.0</title>
      <link>https://pypi.org/project/evalview/0.5.0/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 12 Mar 2026 12:08:50 GMT</pubDate>
    </item>
    <item>
      <title>0.4.1</title>
      <link>https://pypi.org/project/evalview/0.4.1/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Mon, 09 Mar 2026 09:50:21 GMT</pubDate>
    </item>
    <item>
      <title>0.4.0</title>
      <link>https://pypi.org/project/evalview/0.4.0/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 05 Mar 2026 10:57:01 GMT</pubDate>
    </item>
    <item>
      <title>0.3.2</title>
      <link>https://pypi.org/project/evalview/0.3.2/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Fri, 27 Feb 2026 11:09:26 GMT</pubDate>
    </item>
    <item>
      <title>0.3.1</title>
      <link>https://pypi.org/project/evalview/0.3.1/</link>
      <description>Open-source testing and regression detection framework for AI agents. Golden baseline diffing, CI/CD integration, works with LangGraph, CrewAI, OpenAI, Anthropic Claude, HuggingFace, Ollama, and MCP.</description>
      <author>hidai@evalview.com</author>
      <pubDate>Wed, 25 Feb 2026 11:02:02 GMT</pubDate>
    </item>
    <item>
      <title>0.3.0</title>
      <link>https://pypi.org/project/evalview/0.3.0/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic, Claude</description>
      <author>hidai@evalview.com</author>
      <pubDate>Fri, 20 Feb 2026 10:38:56 GMT</pubDate>
    </item>
    <item>
      <title>0.2.9</title>
      <link>https://pypi.org/project/evalview/0.2.9/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic, Claude</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 19 Feb 2026 06:39:21 GMT</pubDate>
    </item>
    <item>
      <title>0.2.8</title>
      <link>https://pypi.org/project/evalview/0.2.8/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic, Claude</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 19 Feb 2026 06:31:07 GMT</pubDate>
    </item>
    <item>
      <title>0.2.7</title>
      <link>https://pypi.org/project/evalview/0.2.7/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic, Claude</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 19 Feb 2026 06:23:35 GMT</pubDate>
    </item>
    <item>
      <title>0.2.6</title>
      <link>https://pypi.org/project/evalview/0.2.6/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic, Claude</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 19 Feb 2026 06:16:35 GMT</pubDate>
    </item>
    <item>
      <title>0.2.5</title>
      <link>https://pypi.org/project/evalview/0.2.5/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic, Claude</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sun, 15 Feb 2026 10:46:18 GMT</pubDate>
    </item>
    <item>
      <title>0.2.4</title>
      <link>https://pypi.org/project/evalview/0.2.4/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sun, 01 Feb 2026 23:18:16 GMT</pubDate>
    </item>
    <item>
      <title>0.2.3</title>
      <link>https://pypi.org/project/evalview/0.2.3/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sat, 24 Jan 2026 22:46:40 GMT</pubDate>
    </item>
    <item>
      <title>0.2.2</title>
      <link>https://pypi.org/project/evalview/0.2.2/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Mon, 19 Jan 2026 07:04:28 GMT</pubDate>
    </item>
    <item>
      <title>0.2.1</title>
      <link>https://pypi.org/project/evalview/0.2.1/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sun, 11 Jan 2026 22:13:28 GMT</pubDate>
    </item>
    <item>
      <title>0.2.0</title>
      <link>https://pypi.org/project/evalview/0.2.0/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sat, 10 Jan 2026 13:31:25 GMT</pubDate>
    </item>
    <item>
      <title>0.1.9</title>
      <link>https://pypi.org/project/evalview/0.1.9/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sat, 03 Jan 2026 14:56:14 GMT</pubDate>
    </item>
    <item>
      <title>0.1.8</title>
      <link>https://pypi.org/project/evalview/0.1.8/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Wed, 31 Dec 2025 21:51:39 GMT</pubDate>
    </item>
    <item>
      <title>0.1.7</title>
      <link>https://pypi.org/project/evalview/0.1.7/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Mon, 29 Dec 2025 14:22:07 GMT</pubDate>
    </item>
    <item>
      <title>0.1.6</title>
      <link>https://pypi.org/project/evalview/0.1.6/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sun, 21 Dec 2025 19:59:48 GMT</pubDate>
    </item>
    <item>
      <title>0.1.5</title>
      <link>https://pypi.org/project/evalview/0.1.5/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Thu, 18 Dec 2025 22:41:10 GMT</pubDate>
    </item>
    <item>
      <title>0.1.4</title>
      <link>https://pypi.org/project/evalview/0.1.4/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Tue, 09 Dec 2025 23:08:00 GMT</pubDate>
    </item>
    <item>
      <title>0.1.3</title>
      <link>https://pypi.org/project/evalview/0.1.3/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sun, 07 Dec 2025 23:03:10 GMT</pubDate>
    </item>
    <item>
      <title>0.1.2</title>
      <link>https://pypi.org/project/evalview/0.1.2/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Sun, 07 Dec 2025 22:47:46 GMT</pubDate>
    </item>
    <item>
      <title>0.1.1</title>
      <link>https://pypi.org/project/evalview/0.1.1/</link>
      <description>Pytest-style testing framework for AI agents — LangGraph, CrewAI, OpenAI, Anthropic</description>
      <author>hidai@evalview.com</author>
      <pubDate>Fri, 05 Dec 2025 21:48:09 GMT</pubDate>
    </item>
    <item>
      <title>0.1.0</title>
      <link>https://pypi.org/project/evalview/0.1.0/</link>
      <description>Testing framework for multi-step AI agents</description>
      <author>hidai@evalview.com</author>
      <pubDate>Wed, 03 Dec 2025 20:29:32 GMT</pubDate>
    </item>
  </channel>
</rss>