<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for tool-scorer</title>
    <link>https://pypi.org/project/tool-scorer/</link>
    <description>Recent updates to the Python Package Index for tool-scorer</description>
    <language>en</language>
    <item>
      <title>1.6.0</title>
      <link>https://pypi.org/project/tool-scorer/1.6.0/</link>
      <description>Lightweight tool-call testing for LLM agents. Deterministic, local, zero API cost. Compare expected vs actual tool calls in 3 lines of Python. Supports OpenAI, Anthropic, Gemini.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Fri, 20 Mar 2026 15:39:36 GMT</pubDate>
    </item>
    <item>
      <title>1.5.0</title>
      <link>https://pypi.org/project/tool-scorer/1.5.0/</link>
      <description>Lightweight tool-call testing for LLM agents. Deterministic, local, zero API cost. Compare expected vs actual tool calls in 3 lines of Python. Supports OpenAI, Anthropic, Gemini.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Fri, 06 Feb 2026 19:01:23 GMT</pubDate>
    </item>
    <item>
      <title>1.4.2</title>
      <link>https://pypi.org/project/tool-scorer/1.4.2/</link>
      <description>Pytest for LLM agents. Self-explaining metrics show exactly WHY your agent failed. Regression testing catches degradation before deployment. GitHub Action for one-click CI/CD. Supports OpenAI, Anthropic, Gemini.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Fri, 09 Jan 2026 13:20:46 GMT</pubDate>
    </item>
    <item>
      <title>1.4.1</title>
      <link>https://pypi.org/project/tool-scorer/1.4.1/</link>
      <description>Pytest for LLM agents. Self-explaining metrics show exactly WHY your agent failed. Regression testing catches degradation before deployment. GitHub Action for one-click CI/CD. Supports OpenAI, Anthropic, Gemini.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Fri, 09 Jan 2026 13:17:24 GMT</pubDate>
    </item>
    <item>
      <title>1.4.0</title>
      <link>https://pypi.org/project/tool-scorer/1.4.0/</link>
      <description>Pytest for LLM agents. Self-explaining metrics show exactly WHY your agent failed. Regression testing catches degradation before deployment. GitHub Action for one-click CI/CD. Supports OpenAI, Anthropic, Gemini.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Fri, 09 Jan 2026 13:14:56 GMT</pubDate>
    </item>
    <item>
      <title>1.3.3</title>
      <link>https://pypi.org/project/tool-scorer/1.3.3/</link>
      <description>Catch LLM agent regressions before deployment. Test tool-calling accuracy for OpenAI, Anthropic, Gemini with pytest integration and CI/CD workflows.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Tue, 28 Oct 2025 21:46:54 GMT</pubDate>
    </item>
    <item>
      <title>1.3.2</title>
      <link>https://pypi.org/project/tool-scorer/1.3.2/</link>
      <description>Catch LLM agent regressions before deployment. Test tool-calling accuracy for OpenAI, Anthropic, Gemini with pytest integration and CI/CD workflows.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Tue, 28 Oct 2025 21:45:05 GMT</pubDate>
    </item>
    <item>
      <title>1.3.1</title>
      <link>https://pypi.org/project/tool-scorer/1.3.1/</link>
      <description>Catch LLM agent regressions before deployment. Test tool-calling accuracy for OpenAI, Anthropic, Gemini with pytest integration and CI/CD workflows.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Tue, 28 Oct 2025 21:41:10 GMT</pubDate>
    </item>
    <item>
      <title>1.3.0</title>
      <link>https://pypi.org/project/tool-scorer/1.3.0/</link>
      <description>Catch LLM agent regressions before deployment. Test tool-calling accuracy for OpenAI, Anthropic, Gemini with pytest integration and CI/CD workflows.</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Tue, 28 Oct 2025 21:37:36 GMT</pubDate>
    </item>
    <item>
      <title>1.2.0</title>
      <link>https://pypi.org/project/tool-scorer/1.2.0/</link>
      <description>Evaluate LLM tool usage and function calling accuracy with comprehensive metrics, support for OpenAI/Anthropic/LangChain, pytest integration, and beautiful reports</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Sat, 18 Oct 2025 10:59:17 GMT</pubDate>
    </item>
    <item>
      <title>1.1.1</title>
      <link>https://pypi.org/project/tool-scorer/1.1.1/</link>
      <description>Evaluate LLM tool usage and function calling accuracy with comprehensive metrics, support for OpenAI/Anthropic/LangChain, pytest integration, and beautiful reports</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Sat, 18 Oct 2025 10:37:42 GMT</pubDate>
    </item>
    <item>
      <title>1.1.0</title>
      <link>https://pypi.org/project/tool-scorer/1.1.0/</link>
      <description>Evaluate LLM tool usage and function calling accuracy with comprehensive metrics, support for OpenAI/Anthropic/LangChain, pytest integration, and beautiful reports</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Sat, 18 Oct 2025 10:34:58 GMT</pubDate>
    </item>
    <item>
      <title>1.0.4</title>
      <link>https://pypi.org/project/tool-scorer/1.0.4/</link>
      <description>Evaluate LLM tool usage and function calling accuracy with comprehensive metrics, support for OpenAI/Anthropic/LangChain, pytest integration, and beautiful reports</description>
      <author>yotambarun93@gmail.com</author>
      <pubDate>Mon, 13 Oct 2025 11:32:28 GMT</pubDate>
    </item>
    <item>
      <title>0.1.0</title>
      <link>https://pypi.org/project/tool-scorer/0.1.0/</link>
      <description>A Python package for evaluating LLM tool usage against gold standard specifications</description>
      <author>yotam@example.com</author>
      <pubDate>Fri, 10 Oct 2025 23:09:27 GMT</pubDate>
    </item>
  </channel>
</rss>