<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for ttyg-evaluation</title>
    <link>https://pypi.org/project/ttyg-evaluation/</link>
    <description>Recent updates to the Python Package Index for ttyg-evaluation</description>
    <language>en</language>
    <item>
      <title>3.0.0</title>
      <link>https://pypi.org/project/ttyg-evaluation/3.0.0/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a Q&amp;A dataset with tool call expectations.</description>
      <pubDate>Wed, 16 Jul 2025 12:40:42 GMT</pubDate>
    </item>
    <item>
      <title>2.2.0</title>
      <link>https://pypi.org/project/ttyg-evaluation/2.2.0/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a Q&amp;A dataset with tool call expectations.</description>
      <pubDate>Tue, 01 Jul 2025 06:37:47 GMT</pubDate>
    </item>
    <item>
      <title>2.1.2</title>
      <link>https://pypi.org/project/ttyg-evaluation/2.1.2/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a gold-standard corpus of tool call expectations.</description>
      <pubDate>Thu, 19 Jun 2025 09:59:27 GMT</pubDate>
    </item>
    <item>
      <title>2.1.1</title>
      <link>https://pypi.org/project/ttyg-evaluation/2.1.1/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a gold-standard corpus of tool call expectations.</description>
      <pubDate>Wed, 18 Jun 2025 16:02:13 GMT</pubDate>
    </item>
    <item>
      <title>2.1.0</title>
      <link>https://pypi.org/project/ttyg-evaluation/2.1.0/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a gold-standard corpus of tool call expectations.</description>
      <pubDate>Tue, 17 Jun 2025 12:42:33 GMT</pubDate>
    </item>
    <item>
      <title>2.0.0</title>
      <link>https://pypi.org/project/ttyg-evaluation/2.0.0/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a gold-standard corpus of tool call expectations.</description>
      <pubDate>Mon, 16 Jun 2025 15:33:35 GMT</pubDate>
    </item>
    <item>
      <title>1.0.1</title>
      <link>https://pypi.org/project/ttyg-evaluation/1.0.1/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a gold-standard corpus of tool call expectations.</description>
      <pubDate>Fri, 23 May 2025 13:31:20 GMT</pubDate>
    </item>
    <item>
      <title>1.0.0</title>
      <link>https://pypi.org/project/ttyg-evaluation/1.0.0/</link>
      <description>Talk to Your Graph (TTYG) Evaluation is a Python module for evaluating whether LLM agents correctly orchestrate and invoke available tools to answer user questions, based on a gold-standard corpus of tool call expectations.</description>
      <pubDate>Thu, 22 May 2025 14:40:00 GMT</pubDate>
    </item>
  </channel>
</rss>