<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for llm-autotune</title>
    <link>https://pypi.org/project/llm-autotune/</link>
    <description>Recent updates to the Python Package Index for llm-autotune</description>
    <language>en</language>
    <item>
      <title>0.1.1</title>
      <link>https://pypi.org/project/llm-autotune/0.1.1/</link>
      <description>39% faster TTFT, 67% less KV cache, zero config — autotune optimises local LLMs on Ollama, LM Studio, and MLX</description>
      <author>tanavc1@users.noreply.github.com</author>
      <pubDate>Wed, 15 Apr 2026 07:29:22 GMT</pubDate>
    </item>
    <item>
      <title>0.1.0</title>
      <link>https://pypi.org/project/llm-autotune/0.1.0/</link>
      <description>Automatic local-LLM inference configuration recommender for Ollama, LM Studio, and MLX</description>
      <author>tanavc1@users.noreply.github.com</author>
      <pubDate>Wed, 15 Apr 2026 03:12:06 GMT</pubDate>
    </item>
  </channel>
</rss>