<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for vllm-npu</title>
    <link>https://pypi.org/project/vllm-npu/</link>
    <description>Recent updates to the Python Package Index for vllm-npu</description>
    <language>en</language>
    <item>
      <title>0.4.2.post2</title>
      <link>https://pypi.org/project/vllm-npu/0.4.2.post2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 16 Jan 2025 06:29:04 GMT</pubDate>
    </item>
    <item>
      <title>0.4.2.post1</title>
      <link>https://pypi.org/project/vllm-npu/0.4.2.post1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 16 Jan 2025 06:03:35 GMT</pubDate>
    </item>
    <item>
      <title>0.4.2</title>
      <link>https://pypi.org/project/vllm-npu/0.4.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 16 Jan 2025 04:12:45 GMT</pubDate>
    </item>
  </channel>
</rss>