<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for foolbox</title>
    <link>https://pypi.org/project/foolbox/</link>
    <description>Recent updates to the Python Package Index for foolbox</description>
    <language>en</language>
    <item>
      <title>3.3.4</title>
      <link>https://pypi.org/project/foolbox/3.3.4/</link>
      <description>Foolbox is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>foolbox+rzrolandzimmermann@gmail.com</author>
      <pubDate>Mon, 04 Mar 2024 20:59:17 GMT</pubDate>
    </item>
    <item>
      <title>3.3.3</title>
      <link>https://pypi.org/project/foolbox/3.3.3/</link>
      <description>Foolbox is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>foolbox+rzrolandzimmermann@gmail.com</author>
      <pubDate>Sat, 02 Apr 2022 15:26:45 GMT</pubDate>
    </item>
    <item>
      <title>3.3.2</title>
      <link>https://pypi.org/project/foolbox/3.3.2/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Tue, 08 Mar 2022 08:13:02 GMT</pubDate>
    </item>
    <item>
      <title>3.3.1</title>
      <link>https://pypi.org/project/foolbox/3.3.1/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Tue, 23 Feb 2021 07:07:53 GMT</pubDate>
    </item>
    <item>
      <title>3.3.0</title>
      <link>https://pypi.org/project/foolbox/3.3.0/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Wed, 10 Feb 2021 08:54:33 GMT</pubDate>
    </item>
    <item>
      <title>3.2.1</title>
      <link>https://pypi.org/project/foolbox/3.2.1/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sat, 26 Sep 2020 06:49:13 GMT</pubDate>
    </item>
    <item>
      <title>3.2.0</title>
      <link>https://pypi.org/project/foolbox/3.2.0/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sat, 26 Sep 2020 06:28:45 GMT</pubDate>
    </item>
    <item>
      <title>3.1.1</title>
      <link>https://pypi.org/project/foolbox/3.1.1/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sat, 29 Aug 2020 21:00:49 GMT</pubDate>
    </item>
    <item>
      <title>3.0.4</title>
      <link>https://pypi.org/project/foolbox/3.0.4/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Fri, 03 Jul 2020 13:57:26 GMT</pubDate>
    </item>
    <item>
      <title>3.0.2</title>
      <link>https://pypi.org/project/foolbox/3.0.2/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sat, 23 May 2020 14:34:24 GMT</pubDate>
    </item>
    <item>
      <title>3.0.1</title>
      <link>https://pypi.org/project/foolbox/3.0.1/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sat, 23 May 2020 07:46:06 GMT</pubDate>
    </item>
    <item>
      <title>3.0.0</title>
      <link>https://pypi.org/project/foolbox/3.0.0/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sun, 22 Mar 2020 21:42:44 GMT</pubDate>
    </item>
    <item>
      <title>3.0.0b1</title>
      <link>https://pypi.org/project/foolbox/3.0.0b1/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sun, 16 Feb 2020 23:26:49 GMT</pubDate>
    </item>
    <item>
      <title>3.0.0b0</title>
      <link>https://pypi.org/project/foolbox/3.0.0b0/</link>
      <description>Foolbox Native is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX</description>
      <author>git@jonasrauber.de</author>
      <pubDate>Sat, 15 Feb 2020 15:31:43 GMT</pubDate>
    </item>
    <item>
      <title>2.4.0</title>
      <link>https://pypi.org/project/foolbox/2.4.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Fri, 07 Feb 2020 14:35:26 GMT</pubDate>
    </item>
    <item>
      <title>2.3.0</title>
      <link>https://pypi.org/project/foolbox/2.3.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 04 Nov 2019 22:01:52 GMT</pubDate>
    </item>
    <item>
      <title>2.2.1</title>
      <link>https://pypi.org/project/foolbox/2.2.1/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Tue, 29 Oct 2019 13:09:41 GMT</pubDate>
    </item>
    <item>
      <title>2.2.0</title>
      <link>https://pypi.org/project/foolbox/2.2.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 28 Oct 2019 15:54:49 GMT</pubDate>
    </item>
    <item>
      <title>2.1.0</title>
      <link>https://pypi.org/project/foolbox/2.1.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Sun, 27 Oct 2019 09:19:11 GMT</pubDate>
    </item>
    <item>
      <title>2.0.0</title>
      <link>https://pypi.org/project/foolbox/2.0.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Wed, 23 Oct 2019 14:00:48 GMT</pubDate>
    </item>
    <item>
      <title>2.0.0rc0</title>
      <link>https://pypi.org/project/foolbox/2.0.0rc0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Fri, 18 Oct 2019 10:38:53 GMT</pubDate>
    </item>
    <item>
      <title>2.0.0b0</title>
      <link>https://pypi.org/project/foolbox/2.0.0b0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Tue, 21 May 2019 19:16:13 GMT</pubDate>
    </item>
    <item>
      <title>1.8.0</title>
      <link>https://pypi.org/project/foolbox/1.8.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Fri, 16 Nov 2018 16:17:26 GMT</pubDate>
    </item>
    <item>
      <title>1.7.0</title>
      <link>https://pypi.org/project/foolbox/1.7.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Wed, 24 Oct 2018 07:41:30 GMT</pubDate>
    </item>
    <item>
      <title>1.6.2</title>
      <link>https://pypi.org/project/foolbox/1.6.2/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Fri, 12 Oct 2018 10:19:57 GMT</pubDate>
    </item>
    <item>
      <title>1.6.1</title>
      <link>https://pypi.org/project/foolbox/1.6.1/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 08 Oct 2018 10:57:10 GMT</pubDate>
    </item>
    <item>
      <title>1.6.0</title>
      <link>https://pypi.org/project/foolbox/1.6.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Fri, 05 Oct 2018 12:29:17 GMT</pubDate>
    </item>
    <item>
      <title>1.5.0</title>
      <link>https://pypi.org/project/foolbox/1.5.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Thu, 27 Sep 2018 15:57:55 GMT</pubDate>
    </item>
    <item>
      <title>1.4.0</title>
      <link>https://pypi.org/project/foolbox/1.4.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Tue, 18 Sep 2018 12:37:17 GMT</pubDate>
    </item>
    <item>
      <title>1.3.2</title>
      <link>https://pypi.org/project/foolbox/1.3.2/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 06 Aug 2018 10:39:52 GMT</pubDate>
    </item>
    <item>
      <title>1.3.1</title>
      <link>https://pypi.org/project/foolbox/1.3.1/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Wed, 11 Jul 2018 08:28:30 GMT</pubDate>
    </item>
    <item>
      <title>1.3.0</title>
      <link>https://pypi.org/project/foolbox/1.3.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Tue, 10 Jul 2018 15:51:30 GMT</pubDate>
    </item>
    <item>
      <title>1.2.0</title>
      <link>https://pypi.org/project/foolbox/1.2.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Wed, 27 Jun 2018 07:45:07 GMT</pubDate>
    </item>
    <item>
      <title>1.1.0</title>
      <link>https://pypi.org/project/foolbox/1.1.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Fri, 23 Mar 2018 12:29:47 GMT</pubDate>
    </item>
    <item>
      <title>1.0.0</title>
      <link>https://pypi.org/project/foolbox/1.0.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Tue, 20 Mar 2018 10:16:53 GMT</pubDate>
    </item>
    <item>
      <title>0.15.0</title>
      <link>https://pypi.org/project/foolbox/0.15.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Thu, 15 Mar 2018 13:23:05 GMT</pubDate>
    </item>
    <item>
      <title>0.14.0</title>
      <link>https://pypi.org/project/foolbox/0.14.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 05 Mar 2018 18:11:03 GMT</pubDate>
    </item>
    <item>
      <title>0.13.0</title>
      <link>https://pypi.org/project/foolbox/0.13.0/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Thu, 01 Mar 2018 09:18:50 GMT</pubDate>
    </item>
    <item>
      <title>0.12.4</title>
      <link>https://pypi.org/project/foolbox/0.12.4/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 19 Feb 2018 12:42:02 GMT</pubDate>
    </item>
    <item>
      <title>0.12.3</title>
      <link>https://pypi.org/project/foolbox/0.12.3/</link>
      <description>Python toolbox to create adversarial examples that fool neural networks</description>
      <author>opensource@bethgelab.org</author>
      <pubDate>Mon, 19 Feb 2018 12:39:39 GMT</pubDate>
    </item>
  </channel>
</rss>