# Neural Processes
A framework for composing Neural Processes in Python. This package is currently under construction, and more functionality and documentation will be added soon. In the meantime, see the companion Julia package, NeuralProcesses.jl.
## Installation
See the instructions here. Then simply

```bash
pip install neuralprocesses
```
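The models run on either PyTorch or TensorFlow, so you will also want one of those backends available. As a quick, informal sanity check of the installation (assuming PyTorch is installed), you could run the following:

```python
import lab as B
import torch

import neuralprocesses.torch as nps

# Construct a small GNP and run it on random data to check that everything
# imports and executes.
gnp = nps.construct_gnp(dim_x=1, dim_y=1, likelihood="lowrank")
dist = gnp(
    B.randn(torch.float32, 4, 1, 5),  # Context inputs
    B.randn(torch.float32, 4, 1, 5),  # Context outputs
    B.randn(torch.float32, 4, 1, 3),  # Target inputs
)
print(dist.mean)
```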
## Examples of Predefined Models

### TensorFlow

#### GNP
```python
import lab as B
import tensorflow as tf

import neuralprocesses.tensorflow as nps

cnp = nps.construct_gnp(dim_x=2, dim_y=3, likelihood="lowrank")

dist = cnp(
    B.randn(tf.float32, 16, 2, 10),  # Context inputs
    B.randn(tf.float32, 16, 3, 10),  # Context outputs
    B.randn(tf.float32, 16, 2, 15),  # Target inputs
)

mean, var = dist.mean, dist.var

print(dist.logpdf(B.randn(tf.float32, 16, 3, 15)))
print(dist.sample())
print(dist.kl(dist))
print(dist.entropy())
```
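Throughout these examples, tensors follow a `(batch, dimension, num_points)` layout: above, the context set consists of 10 points with 2-dimensional inputs and 3-dimensional outputs, and predictions are requested at 15 target inputs. Continuing the snippet above, you would therefore expect the predictive mean to follow the same convention (an illustrative check, not taken from the documentation):

```python
# Shapes used above, in (batch, dimension, num_points) layout:
#   context inputs:  (16, 2, 10)
#   context outputs: (16, 3, 10)
#   target inputs:   (16, 2, 15)
# The predictive mean should then be laid out the same way.
print(B.shape(mean))  # Expected: (16, 3, 15)
```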
#### ConvGNP
```python
import lab as B
import tensorflow as tf

import neuralprocesses.tensorflow as nps

cnp = nps.construct_convgnp(dim_x=2, dim_y=3, likelihood="lowrank")

dist = cnp(
    B.randn(tf.float32, 16, 2, 10),  # Context inputs
    B.randn(tf.float32, 16, 3, 10),  # Context outputs
    B.randn(tf.float32, 16, 2, 15),  # Target inputs
)

mean, var = dist.mean, dist.var

print(dist.logpdf(B.randn(tf.float32, 16, 3, 15)))
print(dist.sample())
print(dist.kl(dist))
print(dist.entropy())
```
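The log-density shown above can double as a training objective. The snippet below is a minimal sketch rather than the package's prescribed training loop: `yt_observed` stands in for real observations at the target inputs, and the resulting loss can be minimised with whichever optimiser you prefer.

```python
# Continuing the ConvGNP example above.
yt_observed = B.randn(tf.float32, 16, 3, 15)  # Stand-in for real target observations.

# Negative average log-likelihood over the batch, to be minimised.
loss = -B.mean(dist.logpdf(yt_observed))
print(loss)
```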
#### ConvGNP with Auxiliary Variables
```python
import lab as B
import tensorflow as tf

import neuralprocesses.tensorflow as nps

cnp = nps.construct_convgnp(
    dim_x=2,
    dim_yc=(
        3,  # Observed data has three dimensions.
        1,  # First auxiliary variable has one dimension.
        2,  # Second auxiliary variable has two dimensions.
    ),
    dim_yt=3,  # Predictions have three dimensions.
    # Third auxiliary variable has four dimensions and is auxiliary information
    # specific to the target inputs.
    aux_t_dim=4,
    num_basis_functions=64,
    likelihood="lowrank",
)

observed_data = (
    B.randn(tf.float32, 16, 2, 10),
    B.randn(tf.float32, 16, 3, 10),
)

# Define three auxiliary variables. The first one is specified like the observed
# data: at arbitrary inputs.
aux_var1 = (
    B.randn(tf.float32, 16, 2, 12),
    B.randn(tf.float32, 16, 1, 12),  # Has one dimension.
)
# The second one is specified on a grid.
aux_var2 = (
    (B.randn(tf.float32, 16, 1, 25), B.randn(tf.float32, 16, 1, 35)),
    B.randn(tf.float32, 16, 2, 25, 35),  # Has two dimensions.
)
# The third one is specific to the target inputs. We could encode it like the
# first auxiliary variable `aux_var1`, but we illustrate how an MLP-style
# encoding can also be used. The number of data points must match the number
# of target inputs!
aux_var_t = B.randn(tf.float32, 16, 4, 15)  # Has four dimensions.

dist = cnp(
    [observed_data, aux_var1, aux_var2],
    B.randn(tf.float32, 16, 2, 15),
    aux_t=aux_var_t,
)

mean, var = dist.mean, dist.var

print(dist.logpdf(B.randn(tf.float32, 16, 3, 15)))
print(dist.sample())
print(dist.kl(dist))
print(dist.entropy())
```
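Note that although three context sets with different output dimensionalities are passed in, the predictions themselves are determined by `dim_yt` and the target inputs. As a rough, unverified check of what to expect:

```python
# Predictions are made at the 15 target inputs with dim_yt = 3 output
# dimensions, regardless of how many context sets were supplied.
print(B.shape(mean))  # Expected: (16, 3, 15)
```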
### PyTorch

#### GNP
```python
import lab as B
import torch

import neuralprocesses.torch as nps

cnp = nps.construct_gnp(dim_x=2, dim_y=3, likelihood="lowrank")

dist = cnp(
    B.randn(torch.float32, 16, 2, 10),  # Context inputs
    B.randn(torch.float32, 16, 3, 10),  # Context outputs
    B.randn(torch.float32, 16, 2, 15),  # Target inputs
)

mean, var = dist.mean, dist.var

print(dist.logpdf(B.randn(torch.float32, 16, 3, 15)))
print(dist.sample())
print(dist.kl(dist))
print(dist.entropy())
```
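A single optimisation step might look like the sketch below. This is not taken from the package's documentation and assumes the constructed model behaves like a `torch.nn.Module`, so that its weights are reachable through `.parameters()`; the optimiser and learning rate are arbitrary choices.

```python
# Hypothetical training step; assumes `cnp.parameters()` exposes the weights.
xc = B.randn(torch.float32, 16, 2, 10)  # Context inputs
yc = B.randn(torch.float32, 16, 3, 10)  # Context outputs
xt = B.randn(torch.float32, 16, 2, 15)  # Target inputs
yt = B.randn(torch.float32, 16, 3, 15)  # Stand-in for real target observations

opt = torch.optim.Adam(cnp.parameters(), lr=3e-4)

opt.zero_grad()
dist = cnp(xc, yc, xt)
loss = -B.mean(dist.logpdf(yt))  # Negative average log-likelihood.
loss.backward()
opt.step()
```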
#### ConvGNP
```python
import lab as B
import torch

import neuralprocesses.torch as nps

cnp = nps.construct_convgnp(dim_x=2, dim_y=3, likelihood="lowrank")

dist = cnp(
    B.randn(torch.float32, 16, 2, 10),  # Context inputs
    B.randn(torch.float32, 16, 3, 10),  # Context outputs
    B.randn(torch.float32, 16, 2, 15),  # Target inputs
)

mean, var = dist.mean, dist.var

print(dist.logpdf(B.randn(torch.float32, 16, 3, 15)))
print(dist.sample())
print(dist.kl(dist))
print(dist.entropy())
```
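For plotting or further analysis, the outputs can be converted to NumPy arrays with `B.to_numpy` from the `lab` package, which works for both backends:

```python
# Convert the predictive mean to a NumPy array, e.g. for plotting.
mean_np = B.to_numpy(mean)
print(mean_np.shape)
```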
## Build Your Own Model

### ConvGNP

#### TensorFlow
```python
import lab as B
import tensorflow as tf

import neuralprocesses.tensorflow as nps

dim_x = 1
dim_y = 1

# CNN architecture:
unet = nps.UNet(
    dim=dim_x,
    in_channels=2 * dim_y,
    out_channels=(2 + 512) * dim_y,
    channels=(8, 16, 16, 32, 32, 64),
)

# Discretisation of the functional embedding:
disc = nps.Discretisation(
    points_per_unit=64,
    multiple=2**unet.num_halving_layers,
    margin=0.1,
    dim=dim_x,
)

# Create the encoder and decoder and construct the model.
encoder = nps.FunctionalCoder(
    disc,
    nps.Chain(
        nps.PrependDensityChannel(),
        nps.SetConv(scale=2 / disc.points_per_unit),
        nps.DivideByFirstChannel(),
    ),
)
decoder = nps.Chain(
    unet,
    nps.SetConv(scale=2 / disc.points_per_unit),
    nps.LowRankGaussianLikelihood(512),
)
convgnp = nps.Model(encoder, decoder)

# Run the model on some random data.
dist = convgnp(
    B.randn(tf.float32, 16, 1, 10),  # Context inputs
    B.randn(tf.float32, 16, 1, 10),  # Context outputs
    B.randn(tf.float32, 16, 1, 15),  # Target inputs
)
```
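The returned object is a predictive distribution that can be queried just like those of the predefined models above, for example:

```python
# Query the predictive distribution of the hand-built ConvGNP.
mean, var = dist.mean, dist.var
print(dist.logpdf(B.randn(tf.float32, 16, 1, 15)))
print(dist.sample())
```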