Skip to main content

Machine Learning Pipeline, Training and Logging for Me.

Project description

Enchanter

Codacy Badge CI testing license code size Using PyTorch

Machine Learning Pipeline, Training and Logging for Me.

Installation

pip install git+https://github.com/khirotaka/enchanter.git

Documentation

Example

Runner

run()

from comet_ml import Experiment
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader

import enchanter.addons as addons
import enchanter.wrappers as wrappers


class MNIST(nn.Module):
    """
    A small convolutional network for classifying MNIST digits.

    Two Conv2d + Swish + MaxPool2d stages extract features, and a
    two-layer fully connected head maps them to 10 class logits.
    """
    def __init__(self):
        super().__init__()
        # Feature extractor: (N, 1, 28, 28) -> (N, 64, 5, 5)
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 3),
            addons.Swish(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3),
            addons.Swish(),
            nn.MaxPool2d(2)
        )
        # Classifier head: 64*5*5 flattened features -> 10 logits
        self.fc = nn.Sequential(
            nn.Linear(64*5*5, 512),
            addons.Swish(),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        features = self.conv(x)
        flattened = features.view(features.size(0), -1)
        return self.fc(flattened)



# NOTE(review): the `MNIST` nn.Module class defined above shadows the
# `from torchvision.datasets import MNIST` import, so the two dataset
# constructor calls below would have instantiated the *model* with
# dataset arguments and crashed. Re-import the dataset under an alias
# so both the model class and the dataset stay usable.
from torchvision.datasets import MNIST as MNISTDataset

experiment = Experiment()

# MNIST train/test splits, images converted to float tensors in [0, 1].
# NOTE(review): assumes ./data already contains the MNIST files;
# pass download=True if the example should fetch them — confirm.
train_ds = MNISTDataset("./data", train=True, transform=transforms.ToTensor())
test_ds = MNISTDataset("./data", train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_ds, batch_size=64, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=64, shuffle=False)

model = MNIST()
optimizer = optim.Adam(model.parameters())
runner = wrappers.ClassificationRunner(
    model,
    optimizer,
    nn.CrossEntropyLoss(),
    experiment,
    # Halve the learning rate every 20 epochs.
    scheduler=optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
)
runner.add_loader(train_loader, "train").add_loader(test_loader, "test")

runner.train_config(epochs=1)
runner.run(verbose=True)

Comet.ml hyperparameter tuning

from comet_ml import Optimizer
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
import enchanter.wrappers as wrappers
import enchanter.addons as addons
import enchanter.addons.layers as layers
from enchanter.utils import comet

# Bayesian search over the activation function, minimizing average
# training loss across 5 trials.
config = comet.TunerConfigGenerator(
    algorithm="bayes",
    metric="train_avg_loss",
    objective="minimize",
    seed=0,
    trials=5
)

config.suggest_categorical("activation", ["addons.mish", "torch.relu", "torch.sigmoid"])

# Map each categorical string back to its callable explicitly instead of
# eval()-ing it: eval on configuration values is a code-injection risk
# and hides typos until runtime.
ACTIVATIONS = {
    "addons.mish": addons.mish,
    "torch.relu": torch.relu,
    "torch.sigmoid": torch.sigmoid,
}

opt = Optimizer(config.generate())

for experiment in opt.get_experiments():
    activation = ACTIVATIONS[experiment.get_parameter("activation")]
    model = layers.MLP([4, 512, 128, 3], activation)
    optimizer = optim.Adam(model.parameters())
    runner = wrappers.ClassificationRunner(
        model, optimizer=optimizer, criterion=nn.CrossEntropyLoss(), experiment=experiment
    )
    # Iris: 4 features, 3 classes; cast to the dtypes the runner expects.
    x, y = load_iris(return_X_y=True)
    x = x.astype("float32")
    y = y.astype("int64")

    runner.fit(x, y, epochs=1)

Project details


Download files

Download the file for your platform. If you're not sure which to choose, learn more about installing packages.

Source Distribution

enchanter-0.4.1.1.tar.gz (21.0 kB view hashes)

Uploaded Source

Built Distribution

enchanter-0.4.1.1-py3-none-any.whl (30.1 kB view hashes)

Uploaded Python 3

Supported by

AWS AWS Cloud computing and Security Sponsor Datadog Datadog Monitoring Fastly Fastly CDN Google Google Download Analytics Microsoft Microsoft PSF Sponsor Pingdom Pingdom Monitoring Sentry Sentry Error logging StatusPage StatusPage Status page