Machine Learning Pipeline, Training and Logging for Me.
Project description
Enchanter
Machine Learning Pipeline, Training and Logging for Me.
Installation
pip install git+https://github.com/khirotaka/enchanter.git
Documentation
Example
Runner
run()
from comet_ml import Experiment
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader

import enchanter.addons as addons
import enchanter.wrappers as wrappers


class MNISTClassifier(nn.Module):
    """Small CNN for MNIST classification.

    NOTE: the class is named ``MNISTClassifier`` (not ``MNIST``) so it does
    not shadow ``torchvision.datasets.MNIST`` imported above — with the old
    name, ``MNIST("./data", train=True, ...)`` below would have constructed
    the model instead of the dataset.
    """

    def __init__(self):
        super(MNISTClassifier, self).__init__()
        # Two conv stages: 1->32->64 channels, 3x3 kernels, 2x2 max-pooling.
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 3),
            addons.Swish(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3),
            addons.Swish(),
            nn.MaxPool2d(2)
        )
        # After the conv stack a 28x28 input is reduced to 64 x 5 x 5.
        self.fc = nn.Sequential(
            nn.Linear(64*5*5, 512),
            addons.Swish(),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        out = self.conv(x)
        out = out.view(-1, 64*5*5)  # flatten for the fully connected head
        out = self.fc(out)
        return out


experiment = Experiment()

# Dataset / loaders: MNIST here is torchvision.datasets.MNIST.
train_ds = MNIST("./data", train=True, transform=transforms.ToTensor())
test_ds = MNIST("./data", train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_ds, batch_size=64, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=64, shuffle=False)

model = MNISTClassifier()
optimizer = optim.Adam(model.parameters())
runner = wrappers.ClassificationRunner(
    model,
    optimizer,
    nn.CrossEntropyLoss(),
    experiment,
    scheduler=optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
)
runner.add_loader(train_loader, "train").add_loader(test_loader, "test")
runner.train_config(epochs=1)
runner.run(verbose=True)
Comet.ml hyperparameter tuning
from comet_ml import Optimizer
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris

import enchanter.wrappers as wrappers
import enchanter.addons as addons
import enchanter.addons.layers as layers
from enchanter.utils import comet

# Bayesian hyperparameter search over the activation function,
# minimizing the average training loss over 5 trials.
config = comet.TunerConfigGenerator(
    algorithm="bayes",
    metric="train_avg_loss",
    objective="minimize",
    seed=0,
    trials=5
)
config.suggest_categorical("activation", ["addons.mish", "torch.relu", "torch.sigmoid"])

# Map the suggested parameter string to the actual callable instead of
# eval()-ing it: eval on a string returned by an external service is a
# needless code-injection risk and harder to read.
ACTIVATIONS = {
    "addons.mish": addons.mish,
    "torch.relu": torch.relu,
    "torch.sigmoid": torch.sigmoid,
}

opt = Optimizer(config.generate())

for experiment in opt.get_experiments():
    activation = ACTIVATIONS[experiment.get_parameter("activation")]
    model = layers.MLP([4, 512, 128, 3], activation)
    optimizer = optim.Adam(model.parameters())
    runner = wrappers.ClassificationRunner(
        model, optimizer=optimizer, criterion=nn.CrossEntropyLoss(), experiment=experiment
    )
    # Iris features/labels; cast to the dtypes PyTorch expects.
    x, y = load_iris(return_X_y=True)
    x = x.astype("float32")
    y = y.astype("int64")
    runner.fit(x, y, epochs=1)
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
enchanter-0.4.1.1.tar.gz
(21.0 kB
view hashes)
Built Distribution
Close
Hashes for enchanter-0.4.1.1-py3-none-any.whl
Algorithm | Hash digest | |
---|---|---|
SHA256 | 98cbf223b061b522c442205c15539fc7a90deca7dc348cf88be72feb8949b1d0 |
|
MD5 | f70513642e814fc8d3e12468dcd01e44 |
|
BLAKE2b-256 | c69d1d3536ec1b2f906e75be3d2fda11729d844d19780fda1b3d1af874d4d1a1 |