# utils
Merge multiple models or embeddings into a single one, very easily, in PyTorch.
## Usage
A runnable Colab notebook: https://colab.research.google.com/drive/1vOFxEcLQdgCxCCJCkp-mxouTQ1f8F5FX?usp=sharing
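The package is published on PyPI as `torchmerge` and can be installed with `pip install torchmerge`. In the example below, `me` refers to the module exposing `model_create` and `MergeModel_create`; the `import torchmerge as me` line shown in the snippet is an assumption about the import name, so verify it against the Colab notebook above.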
### Short example
```python
import os, random, copy, collections
import numpy as np, pandas as pd
from box import Box
from copy import deepcopy

import torch
import torch.nn as nn
import torchvision

import torchmerge as me   # NOTE: import name assumed; `me` must expose model_create / MergeModel_create

#############################################################################################
def test3d():
    from box import Box
    from copy import deepcopy
    from torch.utils.data import DataLoader, TensorDataset

    ARG = Box({
        'MODE'      : 'mode1',
        'DATASET'   : {},
        'MODEL_INFO': {},
    })
    PARAMS = {}

    ##################################################################
    if ARG.MODE == 'mode1':
        ARG.MODEL_INFO.TYPE = 'dataonly'

    train_config                     = Box({})
    train_config.LR                  = 0.001
    train_config.SEED                = 42
    train_config.DEVICE              = 'cpu'
    train_config.BATCH_SIZE          = 64
    train_config.EPOCHS              = 1
    train_config.EARLY_STOPPING_THLD = 10
    train_config.VALID_FREQ          = 1
    train_config.SAVE_FILENAME       = './model.pt'
    train_config.TRAIN_RATIO         = 0.7
    train_config.VAL_RATIO           = 0.2
    train_config.TEST_RATIO          = 0.1

    ####################################################################
    def load_DataFrame():
        return None

    def test_dataset_f_mnist(samples=100):
        from sklearn.model_selection import train_test_split
        from torchvision import transforms, datasets

        ### Transformations: to tensor, then repeat the grayscale channel 3 times
        train_list_transforms = [transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1))]

        dataset1 = datasets.FashionMNIST(root="data", train=True,
                                         transform=transforms.Compose(train_list_transforms), download=True,)

        ### Sample the required number of examples from the dataset
        dataset1 = torch.utils.data.Subset(dataset1, np.arange(samples))
        X, Y = [], []
        for data, targets in dataset1:
            X.append(data)
            Y.append(targets)

        ### Convert lists to tensor format
        X, y = torch.stack(X), torch.Tensor(Y)

        train_r, test_r, val_r = train_config.TRAIN_RATIO, train_config.TEST_RATIO, train_config.VAL_RATIO
        train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=1 - train_r)
        valid_X, test_X, valid_y, test_y = train_test_split(test_X, test_y, test_size=test_r / (test_r + val_r))
        return (train_X, train_y, valid_X, valid_y, test_X, test_y)

    def prepro_dataset(self, df: pd.DataFrame = None):
        train_X, train_y, valid_X, valid_y, test_X, test_y = test_dataset_f_mnist(samples=100)
        return train_X, train_y, valid_X, valid_y, test_X, test_y

    ### modelA ########################################################
    from torchvision import models
    model_ft = models.resnet18(pretrained=True)
    embA_dim = model_ft.fc.in_features          ### embedding size feeding the 'fc' layer (512)

    ARG.modelA              = {}
    ARG.modelA.name         = 'resnet18'
    ARG.modelA.nn_model     = model_ft
    ARG.modelA.layer_emb_id = 'fc'
    ARG.modelA.architect    = [embA_dim]        ### head size
    modelA = me.model_create(ARG.modelA)

    ### modelB ########################################################
    from torchvision import models
    model_ft = models.resnet50(pretrained=True)
    embB_dim = int(model_ft.fc.in_features)     ### embedding size feeding the 'fc' layer (2048)

    ARG.modelB              = {}
    ARG.modelB.name         = 'resnet50'
    ARG.modelB.nn_model     = model_ft
    ARG.modelB.layer_emb_id = 'fc'
    ARG.modelB.architect    = [embB_dim]        ### head size
    modelB = me.model_create(ARG.modelB)

    ### merge_model ###################################################
    ### EXPLICIT DEPENDENCY
    ARG.merge_model                            = {}
    ARG.merge_model.name                       = 'modelmerge1'
    ARG.merge_model.architect                  = {}
    ARG.merge_model.architect.input_dim        = embA_dim + embB_dim
    ARG.merge_model.architect.merge_type       = 'cat'
    ARG.merge_model.architect.merge_layers_dim = [1024, 768]   ### Common embedding is 768
    ARG.merge_model.architect.merge_custom     = None

    ### Custom head
    ARG.merge_model.architect.head_layers_dim  = [128, 1]      ### Specific task
    ARG.merge_model.architect.head_custom      = None

    ARG.merge_model.dataset       = {}
    ARG.merge_model.dataset.dirin = "/"
    ARG.merge_model.dataset.coly  = 'ytarget'

    ARG.merge_model.train_config = train_config

    model = me.MergeModel_create(ARG, model_create_list=[modelA, modelB])
    model.build()

    #### Run Model ###################################################
    model.training(load_DataFrame, prepro_dataset)
    model.save_weight('ztmp/model_x5.pt')
    model.load_weights('ztmp/model_x5.pt')

    inputs  = torch.randn((train_config.BATCH_SIZE, 3, 28, 28)).to(model.device)
    outputs = model.predict(inputs)
    print(outputs)
```
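For intuition, below is a minimal, hand-rolled sketch of the architecture the configuration above describes: the resnet18 embedding (512-d) and the resnet50 embedding (2048-d) are concatenated (`merge_type = 'cat'`, so `input_dim = 512 + 2048 = 2560`), passed through merge layers of sizes `[1024, 768]`, then through a task head of sizes `[128, 1]`. This is only an illustration of the idea, not the torchmerge implementation; details such as the ReLU activations are assumptions.

```python
# Illustrative sketch only (NOT the torchmerge implementation):
# concatenate two backbone embeddings, then apply a merge MLP and a task head.
import torch
import torch.nn as nn
from torchvision import models

class NaiveMerge(nn.Module):
    def __init__(self):
        super().__init__()
        a = models.resnet18(pretrained=False)   # random weights are enough for a shape check
        b = models.resnet50(pretrained=False)
        dim_a, dim_b = a.fc.in_features, b.fc.in_features   # 512 and 2048
        a.fc, b.fc   = nn.Identity(), nn.Identity()         # keep only the embeddings
        self.backbone_a, self.backbone_b = a, b

        self.merge = nn.Sequential(                 # merge_layers_dim = [1024, 768]
            nn.Linear(dim_a + dim_b, 1024), nn.ReLU(),
            nn.Linear(1024, 768), nn.ReLU(),
        )
        self.head = nn.Sequential(                  # head_layers_dim = [128, 1]
            nn.Linear(768, 128), nn.ReLU(),
            nn.Linear(128, 1),
        )

    def forward(self, x):
        # merge_type = 'cat': the merged input is the concatenation of both embeddings
        emb = torch.cat([self.backbone_a(x), self.backbone_b(x)], dim=1)
        return self.head(self.merge(emb))

if __name__ == "__main__":
    x = torch.randn(4, 3, 28, 28)
    print(NaiveMerge()(x).shape)   # torch.Size([4, 1])
```

With concatenation, the merged input dimension is simply the sum of the two embedding dimensions, which is why the example sets `input_dim = embA_dim + embB_dim`.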