TensorFlow steps, savers, and utilities for Neuraxle. Neuraxle is a Machine Learning (ML) library for building neat pipelines, providing the right abstractions to ease research, development, and deployment of your ML applications.
Project description
Neuraxle-TensorFlow
TensorFlow steps, savers, and utilities for Neuraxle.
Neuraxle is a Machine Learning (ML) library for building neat pipelines, providing the right abstractions to ease research, development, and deployment of your ML applications.
Usage example
Tensorflow 1
Create a TensorFlow 1 model step by giving it a graph, an optimizer, and a loss function.
def create_graph(step: TensorflowV1ModelStep):
tf.placeholder('float', name='data_inputs')
tf.placeholder('float', name='expected_outputs')
tf.Variable(np.random.rand(), name='weight')
tf.Variable(np.random.rand(), name='bias')
return tf.add(tf.multiply(step['data_inputs'], step['weight']), step['bias'])
"""
# Note: you can also return a tuple containing two elements : tensor for training (fit), tensor for inference (transform)
def create_graph(step: TensorflowV1ModelStep):
# ...
decoder_outputs_training = create_training_decoder(step, encoder_state, decoder_cell)
decoder_outputs_inference = create_inference_decoder(step, encoder_state, decoder_cell)
return decoder_outputs_training, decoder_outputs_inference
"""
def create_loss(step: TensorflowV1ModelStep):
return tf.reduce_sum(tf.pow(step['output'] - step['expected_outputs'], 2)) / (2 * N_SAMPLES)
def create_optimizer(step: TensorflowV1ModelStep):
return tf.train.GradientDescentOptimizer(step.hyperparams['learning_rate'])
model_step = TensorflowV1ModelStep(
create_graph=create_graph,
create_loss=create_loss,
create_optimizer=create_optimizer,
has_expected_outputs=True
).set_hyperparams(HyperparameterSamples({
'learning_rate': 0.01
})).set_hyperparams_space(HyperparameterSpace({
'learning_rate': LogUniform(0.0001, 0.01)
}))
Tensorflow 2
Create a TensorFlow 2 model step by giving it a model, an optimizer, and a loss function.
def create_model(step: Tensorflow2ModelStep):
return LinearModel()
def create_optimizer(step: Tensorflow2ModelStep):
return tf.keras.optimizers.Adam(0.1)
def create_loss(step: Tensorflow2ModelStep, expected_outputs, predicted_outputs):
return tf.reduce_mean(tf.abs(predicted_outputs - expected_outputs))
model_step = Tensorflow2ModelStep(
create_model=create_model,
create_optimizer=create_optimizer,
create_loss=create_loss,
tf_model_checkpoint_folder=os.path.join(tmpdir, 'tf_checkpoints')
)
Deep Learning Pipeline
batch_size = 100
epochs = 3
validation_size = 0.15
max_plotted_validation_predictions = 10
seq2seq_pipeline_hyperparams = HyperparameterSamples({
'hidden_dim': 100,
'layers_stacked_count': 2,
'lambda_loss_amount': 0.0003,
'learning_rate': 0.006,
'window_size_future': sequence_length,
'output_dim': output_dim,
'input_dim': input_dim
})
feature_0_metric = metric_3d_to_2d_wrapper(mean_squared_error)
metrics = {'mse': feature_0_metric}
signal_prediction_pipeline = Pipeline([
ForEachDataInput(MeanStdNormalizer()),
ToNumpy(),
Tensorflow2ModelStep(
create_model=create_model,
create_loss=create_loss,
create_optimizer=create_optimizer,
expected_outputs_dtype=tf.dtypes.float32,
data_inputs_dtype=tf.dtypes.float32,
print_loss=True
).set_hyperparams(seq2seq_pipeline_hyperparams)
]).set_name('SignalPrediction')
pipeline = Pipeline([EpochRepeater(
ValidationSplitWrapper(
MetricsWrapper(Pipeline([
TrainOnlyWrapper(DataShuffler()),
MiniBatchSequentialPipeline([
MetricsWrapper(
signal_prediction_pipeline,
metrics=metrics,
name='batch_metrics'
)
], batch_size=batch_size)
]), metrics=metrics,
name='epoch_metrics',
print_metrics=True
),
test_size=validation_size,
scoring_function=feature_0_metric
), epochs=epochs)])
pipeline, outputs = pipeline.fit_transform(data_inputs, expected_outputs)
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
Built Distribution
Hashes for neuraxle_tensorflow-0.1.1.tar.gz
Algorithm | Hash digest | |
---|---|---|
SHA256 | 7baa55f6baa3af8e73802e8988cfcbc65d0ca62f07e2ac16c73c305323f5cee5 |
|
MD5 | 9334f44bc1320bfac04d322d83d33652 |
|
BLAKE2b-256 | 41f42df17aadcae90feac23c9ccf278d6a3940d801726c0d2440bbf164efda29 |
Hashes for neuraxle_tensorflow-0.1.1-py3-none-any.whl
Algorithm | Hash digest | |
---|---|---|
SHA256 | 8a87757481de9a9a5ed2e20dd4a5625119bf2438a882b83b3fea9cd6e6dfe0f3 |
|
MD5 | 753c95c481d8137946e39036b34c8377 |
|
BLAKE2b-256 | ea0bd3650c4bbb3f81c25cb4da330b08917bbbf1cb00d572b5e1bed0676a1b63 |