Dataset SDK for consistent reading and writing of batch, online, and streaming data.
Project description
Welcome to @datasets
TODO
import pandas as pd
from metaflow import FlowSpec, step
from datasets import Dataset, Mode
from datasets.metaflow import DatasetParameter
from datasets.plugins import BatchOptions
# Can also invoke from CLI:
# > python datasets/tutorials/0_hello_dataset_flow.py run \
# --hello_dataset '{"name": "HelloDataset", "mode": "READ_WRITE", \
# "options": {"type": "BatchOptions", "partition_by": "region"}}'
class HelloDatasetFlow(FlowSpec):
    """Minimal Metaflow flow showing a dataset round-trip with @datasets.

    ``start`` writes a small region-partitioned frame through the dataset;
    ``end`` reads back only the ``region == "A"`` partition and prints it.
    """

    # Flow-level dataset parameter; overridable from the CLI (see the
    # example invocation in the comment above the class).
    hello_dataset = DatasetParameter(
        "hello_dataset",
        default=Dataset("HelloDataset", mode=Mode.READ_WRITE, options=BatchOptions(partition_by="region")),
    )

    @step
    def start(self):
        """Build a small frame, write it to the dataset, and pass it on."""
        frame = pd.DataFrame({"region": ["A", "A", "A", "B", "B", "B"], "zpid": [1, 2, 3, 4, 5, 6]})
        print("saving data_frame: \n", frame.to_string(index=False))
        # Example of writing to a dataset
        self.hello_dataset.write(frame)
        # save this as an output dataset
        self.output_dataset = self.hello_dataset
        self.next(self.end)

    @step
    def end(self):
        """Read back just the region="A" partition and display it."""
        print(f"I have dataset \n{self.output_dataset=}")
        # output_dataset to_pandas(partitions=dict(region="A")) only
        region_a: pd.DataFrame = self.output_dataset.to_pandas(partitions=dict(region="A"))
        print('self.output_dataset.to_pandas(partitions=dict(region="A")):')
        print(region_a.to_string(index=False))
if __name__ == "__main__":
    # Instantiating the flow hands control to the Metaflow runner,
    # which parses CLI args (run/resume/show) and executes the steps.
    HelloDatasetFlow()
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
zdatasets-0.1.1.tar.gz
(50.3 kB
view hashes)
Built Distribution
zdatasets-0.1.1-py3-none-any.whl
(79.0 kB
view hashes)
Close
Hashes for zdatasets-0.1.1-py3-none-any.whl
Algorithm | Hash digest
---|---
SHA256 | 7370421ff69ee3aeef5959b103f6f82ec776c8d7446d121eb031b7faa7e04247
MD5 | 170211ce82327342833df1e3d3163894
BLAKE2b-256 | 5d18d29765835ea89659005007b5ea42d8a9387a1d10ea9daf3f30051728a435