Skip to main content

Python client for oka repository

Project description

test codecov pypi Python version license: GPL v3

DOI arXiv User Manual API Documentation

oka - Client for OKA repository

Latest version as a package

Current code

User manual

API documentation

Overview

oka is a client for the OKA repository. It also provides utilities to process data.

Installation

...as a standalone lib

# Set up a virtualenv. 
python3 -m venv venv
source venv/bin/activate

# Install from PyPI...
pip install --upgrade pip
pip install -U oka
pip install -U "oka[full]"  # use the extra 'full' for additional functionality (recommended); quotes are required by some shells, e.g. zsh

# ...or, install from updated source code.
pip install git+https://github.com/rabizao/oka

...from source

sudo apt install python3.8-venv python3.8-dev python3.8-distutils # For Debian-like systems.
git clone https://github.com/rabizao/oka
cd oka
python3.8 -m venv venv
source venv/bin/activate
pip install -e .

Usage

Hello world

from oka import Oka, generate_token, toy_df

# Create a pandas dataframe.
df = toy_df()
print(df.head())
"""
   attr1  attr2  class
0    5.1    6.4      0
1    1.1    2.5      1
2    6.1    3.6      0
3    1.1    3.5      1
4    3.1    2.5      0
"""
# Login.
token = generate_token("http://localhost:5000")
client = Oka(token, "http://localhost:5000")

# Store.
id = client.send(df)

# Store again.
id = client.send(df)
"""
Content already stored for id iJ_e4463c51904e9efb800533d25082af2a7bf77
"""

# Fetch.
df = client.get(id)

print(df.head())
"""
   attr1  attr2  class
0    5.1    6.4      0
1    1.1    2.5      1
2    6.1    3.6      0
3    1.1    3.5      1
4    3.1    2.5      0
"""

DataFrame by hand

import pandas as pd
from oka import Oka, generate_token

# Create a pandas dataframe.
df = pd.DataFrame(
    [[1, 2, "+"],
     [3, 4, "-"]],
    index=["row 1", "row 2"],
    columns=["col 1", "col 2", "class"],
)
print(df.head())
"""
       col 1  col 2 class
row 1      1      2     +
row 2      3      4     -
"""
# Login.
token = generate_token("http://localhost:5000")
client = Oka(token, "http://localhost:5000")

# Store.
id = client.send(df)

# Store again.
id = client.send(df)
"""
Content already stored for id f7_6b9deafec2562edde56bfdc573b336b55cb16
"""

# Fetch.
df = client.get(id)

print(df.head())
"""
       col 1  col 2 class
row 1      1      2     +
row 2      3      4     -
"""

Machine Learning workflow

from sklearn.ensemble import RandomForestClassifier as RF

from idict import let, idict
from idict.function.model import fit, predict
from idict.function.evaluation import split
from oka import Oka, generate_token
import json

# Login.
token = generate_token()
cache = Oka(token)
d = (
        idict.fromtoy()
        >> split
        >> let(fit, algorithm=RF, config={"n_estimators": 55}, Xin="Xtr", yin="ytr")
        >> let(predict, Xin="Xts")
        >> (lambda X: {"X2": X * X, "_history": ...})
        >> [cache]
)
cache.send(d)
print(json.dumps(list(d.history.keys()), indent=2))
"""
OKATESTING var in use davips http://localhost:5000
[
  "idict--------------sklearn-1.0.1---split",
  "idict-----------------------wrapper--fit",
  "idict-------------------wrapper--predict",
  "RwMG040tZc3XNoJkwkBe6A1aIUGNQ4EAQVqi.uAl"
]
"""
d.show()
"""
{
    "X2": "«{'attr1': {0: 26.009999999999998, 1: 1.2100000000000002, 2: 37.209999999999994, 3: 1.2100000000000002, 4: 9.610000000000001, 5: 22.090000000000003, 6: 82.80999999999999, 7: 68.89000000000001, 8: 82.80999999999999, 9: 6.25, 10: 50.41, 11: 0.010000000000000002, 12: 4.41, 13: 0.010000000000000002, 14: 26.009999999999998, 15: 967.21, 16: 1.2100000000000002, 17: 4.840000000000001, 18: 9.610000000000001, 19: 1.2100000000000002}, 'attr2': {0: 40.96000000000001, 1: 6.25, 2: 12.96, 3: 12.25, 4: 6.25, 5: 24.010000000000005, 6: 12.25, 7: 8.41, 8: 51.84, 9: 20.25, 10: 43.559999999999995, 11: 18.49, 12: 0.010000000000000002, 13: 16.0, 14: 20.25, 15: 22.090000000000003, 16: 10.240000000000002, 17: 72.25, 18: 6.25, 19: 72.25}}»",
    "_history": "idict--------------sklearn-1.0.1---split idict-----------------------wrapper--fit idict-------------------wrapper--predict RwMG040tZc3XNoJkwkBe6A1aIUGNQ4EAQVqi.uAl",
    "z": "«[1 0 1 0 1 1 1]»",
    "model": "RandomForestClassifier(n_estimators=55)",
    "Xtr": "«{'attr1': {8: 9.1, 2: 6.1, 18: 3.1, 7: 8.3, 17: 2.2, 4: 3.1, 19: 1.1, 5: 4.7, 12: 2.1, 16: 1.1, 3: 1.1, 1: 1.1, 11: 0.1}, 'attr2': {8: 7.2, 2: 3.6, 18: 2.5, 7: 2.9, 17: 8.5, 4: 2.5, 19: 8.5, 5: 4.9, 12: 0.1, 16: 3.2, 3: 3.5, 1: 2.5, 11: 4.3}}»",
    "Xts": "«{'attr1': {13: 0.1, 6: 9.1, 9: 2.5, 10: 7.1, 0: 5.1, 14: 5.1, 15: 31.1}, 'attr2': {13: 4.0, 6: 3.5, 9: 4.5, 10: 6.6, 0: 6.4, 14: 4.5, 15: 4.7}}»",
    "ytr": "«[0 0 0 1 1 0 1 1 0 0 1 1 1]»",
    "yts": "«[1 0 1 0 0 0 1]»",
    "X": "«{'attr1': {0: 5.1, 1: 1.1, 2: 6.1, 3: 1.1, 4: 3.1, 5: 4.7, 6: 9.1, 7: 8.3, 8: 9.1, 9: 2.5, 10: 7.1, 11: 0.1, 12: 2.1, 13: 0.1, 14: 5.1, 15: 31.1, 16: 1.1, 17: 2.2, 18: 3.1, 19: 1.1}, 'attr2': {0: 6.4, 1: 2.5, 2: 3.6, 3: 3.5, 4: 2.5, 5: 4.9, 6: 3.5, 7: 2.9, 8: 7.2, 9: 4.5, 10: 6.6, 11: 4.3, 12: 0.1, 13: 4.0, 14: 4.5, 15: 4.7, 16: 3.2, 17: 8.5, 18: 2.5, 19: 8.5}}»",
    "y": "«[0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]»",
    "_id": ".b0brk0dY77cwYstNl.xylIBj6XNZCaHtQ3mrkEb",
    "_ids": {
        "X2": "V8qrVsh6ptdtFSGVHkeHUq8T4mUNQ4EAQVqi.uAl",
        "_history": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "z": "SlXbYEPNdc.MAa05goCiqlFoB36-QXFsRw.A3wAU",
        "model": "srsfhoJE3tDX1dG2.WfSrHQuQkUFyrXlhOouWxUp",
        "Xtr": "XfsV-At5YI8NOBUjjJ-0vEa.qJLAX0I5JQ7SxahM",
        "Xts": "CRh.K81gKNvdsngaWupSWn9JwTpU1O-FiM6QfraE",
        "ytr": "ZzQ4QJL8HWGdMxK5QZl2XLDxq2rVjr4XHPILqMUI",
        "yts": "DojEKdt2iJ0wrptdFUnxUqPGet097oXIBIrZpihN",
        "X": "md_cb8b0c76becc1de32236764e91e8e457e826f (content: 34_1738c83af436029507def2710bc5125f58d0e)",
        "y": "Kp_697ef70c1a293f50ed352ea5775bba6d9b1f4 (content: S0_b6360d62ccafa275d4051dfd02b939104feac)"
    }
}
"""
print(d.z)
"""
[1 0 1 0 1 1 1]
"""
d.show()
"""
{
    "X2": "«{'attr1': {0: 26.009999999999998, 1: 1.2100000000000002, 2: 37.209999999999994, 3: 1.2100000000000002, 4: 9.610000000000001, 5: 22.090000000000003, 6: 82.80999999999999, 7: 68.89000000000001, 8: 82.80999999999999, 9: 6.25, 10: 50.41, 11: 0.010000000000000002, 12: 4.41, 13: 0.010000000000000002, 14: 26.009999999999998, 15: 967.21, 16: 1.2100000000000002, 17: 4.840000000000001, 18: 9.610000000000001, 19: 1.2100000000000002}, 'attr2': {0: 40.96000000000001, 1: 6.25, 2: 12.96, 3: 12.25, 4: 6.25, 5: 24.010000000000005, 6: 12.25, 7: 8.41, 8: 51.84, 9: 20.25, 10: 43.559999999999995, 11: 18.49, 12: 0.010000000000000002, 13: 16.0, 14: 20.25, 15: 22.090000000000003, 16: 10.240000000000002, 17: 72.25, 18: 6.25, 19: 72.25}}»",
    "_history": "idict--------------sklearn-1.0.1---split idict-----------------------wrapper--fit idict-------------------wrapper--predict RwMG040tZc3XNoJkwkBe6A1aIUGNQ4EAQVqi.uAl",
    "z": "«[1 0 1 0 1 1 1]»",
    "model": "RandomForestClassifier(n_estimators=55)",
    "Xtr": "«{'attr1': {8: 9.1, 2: 6.1, 18: 3.1, 7: 8.3, 17: 2.2, 4: 3.1, 19: 1.1, 5: 4.7, 12: 2.1, 16: 1.1, 3: 1.1, 1: 1.1, 11: 0.1}, 'attr2': {8: 7.2, 2: 3.6, 18: 2.5, 7: 2.9, 17: 8.5, 4: 2.5, 19: 8.5, 5: 4.9, 12: 0.1, 16: 3.2, 3: 3.5, 1: 2.5, 11: 4.3}}»",
    "Xts": "«{'attr1': {13: 0.1, 6: 9.1, 9: 2.5, 10: 7.1, 0: 5.1, 14: 5.1, 15: 31.1}, 'attr2': {13: 4.0, 6: 3.5, 9: 4.5, 10: 6.6, 0: 6.4, 14: 4.5, 15: 4.7}}»",
    "ytr": "«[0 0 0 1 1 0 1 1 0 0 1 1 1]»",
    "yts": "«[1 0 1 0 0 0 1]»",
    "X": "«{'attr1': {0: 5.1, 1: 1.1, 2: 6.1, 3: 1.1, 4: 3.1, 5: 4.7, 6: 9.1, 7: 8.3, 8: 9.1, 9: 2.5, 10: 7.1, 11: 0.1, 12: 2.1, 13: 0.1, 14: 5.1, 15: 31.1, 16: 1.1, 17: 2.2, 18: 3.1, 19: 1.1}, 'attr2': {0: 6.4, 1: 2.5, 2: 3.6, 3: 3.5, 4: 2.5, 5: 4.9, 6: 3.5, 7: 2.9, 8: 7.2, 9: 4.5, 10: 6.6, 11: 4.3, 12: 0.1, 13: 4.0, 14: 4.5, 15: 4.7, 16: 3.2, 17: 8.5, 18: 2.5, 19: 8.5}}»",
    "y": "«[0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]»",
    "_id": ".b0brk0dY77cwYstNl.xylIBj6XNZCaHtQ3mrkEb",
    "_ids": {
        "X2": "V8qrVsh6ptdtFSGVHkeHUq8T4mUNQ4EAQVqi.uAl",
        "_history": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "z": "SlXbYEPNdc.MAa05goCiqlFoB36-QXFsRw.A3wAU",
        "model": "srsfhoJE3tDX1dG2.WfSrHQuQkUFyrXlhOouWxUp",
        "Xtr": "XfsV-At5YI8NOBUjjJ-0vEa.qJLAX0I5JQ7SxahM",
        "Xts": "CRh.K81gKNvdsngaWupSWn9JwTpU1O-FiM6QfraE",
        "ytr": "ZzQ4QJL8HWGdMxK5QZl2XLDxq2rVjr4XHPILqMUI",
        "yts": "DojEKdt2iJ0wrptdFUnxUqPGet097oXIBIrZpihN",
        "X": "md_cb8b0c76becc1de32236764e91e8e457e826f (content: 34_1738c83af436029507def2710bc5125f58d0e)",
        "y": "Kp_697ef70c1a293f50ed352ea5775bba6d9b1f4 (content: S0_b6360d62ccafa275d4051dfd02b939104feac)"
    }
}
"""
# A field named '_' means this function is a no-op process, triggered only once, by accessing one of the other provided fields.
d >>= (lambda _, X2, y: print("Some logging/printing that doesn't affect data...\nX²=\n", X2[:3]))
d.show()
"""
{
    "X2": "→(X2 y _)",
    "y": "→(X2 y _)",
    "_history": "idict--------------sklearn-1.0.1---split idict-----------------------wrapper--fit idict-------------------wrapper--predict RwMG040tZc3XNoJkwkBe6A1aIUGNQ4EAQVqi.uAl",
    "z": "«[1 0 1 0 1 1 1]»",
    "model": "RandomForestClassifier(n_estimators=55)",
    "Xtr": "«{'attr1': {8: 9.1, 2: 6.1, 18: 3.1, 7: 8.3, 17: 2.2, 4: 3.1, 19: 1.1, 5: 4.7, 12: 2.1, 16: 1.1, 3: 1.1, 1: 1.1, 11: 0.1}, 'attr2': {8: 7.2, 2: 3.6, 18: 2.5, 7: 2.9, 17: 8.5, 4: 2.5, 19: 8.5, 5: 4.9, 12: 0.1, 16: 3.2, 3: 3.5, 1: 2.5, 11: 4.3}}»",
    "Xts": "«{'attr1': {13: 0.1, 6: 9.1, 9: 2.5, 10: 7.1, 0: 5.1, 14: 5.1, 15: 31.1}, 'attr2': {13: 4.0, 6: 3.5, 9: 4.5, 10: 6.6, 0: 6.4, 14: 4.5, 15: 4.7}}»",
    "ytr": "«[0 0 0 1 1 0 1 1 0 0 1 1 1]»",
    "yts": "«[1 0 1 0 0 0 1]»",
    "X": "«{'attr1': {0: 5.1, 1: 1.1, 2: 6.1, 3: 1.1, 4: 3.1, 5: 4.7, 6: 9.1, 7: 8.3, 8: 9.1, 9: 2.5, 10: 7.1, 11: 0.1, 12: 2.1, 13: 0.1, 14: 5.1, 15: 31.1, 16: 1.1, 17: 2.2, 18: 3.1, 19: 1.1}, 'attr2': {0: 6.4, 1: 2.5, 2: 3.6, 3: 3.5, 4: 2.5, 5: 4.9, 6: 3.5, 7: 2.9, 8: 7.2, 9: 4.5, 10: 6.6, 11: 4.3, 12: 0.1, 13: 4.0, 14: 4.5, 15: 4.7, 16: 3.2, 17: 8.5, 18: 2.5, 19: 8.5}}»",
    "_id": ".b0brk0dY77cwYstNl.xylIBj6XNZCaHtQ3mrkEb",
    "_ids": {
        "X2": "YpDLE6GhfJlNqpJswV570WkjIRtDL4lEzPzH7kzE",
        "y": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "_history": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "z": "SlXbYEPNdc.MAa05goCiqlFoB36-QXFsRw.A3wAU",
        "model": "srsfhoJE3tDX1dG2.WfSrHQuQkUFyrXlhOouWxUp",
        "Xtr": "XfsV-At5YI8NOBUjjJ-0vEa.qJLAX0I5JQ7SxahM",
        "Xts": "CRh.K81gKNvdsngaWupSWn9JwTpU1O-FiM6QfraE",
        "ytr": "ZzQ4QJL8HWGdMxK5QZl2XLDxq2rVjr4XHPILqMUI",
        "yts": "DojEKdt2iJ0wrptdFUnxUqPGet097oXIBIrZpihN",
        "X": "md_cb8b0c76becc1de32236764e91e8e457e826f (content: 34_1738c83af436029507def2710bc5125f58d0e)"
    }
}
"""
print("Triggering noop function by accessing 'y'...")
print("y", d.y[:3])
"""
Triggering noop function by accessing 'y'...
Some logging/printing that doesn't affect data...
X²=
    attr1  attr2
0  26.01  40.96
1   1.21   6.25
2  37.21  12.96
y [0 1 0]
"""
d.show()
"""
{
    "X2": "     attr1  attr2\n0    26.01  40.96\n1     1.21   6.25\n2    37.21  12.96\n3     1.21  12.25\n4     9.61   6.25\n5    22.09  24.01\n6    82.81  12.25\n7    68.89   8.41\n8    82.81  51.84\n9     6.25  20.25\n10   50.41  43.56\n11    0.01  18.49\n12    4.41   0.01\n13    0.01  16.00\n14   26.01  20.25\n15  967.21  22.09\n16    1.21  10.24\n17    4.84  72.25\n18    9.61   6.25\n19    1.21  72.25",
    "y": "«[0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]»",
    "_history": "idict--------------sklearn-1.0.1---split idict-----------------------wrapper--fit idict-------------------wrapper--predict RwMG040tZc3XNoJkwkBe6A1aIUGNQ4EAQVqi.uAl",
    "z": "«[1 0 1 0 1 1 1]»",
    "model": "RandomForestClassifier(n_estimators=55)",
    "Xtr": "«{'attr1': {8: 9.1, 2: 6.1, 18: 3.1, 7: 8.3, 17: 2.2, 4: 3.1, 19: 1.1, 5: 4.7, 12: 2.1, 16: 1.1, 3: 1.1, 1: 1.1, 11: 0.1}, 'attr2': {8: 7.2, 2: 3.6, 18: 2.5, 7: 2.9, 17: 8.5, 4: 2.5, 19: 8.5, 5: 4.9, 12: 0.1, 16: 3.2, 3: 3.5, 1: 2.5, 11: 4.3}}»",
    "Xts": "«{'attr1': {13: 0.1, 6: 9.1, 9: 2.5, 10: 7.1, 0: 5.1, 14: 5.1, 15: 31.1}, 'attr2': {13: 4.0, 6: 3.5, 9: 4.5, 10: 6.6, 0: 6.4, 14: 4.5, 15: 4.7}}»",
    "ytr": "«[0 0 0 1 1 0 1 1 0 0 1 1 1]»",
    "yts": "«[1 0 1 0 0 0 1]»",
    "X": "«{'attr1': {0: 5.1, 1: 1.1, 2: 6.1, 3: 1.1, 4: 3.1, 5: 4.7, 6: 9.1, 7: 8.3, 8: 9.1, 9: 2.5, 10: 7.1, 11: 0.1, 12: 2.1, 13: 0.1, 14: 5.1, 15: 31.1, 16: 1.1, 17: 2.2, 18: 3.1, 19: 1.1}, 'attr2': {0: 6.4, 1: 2.5, 2: 3.6, 3: 3.5, 4: 2.5, 5: 4.9, 6: 3.5, 7: 2.9, 8: 7.2, 9: 4.5, 10: 6.6, 11: 4.3, 12: 0.1, 13: 4.0, 14: 4.5, 15: 4.7, 16: 3.2, 17: 8.5, 18: 2.5, 19: 8.5}}»",
    "_id": ".b0brk0dY77cwYstNl.xylIBj6XNZCaHtQ3mrkEb",
    "_ids": {
        "X2": "YpDLE6GhfJlNqpJswV570WkjIRtDL4lEzPzH7kzE",
        "y": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "_history": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "z": "SlXbYEPNdc.MAa05goCiqlFoB36-QXFsRw.A3wAU",
        "model": "srsfhoJE3tDX1dG2.WfSrHQuQkUFyrXlhOouWxUp",
        "Xtr": "XfsV-At5YI8NOBUjjJ-0vEa.qJLAX0I5JQ7SxahM",
        "Xts": "CRh.K81gKNvdsngaWupSWn9JwTpU1O-FiM6QfraE",
        "ytr": "ZzQ4QJL8HWGdMxK5QZl2XLDxq2rVjr4XHPILqMUI",
        "yts": "DojEKdt2iJ0wrptdFUnxUqPGet097oXIBIrZpihN",
        "X": "md_cb8b0c76becc1de32236764e91e8e457e826f (content: 34_1738c83af436029507def2710bc5125f58d0e)"
    }
}
"""
# The same workflow will not be processed again if the same cache is used.
d = (
        idict.fromtoy()
        >> split
        >> let(fit, algorithm=RF, config={"n_estimators": 55}, Xin="Xtr", yin="ytr")
        >> let(predict, Xin="Xts")
        >> (lambda X: {"X2": X * X})
        >> (lambda _, X2, y: print("Some logging/printing that doesn't affect data...", X2.head()))
        >> [cache]
)
d.show()
"""
{
    "X2": "→(↑ X2→(X) y _)",
    "y": "→(↑ X2→(X) y _)",
    "z": "→(↑ input Xin yout version Xts→(input output config X y) model→(algorithm config Xin yin output version Xtr→(input output config X y) ytr→(input output config X y)))",
    "_history": "idict--------------sklearn-1.0.1---split idict-----------------------wrapper--fit idict-------------------wrapper--predict",
    "model": "→(↑ algorithm config Xin yin output version Xtr→(input output config X y) ytr→(input output config X y))",
    "Xtr": "→(↑ input output config X y)",
    "Xts": "→(↑ input output config X y)",
    "ytr": "→(↑ input output config X y)",
    "yts": "→(↑ input output config X y)",
    "X": "«{'attr1': {0: 5.1, 1: 1.1, 2: 6.1, 3: 1.1, 4: 3.1, 5: 4.7, 6: 9.1, 7: 8.3, 8: 9.1, 9: 2.5, 10: 7.1, 11: 0.1, 12: 2.1, 13: 0.1, 14: 5.1, 15: 31.1, 16: 1.1, 17: 2.2, 18: 3.1, 19: 1.1}, 'attr2': {0: 6.4, 1: 2.5, 2: 3.6, 3: 3.5, 4: 2.5, 5: 4.9, 6: 3.5, 7: 2.9, 8: 7.2, 9: 4.5, 10: 6.6, 11: 4.3, 12: 0.1, 13: 4.0, 14: 4.5, 15: 4.7, 16: 3.2, 17: 8.5, 18: 2.5, 19: 8.5}}»",
    "_id": "Fqk6Qn9Q.76F61PxrTQCK1XmlpaZzsP-5Qmb4v-j",
    "_ids": {
        "X2": "AjhVKi0UOnILU1Xy6FgIN5L4t8ivnWZXbLSwMuVM",
        "y": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "z": "SlXbYEPNdc.MAa05goCiqlFoB36-QXFsRw.A3wAU",
        "_history": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "model": "srsfhoJE3tDX1dG2.WfSrHQuQkUFyrXlhOouWxUp",
        "Xtr": "XfsV-At5YI8NOBUjjJ-0vEa.qJLAX0I5JQ7SxahM",
        "Xts": "CRh.K81gKNvdsngaWupSWn9JwTpU1O-FiM6QfraE",
        "ytr": "ZzQ4QJL8HWGdMxK5QZl2XLDxq2rVjr4XHPILqMUI",
        "yts": "DojEKdt2iJ0wrptdFUnxUqPGet097oXIBIrZpihN",
        "X": "md_cb8b0c76becc1de32236764e91e8e457e826f (content: 34_1738c83af436029507def2710bc5125f58d0e)"
    }
}
"""
cache.send(d)

d = cache.get(d.id)
d.show()
"""
{
    "X2": "→(↑)",
    "y": "→(↑)",
    "z": "→(↑)",
    "_history": "→(↑)",
    "model": "→(↑)",
    "Xtr": "→(↑)",
    "Xts": "→(↑)",
    "ytr": "→(↑)",
    "yts": "→(↑)",
    "X": "→(↑)",
    "_id": "Fqk6Qn9Q.76F61PxrTQCK1XmlpaZzsP-5Qmb4v-j",
    "_ids": {
        "X2": "AjhVKi0UOnILU1Xy6FgIN5L4t8ivnWZXbLSwMuVM",
        "y": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "z": "SlXbYEPNdc.MAa05goCiqlFoB36-QXFsRw.A3wAU",
        "_history": "FbwPhhohM9oJ2RiZe6NOVCGxpc5Z-6jYgymCTa1J",
        "model": "srsfhoJE3tDX1dG2.WfSrHQuQkUFyrXlhOouWxUp",
        "Xtr": "XfsV-At5YI8NOBUjjJ-0vEa.qJLAX0I5JQ7SxahM",
        "Xts": "CRh.K81gKNvdsngaWupSWn9JwTpU1O-FiM6QfraE",
        "ytr": "ZzQ4QJL8HWGdMxK5QZl2XLDxq2rVjr4XHPILqMUI",
        "yts": "DojEKdt2iJ0wrptdFUnxUqPGet097oXIBIrZpihN",
        "X": "md_cb8b0c76becc1de32236764e91e8e457e826f (content: 34_1738c83af436029507def2710bc5125f58d0e)"
    }
}
"""

More info

Aside from the papers on identification and on similarity (not ready yet), see also the PyPI package and the GitHub repository.

A lower level perspective is provided in the API documentation.

Grants

This work was supported by Fapesp under supervision of Prof. André C. P. L. F. de Carvalho at CEPID-CeMEAI (Grants 2013/07375-0 – 2019/01735-0).

.>>>>>>>>> outros <<<<<<<<<<<.

Project details


Download files

Download the file for your platform. If you're not sure which to choose, learn more about installing packages.

Source Distribution

oka-0.211211.2.tar.gz (39.2 kB view details)

Uploaded Source

Built Distribution

oka-0.211211.2-py3-none-any.whl (25.1 kB view details)

Uploaded Python 3

File details

Details for the file oka-0.211211.2.tar.gz.

File metadata

  • Download URL: oka-0.211211.2.tar.gz
  • Upload date:
  • Size: 39.2 kB
  • Tags: Source
  • Uploaded using Trusted Publishing? No
  • Uploaded via: poetry/1.1.7 CPython/3.8.10 Linux/5.4.0-89-generic

File hashes

Hashes for oka-0.211211.2.tar.gz
Algorithm Hash digest
SHA256 9acd55366fef2558dcbf11c0a6f3b68d0080c3b009cfbddfe4875d9334f7a0ed
MD5 4826cae3b767987e8d5138972c0cdfd7
BLAKE2b-256 99b5aae66577b9808424ad89a71d97bf3da8986b53007c2bf8519423da3a0fab

See more details on using hashes here.

File details

Details for the file oka-0.211211.2-py3-none-any.whl.

File metadata

  • Download URL: oka-0.211211.2-py3-none-any.whl
  • Upload date:
  • Size: 25.1 kB
  • Tags: Python 3
  • Uploaded using Trusted Publishing? No
  • Uploaded via: poetry/1.1.7 CPython/3.8.10 Linux/5.4.0-89-generic

File hashes

Hashes for oka-0.211211.2-py3-none-any.whl
Algorithm Hash digest
SHA256 65adaca3af67ae6f04847918891c89c904f4657aa324b5ceaccda16ebc7b2442
MD5 9e4cbf6d562a2484f3c32c3bcab3d976
BLAKE2b-256 9e227cabc01e1c26f617a500b77e5be5612200de9d91e7a9f981429a5fda8be1

See more details on using hashes here.

Supported by

AWS AWS Cloud computing and Security Sponsor Datadog Datadog Monitoring Fastly Fastly CDN Google Google Download Analytics Microsoft Microsoft PSF Sponsor Pingdom Pingdom Monitoring Sentry Sentry Error logging StatusPage StatusPage Status page