A DQ package
Project description
Requirements
pip install -r requirements.txt
Env
.env File
API_KEY=xyz
API_URL=xyz
Or export the variables in your shell
export API_KEY=xyz
export API_URL=xyz
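To check what the client will see, here is a minimal sketch of reading these variables (python-dotenv is an assumption here; the package may load them differently):

# Illustration only: read API_KEY and API_URL from .env or the shell environment
import os
from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()
print(os.environ.get("API_KEY"), os.environ.get("API_URL"))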
Database User
GRANT USAGE ON SCHEMA "validation" TO anon;
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA "validation" TO anon;
GRANT ALL ON SEQUENCE validation.rule_output_rule_output_id_seq TO anon;
GRANT ALL ON SEQUENCE validation.connections_connection_id_seq TO anon;
GRANT ALL ON SEQUENCE validation.owlcheck_q_job_id_seq TO anon;
GRANT ALL ON SEQUENCE validation.job_log_log_id_seq TO anon;
GRANT ALL ON SEQUENCE validation.assignment_q_id_seq TO anon;
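To sanity-check the grants, a hedged sketch of connecting as anon (this assumes a Postgres backend reachable via psycopg2; host, dbname, and password are placeholders):

import psycopg2  # assumed driver; the backing database is not specified above

conn = psycopg2.connect(host="localhost", dbname="postgres", user="anon", password="...")
with conn, conn.cursor() as cur:
    # validation.rule_output is implied by the rule_output_rule_output_id_seq grant
    cur.execute("SELECT count(*) FROM validation.rule_output")
    print(cur.fetchone())
conn.close()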
Examples
"""
# Individual Usage Examples
import duckdb
import pandas as pd

# Client instantiation (APIClient and api_client are assumed to come from this package's setup)
api = APIClient(api_client)
engine = duckdb.connect(':memory:')
engine.sql(" select * from read_csv_auto('./data/fake_customers.csv') limit 10").show()
engine.close()
# owl_check_history: Delete, Insert, Read
api.delete_owl_check_history("test")
api.insert_owl_check_history("test", "2024-09-16")
rs = api.get_owl_check_history("test")
print(rs)
# owl_catalog: Delete, Insert, Read
api.delete_owl_catalog("test")
api.insert_owl_catalog("test")
rs = api.get_owl_catalog("test")
print(rs)
# dataset_schema: Delete, Insert, Read
api.delete_dataset_schema("test")
api.insert_dataset_schema("test")
rs = api.get_dataset_schema("test")
print(rs)
# dataset_field: Delete, Insert, Read
api.delete_dataset_field("test", "2024-09-16")
api.insert_dataset_field("test", "2024-09-16")
rs = api.get_dataset_field("test", "2024-09-16")
print(rs)
# Print the result
df = pd.DataFrame(rs.data)
print(df[['dataset','run_id','rc']])
# run rules
api.delete_rule_output("test", "2024-09-16")
api.run_rules("test", "2024-09-16")
# scoring
rule_output = api.get_rule_output("test", "2024-09-16")
rule_score = 0
for r in rule_output.data:
    rule_score += r['score']
print(rule_score)
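Equivalently, the accumulation can be written in one line:

rule_score = sum(r['score'] for r in rule_output.data)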
# dataset_scan: Delete, Insert, Read
delete_record = api.delete_dataset_scan("test", "2024-09-16")
add_record = api.insert_dataset_scan("test", "2024-09-16", 100, 100 - rule_score)
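The scan can then be read back with the matching getter (the same call appears in the Job example below):

rs = api.get_dataset_scan("test", "2024-09-16")
print(rs.data)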
Register
dataset = 'test'
# opt_spark
api.delete_opt_spark(dataset)
api.insert_opt_spark(dataset)
rs = api.get_opt_spark(dataset)
print(rs)
df = pd.DataFrame(rs.data)
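# note: display() requires a Jupyter/IPython session; use print(df) in a plain script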
display(df)
# opt_pushdown
api.delete_opt_pushdown(dataset)
api.insert_opt_pushdown(dataset)
rs = api.get_opt_pushdown(dataset)
print(rs)
df = pd.DataFrame(rs.data)
display(df)
# opt_profile
api.delete_opt_profile(dataset)
api.insert_opt_profile(dataset)
rs = api.get_opt_profile(dataset)
print(rs)
df = pd.DataFrame(rs.data)
display(df)
# opt_load
api.delete_opt_load(dataset)
api.insert_opt_load(dataset)
rs = api.get_opt_load(dataset)
print(rs)
df = pd.DataFrame(rs.data)
display(df)
# opt_env
api.delete_opt_env(dataset)
api.insert_opt_env(dataset)
rs = api.get_opt_env(dataset)
print(rs)
df = pd.DataFrame(rs.data)
display(df)
# opt_owl
api.delete_opt_owl(dataset)
api.insert_opt_owl(dataset)
rs = api.get_opt_owl(dataset)
print(rs)
df = pd.DataFrame(rs.data)
display(df)
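Since every opt_* group follows the same delete/insert/get pattern, the registrations can also be driven by a loop; a sketch using only the methods shown above (the getattr dispatch is illustrative, not part of the package API):

# Hypothetical convenience loop over the opt_* groups registered above
for opt in ('opt_spark', 'opt_pushdown', 'opt_profile', 'opt_load', 'opt_env', 'opt_owl'):
    getattr(api, f'delete_{opt}')(dataset)
    getattr(api, f'insert_{opt}')(dataset)
    rs = getattr(api, f'get_{opt}')(dataset)
    print(opt, rs.data)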
Job
dataset = 'test'
run_id = '2024-09-20'
conn = duckdb.connect(':memory:')  # assumed: the same in-memory DuckDB setup as the earlier example
conn.sql(f"create table if not exists {dataset} as select * from read_csv_auto('./data/fake_customers.csv')")
# owl_check_history: Delete, Insert, Read
api.delete_owl_check_history(dataset)
api.insert_owl_check_history(dataset, run_id)
rs = api.get_owl_check_history(dataset)
print(rs)
# owl_catalog: Delete, Insert, Read
api.delete_owl_catalog(dataset)
api.insert_owl_catalog(dataset)
rs = api.get_owl_catalog(dataset)
print(rs)
# dataset_schema: Delete, Insert, Read
api.delete_dataset_schema(dataset)
api.insert_dataset_schema(dataset)
rs = api.get_dataset_schema(dataset)
print(rs)
# dataset_field: Delete, Insert, Read
api.delete_dataset_field(dataset, run_id)
api.insert_dataset_field(dataset, run_id)
rs = api.get_dataset_field(dataset, run_id)
print(rs)
# run rules
api.delete_rule_output(dataset, run_id)
api.run_rules(dataset, run_id)
# scoring
rule_output = api.get_rule_output(dataset, run_id)
print(rule_output.data)
rule_score = 0
for r in rule_output.data:
    rule_score += r['score']
print(rule_score)
# dataset_scan: Delete, Insert, Read
delete_record = api.delete_dataset_scan(dataset, run_id)
add_record = api.insert_dataset_scan(dataset, run_id, 100, 100 - rule_score)
rs = api.get_dataset_scan(dataset, run_id)
print(rs.data)
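The steps above compose naturally into a single job function; a sketch built only from the calls already shown (the function name and default max_score are illustrative):

def run_job(api, dataset, run_id, max_score=100):
    # re-register metadata, run the rules, then record the scan
    api.delete_owl_check_history(dataset)
    api.insert_owl_check_history(dataset, run_id)
    api.delete_owl_catalog(dataset)
    api.insert_owl_catalog(dataset)
    api.delete_dataset_schema(dataset)
    api.insert_dataset_schema(dataset)
    api.delete_dataset_field(dataset, run_id)
    api.insert_dataset_field(dataset, run_id)
    api.delete_rule_output(dataset, run_id)
    api.run_rules(dataset, run_id)
    rule_score = sum(r['score'] for r in api.get_rule_output(dataset, run_id).data)
    api.delete_dataset_scan(dataset, run_id)
    api.insert_dataset_scan(dataset, run_id, max_score, max_score - rule_score)
    return api.get_dataset_scan(dataset, run_id)

print(run_job(api, dataset, run_id).data)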
Download files
Download the file for your platform.
Source Distribution
- duckdq-0.0.3.tar.gz (2.6 kB)

Built Distribution
- duckdq-0.0.3-py3-none-any.whl (2.2 kB)
File details
Details for the file duckdq-0.0.3.tar.gz.
File metadata
- Download URL: duckdq-0.0.3.tar.gz
- Upload date:
- Size: 2.6 kB
- Tags: Source
- Uploaded using Trusted Publishing? No
- Uploaded via: twine/5.1.1 CPython/3.12.4
File hashes
| Algorithm | Hash digest |
|---|---|
| SHA256 | 5b31fdfb05cdec8399d17fae82879fe1342941488739cad247a6a920a1912061 |
| MD5 | 691bf0ff309a5065e6e11797e153c792 |
| BLAKE2b-256 | 5eeb1a1f0c1e0a8a148c738f1a8cf99701ee975b1f97809aefe60d8fb924af04 |
File details
Details for the file duckdq-0.0.3-py3-none-any.whl.
File metadata
- Download URL: duckdq-0.0.3-py3-none-any.whl
- Upload date:
- Size: 2.2 kB
- Tags: Python 3
- Uploaded using Trusted Publishing? No
- Uploaded via: twine/5.1.1 CPython/3.12.4
File hashes
| Algorithm | Hash digest |
|---|---|
| SHA256 | 8a3dc60333c89443a46694c620316a7b197ea046e88101ced17219864e14832c |
| MD5 | 133601b02b39769acb9d7a984109c014 |
| BLAKE2b-256 | ebc31612cc070f95349418f63c87459e638339fe26c1ac2ffdbeee0a93f7ba9c |
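To verify a downloaded file against the hashes above, a standard-library check (the file path assumes the current directory):

import hashlib

with open('duckdq-0.0.3-py3-none-any.whl', 'rb') as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest == '8a3dc60333c89443a46694c620316a7b197ea046e88101ced17219864e14832c')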