Python context manager to query a SQLite file stored on S3
Project description
sqlite-s3-query
Python context manager to query a SQLite file stored on S3. It uses multiple HTTP range requests per query to avoid downloading the entire file, and so is suitable for large databases.
All queries using the same instance of the context will query the same version of the database object in S3. This means that a context is roughly equivalent to a REPEATABLE READ transaction, and queries should complete successfully even if the database is replaced concurrently by another S3 client. Versioning must be enabled on the S3 bucket.
SQL statements that write to the database are not supported.
Inspired by phiresky's sql.js-httpvfs, and dacort's Stack Overflow answer.
Installation
pip install sqlite_s3_query
The libsqlite3 binary library is also required, but this is typically already installed on most systems.
Usage
from sqlite_s3_query import sqlite_s3_query
with sqlite_s3_query(url='https://my-bucket.s3.eu-west-2.amazonaws.com/my-db.sqlite') as query:
with query('SELECT * FROM my_table WHERE my_column = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
# Exactly the same results, even if the object in S3 was replaced
with query('SELECT * FROM my_table WHERE my_column = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
If in your project you query the same object from multiple places, functools.partial
can be used to make an interface with less duplication.
from functools import partial
from sqlite_s3_query import sqlite_s3_query
query_my_db = partial(sqlite_s3_query,
url='https://my-bucket.s3.eu-west-2.amazonaws.com/my-db.sqlite',
)
with \
query_my_db() as query, \
query('SELECT * FROM my_table WHERE my_col = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
with \
query_my_db() as query, \
query('SELECT * FROM my_table_2 WHERE my_col = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
Credentials
The AWS region and the credentials are taken from environment variables, but this can be changed using the get_credentials
parameter. The default implementation, which can be overridden, is shown below.
import os
def get_credentials(_):
    """Default credentials provider: read the AWS region and credentials
    from environment variables.

    Returns a 4-tuple of (region, access key ID, secret access key,
    session token); the session token is None unless temporary
    credentials are in use.
    """
    env = os.environ
    return (
        env['AWS_REGION'],
        env['AWS_ACCESS_KEY_ID'],
        env['AWS_SECRET_ACCESS_KEY'],
        env.get('AWS_SESSION_TOKEN'),  # Only needed for temporary credentials
    )
query_my_db = partial(sqlite_s3_query,
url='https://my-bucket.s3.eu-west-2.amazonaws.com/my-db.sqlite',
get_credentials=get_credentials,
)
with \
query_my_db() as query, \
query('SELECT * FROM my_table_2 WHERE my_col = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
How to use this to fetch credentials for the IAM role associated with an ECS container is shown in the example below.
import os
import httpx
def GetECSCredentials():
    """Return a get_credentials callable that fetches credentials for the
    IAM role associated with an ECS container, caching them until they
    expire.

    The returned callable matches the sqlite_s3_query get_credentials
    interface: it takes the current datetime and returns a 4-tuple of
    (region, access key ID, secret access key, session token).
    """
    # The original example referenced datetime without importing it, which
    # raises NameError; import it here so the snippet is self-contained.
    import datetime

    aws_access_key_id, aws_secret_access_key, aws_session_token = None, None, None
    # Start "already expired" so the first call always fetches credentials.
    expiration = datetime.datetime.fromtimestamp(0)
    aws_region = os.environ['AWS_REGION']
    creds_path = os.environ['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI']

    def get_credentials(now):
        nonlocal aws_access_key_id, aws_secret_access_key, aws_session_token
        nonlocal expiration
        # Only hit the ECS credentials endpoint when the cached credentials
        # have expired.
        if now > expiration:
            creds = httpx.get(f'http://169.254.170.2{creds_path}').json()
            aws_access_key_id = creds['AccessKeyId']
            aws_secret_access_key = creds['SecretAccessKey']
            aws_session_token = creds['Token']
            expiration = datetime.datetime.strptime(creds['Expiration'], '%Y-%m-%dT%H:%M:%SZ')
        return aws_region, aws_access_key_id, aws_secret_access_key, aws_session_token

    return get_credentials
query_my_db = partial(sqlite_s3_query,
url='https://my-bucket.s3.eu-west-2.amazonaws.com/my-db.sqlite',
get_credentials=GetECSCredentials(),
)
with \
query_my_db() as query, \
query('SELECT * FROM my_table_2 WHERE my_col = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
HTTP Client
The HTTP client can be changed by overriding the default get_http_client
parameter, whose default implementation is shown below.
from functools import partial
import httpx
from sqlite_s3_query import sqlite_s3_query
query_my_db = partial(sqlite_s3_query,
url='https://my-bucket.s3.eu-west-2.amazonaws.com/my-db.sqlite',
get_http_client=lambda: httpx.Client(),
)
with \
query_my_db() as query, \
query('SELECT * FROM my_table WHERE my_col = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
Location of libsqlite3
The location of the libsqlite3 library can be changed by overriding the get_libsqlite3
parameter.
from ctypes import cdll
from functools import partial
from sys import platform
from sqlite_s3_query import sqlite_s3_query
query_my_db = partial(sqlite_s3_query,
url='https://my-bucket.s3.eu-west-2.amazonaws.com/my-db.sqlite',
get_libsqlite3=lambda: cdll.LoadLibrary({'linux': 'libsqlite3.so.0', 'darwin': 'libsqlite3.dylib'}[platform])
)
with \
query_my_db() as query, \
query('SELECT * FROM my_table WHERE my_col = ?', params=('my-value',)) as (columns, rows):
for row in rows:
print(row)
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
Built Distribution
Hashes for sqlite_s3_query-0.0.37-py3-none-any.whl
Algorithm | Hash digest | |
---|---|---|
SHA256 | 972814bf004518d4445c72cec8bf067a06652ec4e1aaafb22b61eff1e99c6ac6 |
|
MD5 | cdd036f0df572ebb65a0f0d2a082893f |
|
BLAKE2b-256 | a30419f2f41c97f029b4ae3b21fa2523afbf59e6bc0f2ee91b070d853b39313c |