Python client for the Arctic Shift API.
Arctic Shift API Client Reference
This page gives practical examples for every public function in arcshiftwrap/arctic_shift.py.
- API Base: https://arctic-shift.photon-reddit.com
- Install: pip install arcshiftwrap
Setup
from datetime import datetime, timezone
from arcshiftwrap import (
    ArcticShiftClient,
    collect_posts_by_subreddits_parallel,
    collect_comments_by_subreddits_parallel,
)
from arcshiftwrap.arctic_shift import (
    collect_comments_by_windows,
    collect_posts_by_windows,
    deduplicate_items,
    format_date,
    normalize_response,
    split_time_range,
    utc_now,
)
client = ArcticShiftClient(timeout=90, max_retries=4, backoff_factor=2.0)
request
Low-level method to send a GET request to the Arctic Shift API.
response = client.request(
    endpoint="posts/search",
    params={"subreddit": "technology", "limit": 10},
)
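The raw response can then be unwrapped with normalize_response (covered under Helper Functions below). A minimal sketch, assuming request returns the parsed JSON body with results under a "data" key, as in the normalize_response example:
# Hypothetical post-processing: unwrap the raw response and print a couple of fields.
# Assumes the response follows the {"data": [...]} shape handled by normalize_response.
items = normalize_response(response)
for item in items:
    print(item.get("subreddit"), item.get("title"))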
ID Lookup Methods
get_posts_by_ids
posts = client.get_posts_by_ids(
    ids=["1abcde", "1fghij"],
    fields=["id", "subreddit", "title", "created_utc"],
)
get_comments_by_ids
comments = client.get_comments_by_ids(
    ids=["k12345", "k67890"],
    fields=["id", "subreddit", "body", "created_utc"],
)
get_subreddits_by_ids
subreddits = client.get_subreddits_by_ids(
    ids=["technology", "MachineLearning"],
    fields=["display_name", "subscribers", "over18"],
)
get_users_by_ids
users = client.get_users_by_ids(
    ids=["spez", "AutoModerator"],
    fields=["author", "total_karma", "created_utc"],
)
Search Methods
search_posts
posts = client.search_posts(
    subreddit="technology",
    after="2026-04-01",
    before="2026-04-08",
    limit=100,
    sort="asc",
    query="llm",
    fields=["id", "title", "selftext", "score", "num_comments"],
)
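The results can be filtered locally without extra API calls. A minimal sketch, assuming search_posts returns a list of dicts containing the requested fields:
# Keep only posts with at least 50 points (local post-processing; assumed list-of-dicts shape).
popular = [p for p in posts if p.get("score", 0) >= 50]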
search_comments
comments = client.search_comments(
    subreddit="technology",
    after="2026-04-01",
    before="2026-04-08",
    limit=100,
    sort="asc",
    body="chatgpt",
    fields=["id", "body", "score", "link_id", "parent_id"],
)
get_comment_tree
tree = client.get_comment_tree(
    link_id="t3_1abcde",
    limit=500,
    start_breadth=4,
    start_depth=4,
)
Aggregation Methods
aggregate_posts
post_agg = client.aggregate_posts(
    aggregate="subreddit",
    frequency="day",
    after="2026-04-01",
    before="2026-04-08",
    min_count=5,
)
aggregate_comments
comment_agg = client.aggregate_comments(
    aggregate="author",
    frequency="day",
    subreddit="technology",
    after="2026-04-01",
    before="2026-04-08",
)
Subreddit Methods
search_subreddits
subs = client.search_subreddits(
    subreddit_prefix="tech",
    min_subscribers=50000,
    limit=25,
    sort="desc",
    sort_type="subscribers",
    fields=["display_name", "subscribers", "over18"],
)
get_subreddit_rules
rules = client.get_subreddit_rules(subreddits=["technology", "MachineLearning"])
get_subreddit_wikis
wikis = client.get_subreddit_wikis(
    subreddit="technology",
    paths=["index", "faq"],
    limit=10,
)
list_subreddit_wikis
wiki_paths = client.list_subreddit_wikis(subreddit="technology")
User Methods
search_users
users = client.search_users(
    author_prefix="sam",
    min_karma=1000,
    limit=25,
    sort="desc",
    sort_type="total_karma",
)
user_user_interactions
interactions = client.user_user_interactions(
    author="spez",
    subreddit="technology",
    after="2026-04-01",
    before="2026-04-08",
    min_count=2,
    limit=100,
)
user_subreddit_interactions
subreddit_interactions = client.user_subreddit_interactions(
    author="spez",
    weight_posts=1.0,
    weight_comments=1.0,
    after="2026-04-01",
    before="2026-04-08",
    min_count=1,
    limit=100,
)
aggregate_flairs
flairs = client.aggregate_flairs(author="spez")
Utility Endpoints
resolve_short_links
resolved = client.resolve_short_links(paths=["3g1jfiw", "3h2kxyz"])
time_series
series = client.time_series(
    key="posts",
    precision="day",
    after="2026-04-01",
    before="2026-04-08",
)
Helper Functions
utc_now
now_utc = utc_now()
format_date
dt_str = format_date(datetime(2026, 4, 1, 12, 0, 0, tzinfo=timezone.utc))
# "2026-04-01T12:00:00Z"
split_time_range
start = datetime(2026, 4, 1, tzinfo=timezone.utc)
end = datetime(2026, 4, 3, tzinfo=timezone.utc)
windows = split_time_range(start=start, end=end, step_hours=12)
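The windows can be iterated directly. A minimal sketch, assuming split_time_range returns (window_start, window_end) datetime pairs:
# Print each window boundary in the API's timestamp format (assumed tuple shape).
for window_start, window_end in windows:
    print(format_date(window_start), "->", format_date(window_end))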
normalize_response
items = normalize_response({"data": [{"id": "1"}, {"id": "2"}]})
deduplicate_items
deduped = deduplicate_items(
    items=[{"id": "a"}, {"id": "a"}, {"id": "b"}],
    id_field="id",
)
collect_posts_by_windows
start = datetime(2026, 4, 1, tzinfo=timezone.utc)
end = datetime(2026, 4, 8, tzinfo=timezone.utc)
posts = collect_posts_by_windows(
    client=client,
    subreddit="technology",
    start=start,
    end=end,
    step_hours=24,
    limit=100,
    fields=["id", "title", "created_utc"],
)
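If adjacent windows overlap at their boundaries, the same item may appear more than once. A minimal sketch of cleaning the result with deduplicate_items, assuming the collector returns a flat list of post dicts:
# Drop duplicate posts by id before further analysis (assumed flat list of dicts).
unique_posts = deduplicate_items(items=posts, id_field="id")
print(len(unique_posts), "unique posts")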
collect_comments_by_windows
start = datetime(2026, 4, 1, tzinfo=timezone.utc)
end = datetime(2026, 4, 8, tzinfo=timezone.utc)
comments = collect_comments_by_windows(
    client=client,
    subreddit="technology",
    start=start,
    end=end,
    step_hours=24,
    limit=100,
    fields=["id", "body", "created_utc"],
)
collect_posts_by_subreddits_parallel
Collect posts from multiple subreddits concurrently using a ProcessPoolExecutor, which is typically much faster than collecting each subreddit sequentially.
from arcshiftwrap import collect_posts_by_subreddits_parallel
start = datetime(2026, 4, 1, tzinfo=timezone.utc)
end = datetime(2026, 4, 8, tzinfo=timezone.utc)
subreddits = ["technology", "MachineLearning", "chatgpt", "OpenAI"]
posts = collect_posts_by_subreddits_parallel(
    client=client,
    subreddits=subreddits,
    start=start,
    end=end,
    step_hours=24,
    limit=100,
    max_workers=4,  # number of parallel processes
    fields=["id", "title", "subreddit", "created_utc"],
)
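The combined results can be regrouped by subreddit afterwards. A minimal sketch, assuming the parallel collector returns one flat list of post dicts:
from collections import defaultdict

# Count how many posts were collected per subreddit (assumed flat list of dicts).
by_subreddit = defaultdict(list)
for post in posts:
    by_subreddit[post.get("subreddit")].append(post)
for name, items in by_subreddit.items():
    print(name, len(items))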
collect_comments_by_subreddits_parallel
Collect comments from multiple subreddits concurrently using a ProcessPoolExecutor, which is typically much faster than collecting each subreddit sequentially.
from arcshiftwrap import collect_comments_by_subreddits_parallel
start = datetime(2026, 4, 1, tzinfo=timezone.utc)
end = datetime(2026, 4, 8, tzinfo=timezone.utc)
subreddits = ["technology", "MachineLearning", "chatgpt", "OpenAI"]
comments = collect_comments_by_subreddits_parallel(
    client=client,
    subreddits=subreddits,
    start=start,
    end=end,
    step_hours=24,
    limit=100,
    max_workers=4,  # number of parallel processes
    fields=["id", "body", "subreddit", "created_utc"],
)
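Collected data can be persisted for later analysis. A minimal sketch, assuming the result is a JSON-serializable list of dicts:
import json

# Write the collected comments to disk (plain Python, no library-specific assumptions).
with open("comments.json", "w", encoding="utf-8") as f:
    json.dump(comments, f, ensure_ascii=False)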
Notes
- These examples reflect the current public API in arcshiftwrap/arctic_shift.py.
- Internal helper methods such as _join, _bool, _clean, _parse_response, and _rate_limit_wait are not part of the public API and are intentionally not shown here.