Composable function transformations for LLM programs
Project description
pip install autoform
Example
Batched semantic gradients: feedback on N outputs → feedback on N inputs.
import autoform as af
def answer(query):
    return af.lm_call(f"Answer: {query}", model="gpt-4o")
ir = af.build_ir(answer)("...") # trace
# 3 queries + 3 critiques -> 3 answers + 3 improvement hints
queries = ["What is AI?", "Explain DNS", "Define recursion"]
critiques = ["too technical", "too long", "perfect"]
batched_pb = af.batch(af.pullback(ir), in_axes=(True, True)) # compose
answers, hints = af.call(batched_pb)((queries, critiques))
Trace once. Batch it. Differentiate it. Compose them.
Why
LLM programs are hard to optimize:
- debugging: which agent caused the bad output?
- optimization: how do you improve prompts systematically?
- batching: how do you run N inputs without rewriting code?
autoform solves this with function transformations. Trace once, transform freely.
Full Example
Multi-agent pipeline with checkpoints, batching, gradients, and debugging:
import autoform as af
class Verdict(af.Struct):
    decision: str
    reasoning: str
def judge_debate(topic: str) -> Verdict:
    """Three agents debate, one judges."""
    # agent 1: argue for
    pro = af.format("Argue FOR: {}", topic)
    pro = af.checkpoint(pro, key="pro", collection="debug")
    msgs = [dict(role="user", content=pro)]
    pro = af.lm_call(msgs, model="gpt-4o")
    # agent 2: argue against
    con = af.format("Argue AGAINST: {}", topic)
    con = af.checkpoint(con, key="con", collection="debug")
    msgs = [dict(role="user", content=con)]
    con = af.lm_call(msgs, model="gpt-4o")
    # agent 3: judge
    prompt = af.format("PRO: {}\nCON: {}\nWho wins?", pro, con)
    prompt = af.checkpoint(prompt, key="judge", collection="debug")
    msgs = [dict(role="user", content=prompt)]
    return af.struct_lm_call(msgs, model="gpt-4o", struct=Verdict)
ir = af.build_ir(judge_debate)("...") # trace (no execution)
# run once
verdict = af.call(ir)("pineapple on pizza")
# batch: parallel topics
batched = af.batch(ir, in_axes=True)
verdicts = af.call(batched)(["pineapple on pizza", "cats vs dogs", "tabs vs spaces"])
# gradients: feedback -> input improvement
pb_ir = af.pullback(ir)
verdict, grad = af.call(pb_ir)(("pineapple on pizza", Verdict(decision="biased", reasoning="pro was weak")))
# collect: capture checkpoint values
verdict, captured = af.collect(ir, collection="debug")("pineapple on pizza")
# inject: override checkpoint values
verdict = af.inject(ir, collection="debug", values=captured)("pineapple on pizza")
# explain how batches are placed
in_axes = (True, Verdict.model_construct(decision=True, reasoning=True))
# compose freely
batched_grads = af.batch(af.pullback(ir), in_axes=in_axes)
⚠️ early development: API may change.
Project details
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
Built Distribution
Filter files by name, interpreter, ABI, and platform.
If you're not sure about the file name format, learn more about wheel file names.
Copy a direct link to the current filters
File details
Details for the file autoform-0.0.7.tar.gz.
File metadata
- Download URL: autoform-0.0.7.tar.gz
- Upload date:
- Size: 54.8 kB
- Tags: Source
- Uploaded using Trusted Publishing? Yes
- Uploaded via: uv/0.9.21 {"installer":{"name":"uv","version":"0.9.21","subcommand":["publish"]},"python":null,"implementation":{"name":null,"version":null},"distro":{"name":"Ubuntu","version":"24.04","id":"noble","libc":null},"system":{"name":null,"release":null},"cpu":null,"openssl_version":null,"setuptools_version":null,"rustc_version":null,"ci":true}
File hashes
| Algorithm | Hash digest |
|---|---|
| SHA256 | c6edab334212f7251a24f09655cc5983366e9545037f16119661ec0057d247b0 |
| MD5 | 78fe02b4187d663277526676c37e029b |
| BLAKE2b-256 | ea7bf48dbc7a0dcdc89a875edd38c60fe65a96661ee8d46516c5e4d2acee6076 |
File details
Details for the file autoform-0.0.7-py3-none-any.whl.
File metadata
- Download URL: autoform-0.0.7-py3-none-any.whl
- Upload date:
- Size: 34.8 kB
- Tags: Python 3
- Uploaded using Trusted Publishing? Yes
- Uploaded via: uv/0.9.21 {"installer":{"name":"uv","version":"0.9.21","subcommand":["publish"]},"python":null,"implementation":{"name":null,"version":null},"distro":{"name":"Ubuntu","version":"24.04","id":"noble","libc":null},"system":{"name":null,"release":null},"cpu":null,"openssl_version":null,"setuptools_version":null,"rustc_version":null,"ci":true}
File hashes
| Algorithm | Hash digest |
|---|---|
| SHA256 | f4a37d7b2dbe5127903d494e7b842d2e0d95f852d65fbddbb425cf7a7c0cf03b |
| MD5 | fce45471ecd15e14032acff7797ce5c4 |
| BLAKE2b-256 | 070424366e314dbe1dc96fbc07c1648280f58ba78fee442176d8ebab36306c12 |