A Python library for building AI-powered applications.
Project description
Spice
Usage Examples
All examples can be found in scripts/run.py
from spice import Spice
client = Spice()
messages: List[SpiceMessage] = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "list 5 random words"},
]
response = await client.get_response(messages=messages, model="gpt-4-0125-preview")
print(response.text)
Streaming
# You can set a default model for the client instead of passing it with each call
client = Spice(default_text_model="claude-3-opus-20240229")
messages: List[SpiceMessage] = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "list 5 random words"},
]
stream = await client.stream_response(messages=messages)
async for text in stream:
print(text, end="", flush=True)
# Retrieve the complete response from the stream
response = await stream.complete_response()
# Response always includes the final text, no need to build it from the stream yourself
print(response.text)
# Response also includes helpful stats
print(f"Took {response.total_time:.2f}s")
print(f"Input/Output tokens: {response.input_tokens}/{response.output_tokens}")
Mixing Providers
# Commonly used models and providers have premade constants
from spice.models import GPT_4_0125_PREVIEW
# Alias models for easy configuration, even mixing providers
model_aliases = {
"task1_model": GPT_4_0125_PREVIEW,
"task2_model": "claude-3-opus-20240229",
"task3_model": "claude-3-haiku-20240307",
}
client = Spice(model_aliases=model_aliases)
messages: List[SpiceMessage] = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "list 5 random words"},
]
responses = await asyncio.gather(
client.get_response(messages=messages, model="task1_model"),
client.get_response(messages=messages, model="task2_model"),
client.get_response(messages=messages, model="task3_model"),
)
for i, response in enumerate(responses, 1):
print(f"\nModel {i} response:")
print(response.text)
print(f"Characters per second: {response.characters_per_second:.2f}")
Using unknown models
client = Spice()
messages: List[SpiceMessage] = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "list 5 random words"},
]
# To use Azure, specify the provider and the deployment model name
response = await client.get_response(messages=messages, model="first-gpt35", provider="azure")
print(response.text)
# Alternatively, to make a model and its provider known to Spice, create a custom Model object
from spice.models import TextModel
from spice.providers import AZURE
AZURE_GPT = TextModel("first-gpt35", AZURE, context_length=16385)
response = await client.get_response(messages=messages, model=AZURE_GPT)
print(response.text)
# Creating the model automatically registers it in Spice's model list, so listing the provider is no longer needed
response = await client.get_response(messages=messages, model="first-gpt35")
print(response.text)
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
spiceai-0.1.8.tar.gz
(22.8 kB
view hashes)
Built Distribution
Close
Hashes for spiceai-0.1.8-py2.py3-none-any.whl
Algorithm | Hash digest
---|---
SHA256 | ca20d27e34805aeb6d3f4a9c28e09cd51e1c03d2281f0faea59c0801d85bf4bf
MD5 | 50042b4067a14ff0e4e0d8551308ac13
BLAKE2b-256 | fc45f3b5ba25367cfbe48d767f898a0ea7024b8b20abe504120c3df2d0f2e9f8