Parea python sdk
Project description
parea-sdk
Installation
pip install -U parea-ai
or install with Poetry
poetry add parea-ai
Getting Started
import os
from dotenv import load_dotenv
from parea import Parea
from parea.schemas.models import Completion, UseDeployedPrompt, CompletionResponse, UseDeployedPromptResponse
# Load environment variables (expects PAREA_API_KEY in .env or the environment).
load_dotenv()

p = Parea(api_key=os.getenv("PAREA_API_KEY"))

# You will find this deployment_id in the Parea dashboard.
deployment_id = '<DEPLOYMENT_ID>'

# Assuming your deployed prompt's message is:
# {"role": "user", "content": "Write a hello world program using {{x}} and the {{y}} framework."}
inputs = {"x": "Golang", "y": "Fiber"}

# You can easily unpack a dictionary into an attrs class.
test_completion = Completion(
    **{
        "deployment_id": deployment_id,
        "llm_inputs": inputs,
        "metadata": {"purpose": "testing"},
    }
)

# By passing in the inputs, in addition to the raw message with unfilled variables {{x}} and {{y}},
# we will also get the filled-in prompt:
# {"role": "user", "content": "Write a hello world program using Golang and the Fiber framework."}
test_get_prompt = UseDeployedPrompt(deployment_id=deployment_id, llm_inputs=inputs)
def main():
    """Run a completion against the deployed prompt, then fetch the filled-in prompt text."""
    completion_response: CompletionResponse = p.completion(data=test_completion)
    print(completion_response)
    # get_prompt returns the deployed prompt with {{x}}/{{y}} filled from llm_inputs.
    deployed_prompt: UseDeployedPromptResponse = p.get_prompt(data=test_get_prompt)
    print("\n\n")
    print(deployed_prompt)
async def main_async():
    """Async variant of main(): awaits the completion and prompt-retrieval endpoints."""
    completion_response: CompletionResponse = await p.acompletion(data=test_completion)
    print(completion_response)
    # aget_prompt is the async counterpart of get_prompt.
    deployed_prompt: UseDeployedPromptResponse = await p.aget_prompt(data=test_get_prompt)
    print("\n\n")
    print(deployed_prompt)
Logging results from LLM providers
import os
from dotenv import load_dotenv
import openai
from parea import Parea
from parea.schemas.models import LogRequest
# Load environment variables (expects OPENAI_API_KEY and PAREA_API_KEY).
load_dotenv()

openai.api_key = os.getenv("OPENAI_API_KEY")
p = Parea(api_key=os.getenv("PAREA_API_KEY"))

# Define your OpenAI call as you would normally.
x = "Golang"
y = "Fiber"
inputs = {"x": x, "y": y}
messages = [
    {"role": "user", "content": f"Write a hello world program using {x} and the {y} framework."},
]
model = "gpt-3.5-turbo"
model_params = {
    "temperature": 0.7,
    "top_p": 1.0,
}
completion = openai.ChatCompletion.create(
    model=model,
    messages=messages,
    **model_params,
)
output = completion.choices[0].message['content']

# Build the LogRequest payload: the raw inputs, the LLM configuration that produced
# the output, and the token usage reported by the provider.
log_request: LogRequest = LogRequest(
    status="success",
    name='Test Log',
    llm_inputs={
        "x": x,
        "y": y,
    },
    llm_configuration={
        'model': model,
        'messages': messages,
        'model_params': model_params,
    },
    output=output,
    input_tokens=completion.usage['prompt_tokens'],
    output_tokens=completion.usage['completion_tokens'],
    total_tokens=completion.usage['total_tokens'],
)
def main():
    """Send the prepared log record to Parea synchronously."""
    p.log(data=log_request)
async def main_async():
    """Send the prepared log record to Parea asynchronously."""
    await p.alog(data=log_request)
Open source community features
Ready-to-use Pull Requests templates and several Issue templates.
- Files such as LICENSE, CONTRIBUTING.md, CODE_OF_CONDUCT.md, and SECURITY.md are generated automatically.
- Semantic Versions specification with Release Drafter.
🛡 License
This project is licensed under the terms of the Apache Software License 2.0.
See LICENSE for more details.
📃 Citation
@misc{parea-sdk,
author = {joel-parea-ai},
title = {Parea python sdk},
year = {2023},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/parea-ai/parea-sdk}}
}
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
parea_ai-0.2.0.tar.gz
(675.7 kB
view hashes)
Built Distribution
parea_ai-0.2.0-py3-none-any.whl
(674.6 kB
view hashes)