A collection of utilities by Prompt Engineers
Prompt Engineers AI Open Source Package
Build and Publish
## Build Package
bash scripts/build.sh
## Publish Package to PyPi
bash scripts/publish.sh
Development
## In the application directory, start your virtual env (this is the workspace
## of the API server where you would like to install the package)
source .venv/bin/activate
## Then change directory to where your package is, make your changes, and run the following.
pip install .
## Switch back to your app server's workspace directory.
cd <path>/<app>/<server>
pip install -r requirements.txt
## Make sure your app server has the packages listed in setup.py, then run your server...
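Once the package is installed into the app server's environment, a quick sanity check is to import the modules used in the examples below (this assumes the submodules shipped with the version you built):
# Verify the locally built package is importable from the app server's venv
from promptengineers.retrieval.factories import EmbeddingFactory, RetrievalFactory
from promptengineers.retrieval.strategies import VectorstoreContext

print(EmbeddingFactory, RetrievalFactory, VectorstoreContext)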
How to use...
User Repository
A User Repository can be added to any application, extended, and then passed to llm-server to fetch a user's default project configuration from environment variables, or to back a multi-tenant user application. This allows new variables to be introduced when adding additional tools; a hypothetical sketch of the pattern is shown below.
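The exact base class and method names are defined by the package, so the following is only an illustrative sketch of the pattern: a repository that resolves per-user values from a multi-tenant datastore and falls back to environment variables for the single-tenant case. `AppUserRepo` and `find_token` are hypothetical names, not the package's actual interface.
import os

class AppUserRepo:
    """Hypothetical user repository: resolves per-user (or per-tenant)
    settings and falls back to environment variables for single-tenant use."""

    def __init__(self, db=None):
        self.db = db  # e.g. your own MongoDB/SQL client for multi-tenant lookups

    def find_token(self, user_id, key):
        # Multi-tenant: look the value up per user in your own datastore
        if self.db is not None:
            record = self.db.users.find_one({'_id': user_id}) or {}
            return record.get('keys', {}).get(key)
        # Single-tenant: fall back to project-level environment variables
        return os.environ.get(key)

# An instance of the repository would then be handed to llm-server so tools
# can resolve keys such as OPENAI_API_KEY or PINECONE_API_KEY per user.
repo = AppUserRepo()
print(repo.find_token('63f0962f9a09c84c98ab6caf', 'OPENAI_API_KEY'))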
Retrieval Augmented Generation (RAG) - Stream Chat
import os
import asyncio
from promptengineers.chat import langchain_stream_vectorstore_chat
from promptengineers.retrieval.factories import EmbeddingFactory, RetrievalFactory
from promptengineers.retrieval.strategies import VectorstoreContext
# Environment Variables
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
PINECONE_KEY = os.environ.get('PINECONE_API_KEY')
PINECONE_ENV = os.environ.get('PINECONE_ENV')
PINECONE_INDEX = os.environ.get('PINECONE_INDEX')
# Retrieval Constants
INDEX_PROVIDER = 'pinecone'
INDEX_NAME = 'default'
NAMESPACE = '63f0962f9a09c84c98ab6caf::formio'
EMBEDDING_LLM = 'text-embedding-ada-002'
# Chat Constants
CHAT_LLM = 'gpt-3.5-turbo'
TEMPERATURE = 0.9
MESSAGES = [
    {
        'role': 'system',
        'content': 'You are a helpful document retrieval AI, '
                   'use the context to answer the user queries.'
    },
    {
        'role': 'user',
        'content': 'Can you summarize the context?'
    }
]
# Generate Embeddings
embedding = EmbeddingFactory(EMBEDDING_LLM, OPENAI_API_KEY)
# Choose the appropriate vector search provider strategy for Pinecone
retrieval_provider = RetrievalFactory(
    INDEX_PROVIDER,
    embedding.create_embedding(),
    {
        'api_key': PINECONE_KEY,
        'env': PINECONE_ENV,
        'index_name': INDEX_NAME,
        'namespace': NAMESPACE
    }
)
# Create a vector store service context
vectorstore_service = VectorstoreContext(retrieval_provider.create_strategy())
# Load the vectorstore using the service context
vectorstore = vectorstore_service.load()
# Run the chat
async def main():
    response = langchain_stream_vectorstore_chat(
        messages=MESSAGES,
        model=CHAT_LLM,
        temperature=TEMPERATURE,
        vectorstore=vectorstore,
        openai_api_key=OPENAI_API_KEY
    )
    async for data in response:
        print(data)
asyncio.run(main())
Agent Chat equipped w/ tools - Stream Chat
import os
import asyncio
from promptengineers.chat import langchain_stream_agent_chat
from promptengineers.core.config.tools import AVAILABLE_TOOLS
from promptengineers.llms.utils import gather_tools
from promptengineers.retrieval.factories import EmbeddingFactory, RetrievalFactory
from promptengineers.retrieval.strategies import VectorstoreContext
# Environment Variables
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
PINECONE_KEY = os.environ.get('PINECONE_API_KEY')
PINECONE_ENV = os.environ.get('PINECONE_ENV')
PINECONE_INDEX = os.environ.get('PINECONE_INDEX')
# Retrieval Constants
INDEX_PROVIDER = 'pinecone'
INDEX_NAME = 'default'
NAMESPACE = '63f0962f9a09c84c98ab6caf::formio'
EMBEDDING_LLM = 'text-embedding-ada-002'
# Chat Constants
CHAT_LLM = 'gpt-4-1106-preview'
TEMPERATURE = 0.1
MESSAGES = [
    {
        'role': 'system',
        'content': 'You are a powerful AI assistant, you are equipped with tools '
                   'to help you accomplish your tasks. Query the context when you need '
                   'additional information to complete your task. If the user query is not '
                   'related to the context then you can use the tools to complete the task.'
    },
    {
        'role': 'user',
        # 'content': 'What is 14125 compounded annually for 5 years at 4 percent for 23 years?' # Math Agent
        'content': 'Can you provide a React code sample to render a form in Form.io?' # Retrieval Agent
    }
]
# Generate Embeddings
embedding = EmbeddingFactory(EMBEDDING_LLM, OPENAI_API_KEY)
# Choose the appropriate vector search provider strategy for Pinecone
retrieval_provider = RetrievalFactory(
    INDEX_PROVIDER,
    embedding.create_embedding(),
    {
        'api_key': PINECONE_KEY,
        'env': PINECONE_ENV,
        'index_name': INDEX_NAME,
        'namespace': NAMESPACE
    }
)
# Create a vector store service context
vectorstore_service = VectorstoreContext(retrieval_provider.create_strategy())
# Load the vectorstore using the service context
vectorstore = vectorstore_service.load()
# Gather the tools
tools = gather_tools(
    tools=['math_tool'],
    available_tools=AVAILABLE_TOOLS,
    vectorstore=vectorstore,
    plugins=[]
)
# Run the chat
async def main():
    response = langchain_stream_agent_chat(
        messages=MESSAGES,
        model=CHAT_LLM,
        tools=tools,
        temperature=TEMPERATURE,
        openai_api_key=OPENAI_API_KEY
    )
    async for data in response:
        print(data)
asyncio.run(main())