LlamaIndex LLMs Bedrock Converse integration
Project description
LlamaIndex Llms Integration: Bedrock Converse
Installation
%pip install llama-index-llms-bedrock-converse
!pip install llama-index
Usage
# Minimal example: run a one-shot text completion against a Bedrock
# Converse model using credentials from a named AWS profile.
from llama_index.llms.bedrock_converse import BedrockConverse
# Set your AWS profile name
profile_name = "Your aws profile name"
# Simple completion call
resp = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    profile_name=profile_name,
).complete("Paul Graham is ")
print(resp)
Call `chat` with a list of messages
# Chat example: send a system + user message list and print the reply.
from llama_index.core.llms import ChatMessage
from llama_index.llms.bedrock_converse import BedrockConverse
# NOTE(review): assumes `profile_name` was defined in the earlier snippet.
messages = [
    ChatMessage(
        role="system", content="You are a pirate with a colorful personality"
    ),
    ChatMessage(role="user", content="Tell me a story"),
]
resp = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    profile_name=profile_name,
).chat(messages)
print(resp)
Streaming
# Using stream_complete endpoint
# Streams the completion token-by-token; each item exposes the new text
# in `r.delta`, printed without a trailing newline to form one string.
from llama_index.llms.bedrock_converse import BedrockConverse
llm = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    profile_name=profile_name,
)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
    print(r.delta, end="")
# Using stream_chat endpoint
# NOTE(review): `ChatMessage` must already be imported from
# llama_index.core.llms (as in the chat example above).
from llama_index.llms.bedrock_converse import BedrockConverse
llm = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    profile_name=profile_name,
)
messages = [
    ChatMessage(
        role="system", content="You are a pirate with a colorful personality"
    ),
    ChatMessage(role="user", content="Tell me a story"),
]
# Stream the chat response; each chunk's new text is in `r.delta`.
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")
Configure Model
# Configure a specific Bedrock model by its model ID, then complete.
from llama_index.llms.bedrock_converse import BedrockConverse
llm = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    profile_name=profile_name,
)
resp = llm.complete("Paul Graham is ")
print(resp)
Connect to Bedrock with Access Keys
# Authenticate with explicit AWS credentials instead of a named profile.
# The string values below are placeholders — substitute real credentials.
from llama_index.llms.bedrock_converse import BedrockConverse
llm = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    aws_access_key_id="AWS Access Key ID to use",
    aws_secret_access_key="AWS Secret Access Key to use",
    aws_session_token="AWS Session Token to use",
    region_name="AWS Region to use, eg. us-east-1",
)
resp = llm.complete("Paul Graham is ")
print(resp)
Function Calling
# Claude, Command, and Mistral Large models support native function calling through AWS Bedrock Converse.
# There is seamless integration with LlamaIndex tools through the predict_and_call function on the LLM.
from llama_index.llms.bedrock_converse import BedrockConverse
from llama_index.core.tools import FunctionTool
# Define some functions
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result"""
    # Docstring kept verbatim: FunctionTool uses it as the tool description.
    product = a * b
    return product
def mystery(a: int, b: int) -> int:
    """Mystery function on two integers."""
    # Docstring kept verbatim: FunctionTool uses it as the tool description.
    # Same value as a * b + a + b, accumulated stepwise.
    total = a * b
    total += a
    total += b
    return total
# Create tools from functions
# Each FunctionTool derives its name/description from the wrapped function.
mystery_tool = FunctionTool.from_defaults(fn=mystery)
multiply_tool = FunctionTool.from_defaults(fn=multiply)
# Instantiate the BedrockConverse model
llm = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    profile_name=profile_name,
)
# Use function tools with the LLM
# Single tool call: the model picks the right tool and arguments.
response = llm.predict_and_call(
    [mystery_tool, multiply_tool],
    user_msg="What happens if I run the mystery function on 5 and 7",
)
print(str(response))
# Parallel tool calls: `allow_parallel_tool_calls=True` lets the model
# invoke the tool once per input pair in a single turn.
response = llm.predict_and_call(
    [mystery_tool, multiply_tool],
    user_msg=(
        """What happens if I run the mystery function on the following pairs of numbers?
Generate a separate result for each row:
- 1 and 2
- 8 and 4
- 100 and 20
NOTE: you need to run the mystery function for all of the pairs above at the same time"""
    ),
    allow_parallel_tool_calls=True,
)
print(str(response))
# Inspect each underlying tool invocation (name, input, output).
for s in response.sources:
    print(f"Name: {s.tool_name}, Input: {s.raw_input}, Output: {str(s)}")
Async usage
# Async example. NOTE: top-level `await` only works in an async context
# (e.g. a Jupyter notebook or inside an `async def` coroutine).
from llama_index.llms.bedrock_converse import BedrockConverse
llm = BedrockConverse(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    aws_access_key_id="AWS Access Key ID to use",
    aws_secret_access_key="AWS Secret Access Key to use",
    aws_session_token="AWS Session Token to use",
    region_name="AWS Region to use, eg. us-east-1",
)
# Use async complete
resp = await llm.acomplete("Paul Graham is ")
print(resp)
LLM Implementation example
https://docs.llamaindex.ai/en/stable/examples/llm/bedrock_converse/
Project details
Release history Release notifications | RSS feed
Download files
Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
Source Distribution
Built Distribution
Close
Hashes for llama_index_llms_bedrock_converse-0.3.2.tar.gz
Algorithm | Hash digest | |
---|---|---|
SHA256 | 334ea302fe19b48b596ba42a90581278f49b7767974cc3cde5312f39297655b8 |
|
MD5 | ad4bafb0b4e37a31df67b85d78b70309 |
|
BLAKE2b-256 | 7a662d8978dcb3cef2a348566aed415f67e32f000c2b68b5cc3434d00b6770c4 |
Close
Hashes for llama_index_llms_bedrock_converse-0.3.2-py3-none-any.whl
Algorithm | Hash digest | |
---|---|---|
SHA256 | 0d896ea36b606721fba8da77729a7d000649947801e5891d7a39483535c38dab |
|
MD5 | 3e68e290122bf7ae6de907dc87db7909 |
|
BLAKE2b-256 | a7a7c9d734423f895029d39b9f72a0826afa11b7cbebf81e16630bdeaa32da72 |