Narrative AI SDK (v0.2.1)
🔑 LLM Engine (nai.llm)
generate()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Generates a complete text response based on a prompt. | prompt (str), model (str), max_tokens (int) | LLMResponse (Object) |
import narrative_ai as nai
import asyncio
async def main():
nai.llm.set_api_key("key", provider="openai")
res = await nai.llm.generate("Hello")
print(res.text)
asyncio.run(main())
generate_stream()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Streams text generation token-by-token for real-time apps. | prompt (str), model (str) | AsyncIterator[str] |
import narrative_ai as nai
import asyncio
async def main():
nai.llm.set_api_key("key", provider="openai")
async for chunk in nai.llm.generate_stream("Hi"):
print(chunk, end="", flush=True)
asyncio.run(main())
set_api_key()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Configures the API key for a specific LLM provider. | api_key (str), provider (str) | None |
import narrative_ai as nai
nai.llm.set_api_key("sk-...", provider="openai")
set_llm_provider()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Switches the active LLM provider globally. | provider (str) | None |
import narrative_ai as nai
nai.llm.set_llm_provider("gemini")
set_service_url()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets a custom base URL for the LLM API. | url (str) | None |
import narrative_ai as nai
nai.llm.set_service_url("https://api.openai.com/v1")
get_engine()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves the underlying LLM engine instance. | None | LLMEngine |
import narrative_ai as nai
engine = nai.llm.get_engine()
LLMClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful LLM client for session management. | user_id (str), tenant_id (str) | LLMClient |
import narrative_ai as nai
client = nai.llm.LLMClient(user_id="u123")
🎙️ STT Engine (nai.stt)
transcribe()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Converts an audio file into text. | audio_path (str), language (str) | STTResult |
import narrative_ai as nai
import asyncio
async def main():
nai.stt.set_api_key("key", provider="elevenlabs")
res = await nai.stt.transcribe("audio.mp3")
print(res.text)
asyncio.run(main())
stream_transcribe()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Transcribes real-time audio streams. | audio_stream | AsyncIterator |
import narrative_ai as nai
import asyncio
async def main():
async for result in nai.stt.stream_transcribe(audio_stream):
print(result.text)
asyncio.run(main())
set_api_key()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets the API key for the STT provider. | api_key (str), provider (str) | None |
import narrative_ai as nai
nai.stt.set_api_key("key", provider="elevenlabs")
set_stt_provider()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Changes the default STT provider. | provider (str) | None |
import narrative_ai as nai
nai.stt.set_stt_provider("whisper")
get_engine()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves the raw STT engine instance. | None | STTEngine |
import narrative_ai as nai
engine = nai.stt.get_engine()
STTClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful STT client. | user_id (str) | STTClient |
import narrative_ai as nai
client = nai.stt.STTClient()
🔊 TTS Engine (nai.tts)
synthesize()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Converts text into an audio file. | text (str), voice (str) | str (Path) |
import narrative_ai as nai
import asyncio
async def main():
nai.tts.set_api_key("key", provider="openai")
path = await nai.tts.synthesize("Hello")
print(path)
asyncio.run(main())
stream_synthesize()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Streams synthesized audio bytes. | text (str), voice (str) | AsyncIterator[bytes] |
import narrative_ai as nai
import asyncio
async def main():
async for chunk in nai.tts.stream_synthesize("Hello"):
print(len(chunk))
asyncio.run(main())
set_api_key()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets the TTS provider API key. | api_key (str), provider (str) | None |
import narrative_ai as nai
nai.tts.set_api_key("key", provider="openai")
set_tts_provider()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Changes the TTS engine provider. | provider (str) | None |
import narrative_ai as nai
nai.tts.set_tts_provider("elevenlabs")
get_engine()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves the TTS engine instance. | None | TTSEngine |
import narrative_ai as nai
engine = nai.tts.get_engine()
TTSClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful TTS client. | user_id (str) | TTSClient |
import narrative_ai as nai
client = nai.tts.TTSClient()
📚 RAG Engine (nai.rag)
remember()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Indexes a document into the vector store. | document (Doc), doc_id (str) | bool |
import narrative_ai as nai
import asyncio
async def main():
nai.rag.set_api_key("key", provider="cohere")
doc = await nai.input_processor.process("f.pdf")
await nai.rag.remember(doc, "id1")
asyncio.run(main())
recall()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves relevant context based on a query. | query (str), top_k (int) | RichContext |
import narrative_ai as nai
import asyncio
async def main():
res = await nai.rag.recall("policy info")
print(res.formatted_text)
asyncio.run(main())
forget()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Deletes a document from the vector store. | doc_id (str) | bool |
import narrative_ai as nai
import asyncio
async def main():
await nai.rag.forget("id1")
asyncio.run(main())
clear_memory()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Wipes the entire vector database. | None | bool |
import narrative_ai as nai
import asyncio
async def main():
await nai.rag.clear_memory()
asyncio.run(main())
set_api_key()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets the embedding provider API key. | api_key (str), provider (str) | None |
import narrative_ai as nai
nai.rag.set_api_key("key", provider="openai")
get_manager()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves the internal memory manager. | None | MemoryManager |
import narrative_ai as nai
mgr = nai.rag.get_manager()
RAGClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful RAG client. | user_id (str) | RAGClient |
import narrative_ai as nai
client = nai.rag.RAGClient()
👁️ OCR Engine (nai.ocr)
process_image()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Extracts text from an image. | image_path (str) | OCRResult |
import narrative_ai as nai
import asyncio
async def main():
nai.ocr.set_service_url("https://...")
res = await nai.ocr.process_image("i.jpg")
print(res.text)
asyncio.run(main())
process_pdf()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Extracts text from all pages of a PDF. | pdf_path (str) | OCRResult |
import narrative_ai as nai
import asyncio
async def main():
res = await nai.ocr.process_pdf("d.pdf")
print(res.text)
asyncio.run(main())
set_service_url()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Configures the OCR service endpoint. | url (str) | None |
import narrative_ai as nai
nai.ocr.set_service_url("https://ocr.api.com")
set_ocr_provider()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Changes the OCR technology provider. | provider (str) | None |
import narrative_ai as nai
nai.ocr.set_ocr_provider("google_vision")
get_pipeline()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves the OCR processing pipeline. | None | OCRPipeline |
import narrative_ai as nai
p = nai.ocr.get_pipeline()
OCRClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful OCR client. | user_id (str) | OCRClient |
import narrative_ai as nai
client = nai.ocr.OCRClient()
🛠️ Input Processor (nai.input_processor)
process()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Detects file type and routes to the correct engine. | source (Any) | StructuredDocument |
import narrative_ai as nai
import asyncio
async def main():
doc = await nai.input_processor.process("any_file.mp3")
print(doc.text)
asyncio.run(main())
process_batch()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Processes multiple files concurrently. | sources (List) | List[Doc] |
import narrative_ai as nai
import asyncio
async def main():
docs = await nai.input_processor.process_batch(["f1.jpg", "f2.pdf"])
asyncio.run(main())
process_audio()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Specifically routes to the STT engine. | path (str) | Doc |
import narrative_ai as nai
import asyncio
async def main():
doc = await nai.input_processor.process_audio("a.wav")
asyncio.run(main())
process_document()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Routes to PDF/OCR document processing. | path (str) | Doc |
import narrative_ai as nai
import asyncio
async def main():
doc = await nai.input_processor.process_document("d.pdf")
asyncio.run(main())
process_image()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Routes specifically to Image OCR processing. | path (str) | Doc |
import narrative_ai as nai
import asyncio
async def main():
doc = await nai.input_processor.process_image("i.jpg")
asyncio.run(main())
process_url()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Fetches and processes content from a URL. | url (str) | Doc |
import narrative_ai as nai
import asyncio
async def main():
doc = await nai.input_processor.process_url("https://...")
asyncio.run(main())
InputClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful Input Processing client. | user_id (str) | InputClient |
import narrative_ai as nai
client = nai.input_processor.InputClient()
🤖 Voice Mode (nai.voice_mode)
start_agent()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Runs the LiveKit conversational agent loop. | None | None |
import narrative_ai as nai
nai.voice_mode.start_agent()
set_livekit_config()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets LiveKit connection credentials. | url (str), api_key (str), api_secret (str) | None |
import narrative_ai as nai
nai.voice_mode.set_livekit_config(url="...", api_key="...", api_secret="...")
set_agent_name()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets the agent's display name. | name (str) | None |
import narrative_ai as nai
nai.voice_mode.set_agent_name("Jarvis")
VoiceClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful Voice Mode client. | user_id (str) | VoiceClient |
import narrative_ai as nai
client = nai.voice_mode.VoiceClient()
🔍 Web Intelligence (nai.web_intel)
search()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Performs a real-time web search. | query (str) | WebResult |
import narrative_ai as nai
import asyncio
async def main():
nai.web_intel.set_api_key("key")
res = await nai.web_intel.search("Current events")
asyncio.run(main())
research()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Generates a deep research report on a topic. | topic (str) | str (Markdown) |
import narrative_ai as nai
import asyncio
async def main():
report = await nai.web_intel.research("Global warming")
print(report)
asyncio.run(main())
set_api_key()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Sets the search provider API key. | api_key (str) | None |
import narrative_ai as nai
nai.web_intel.set_api_key("key")
get_engine()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Retrieves the Web Intelligence engine instance. | None | WebIntelEngine |
import narrative_ai as nai
engine = nai.web_intel.get_engine()
WebIntelClient
| Description | Inputs | Outputs |
| --- | --- | --- |
| Creates a stateful Web Intel client. | user_id (str) | WebIntelClient |
import narrative_ai as nai
client = nai.web_intel.WebIntelClient()
🎨 VLM Engine (nai.vlm)
analyze_image()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Performs visual reasoning on an image. | image (Any), prompt (str) | VLMResponse |
import narrative_ai as nai
import asyncio
async def main():
nai.vlm.set_api_key("key")
res = await nai.vlm.analyze_image("i.jpg", "Describe this.")
asyncio.run(main())
chat_with_image()
| Description | Inputs | Outputs |
| --- | --- | --- |
| Converses with the AI about an image. | image (Any), history (List) | VLMResponse |
import narrative_ai as nai
import asyncio
async def main():
res = await nai.vlm.chat_with_image("i.jpg", history=[])
asyncio.run(main())
set_api_key()
| Description |
Inputs |
Outputs |
| Sets the Vision API key. |
api_key (str) |
None |
import narrative_ai as nai
nai.vlm.set_api_key("key")
get_processor()
| Description |
Inputs |
Outputs |
| Retrieves the VLM processing instance. |
None |
VLMProcessor |
import narrative_ai as nai
p = nai.vlm.get_processor()
VLMClient
| Description |
Inputs |
Outputs |
| Creates a stateful VLM client. |
user_id (str) |
VLMClient |
import narrative_ai as nai
client = nai.vlm.VLMClient()
License
MIT License.