# Narrative AI SDK

## 🔑 LLM Engine (`nai.llm`)
### `generate()`

| Description | Inputs | Returns |
|---|---|---|
| Standard text generation | `prompt`, `model`, `max_tokens` | `LLMResponse` |

```python
import narrative_ai as nai
import asyncio

async def main():
    nai.llm.set_api_key("key", provider="openai")
    res = await nai.llm.generate("Hi")
    print(res.text)

asyncio.run(main())
```
### `generate_stream()`

| Description | Inputs | Returns |
|---|---|---|
| Streaming responses | `prompt`, `model` | `AsyncIterator` |

```python
import narrative_ai as nai
import asyncio

async def main():
    nai.llm.set_api_key("key", provider="openai")
    async for chunk in nai.llm.generate_stream("Hi"):
        print(chunk)

asyncio.run(main())
```
### `set_api_key()`

| Description | Inputs | Returns |
|---|---|---|
| Set API key | `api_key`, `provider` | `None` |

```python
import narrative_ai as nai

nai.llm.set_api_key("key", "openai")
```
### `set_llm_provider()`

| Description | Inputs | Returns |
|---|---|---|
| Change provider | `provider` | `None` |

```python
import narrative_ai as nai

nai.llm.set_llm_provider("anthropic")
```
### `set_service_url()`

| Description | Inputs | Returns |
|---|---|---|
| Set base URL | `url` | `None` |

```python
import narrative_ai as nai

nai.llm.set_service_url("https://api.openai.com/v1")
```
## 🎙️ STT Engine (`nai.stt`)

### `transcribe()`

| Description | Inputs | Returns |
|---|---|---|
| Audio to text | `audio_path`, `language` | `STTResult` |

```python
import narrative_ai as nai
import asyncio

async def main():
    nai.stt.set_api_key("key", "elevenlabs")
    res = await nai.stt.transcribe("a.mp3")
    print(res.text)

asyncio.run(main())
```
### `stream_transcribe()`

| Description | Inputs | Returns |
|---|---|---|
| Real-time transcription | `audio_stream` | `AsyncIterator` |

```python
import narrative_ai as nai
import asyncio

async def main():
    # `stream` is your audio input source
    async for t in nai.stt.stream_transcribe(stream):
        print(t.text)

asyncio.run(main())
```
### `set_api_key()`

| Description | Inputs | Returns |
|---|---|---|
| Set STT key | `key`, `provider` | `None` |

```python
import narrative_ai as nai

nai.stt.set_api_key("key", "elevenlabs")
```
## 🔊 TTS Engine (`nai.tts`)

### `synthesize()`

| Description | Inputs | Returns |
|---|---|---|
| Text to audio | `text`, `voice` | `str` (Path) |

```python
import narrative_ai as nai
import asyncio

async def main():
    nai.tts.set_api_key("key", "openai")
    path = await nai.tts.synthesize("Hi")
    print(path)

asyncio.run(main())
```
### `stream_synthesize()`

| Description | Inputs | Returns |
|---|---|---|
| Stream audio bytes | `text`, `voice` | `AsyncIterator` |

```python
import narrative_ai as nai
import asyncio

async def main():
    async for chunk in nai.tts.stream_synthesize("Hi"):
        print(len(chunk))

asyncio.run(main())
```
## 📚 RAG Engine (`nai.rag`)

### `remember()`

| Description | Inputs | Returns |
|---|---|---|
| Index document | `doc`, `doc_id` | `bool` |

```python
import narrative_ai as nai
import asyncio

async def main():
    doc = await nai.input_processor.process("f.pdf")
    await nai.rag.remember(doc, "id_1")

asyncio.run(main())
```
### `recall()`

| Description | Inputs | Returns |
|---|---|---|
| Search context | `query`, `top_k` | `RichContext` |

```python
import narrative_ai as nai
import asyncio

async def main():
    res = await nai.rag.recall("query")
    print(res.formatted_text)

asyncio.run(main())
```
### `forget()`

| Description | Inputs | Returns |
|---|---|---|
| Delete doc | `doc_id` | `bool` |

```python
import narrative_ai as nai
import asyncio

async def main():
    await nai.rag.forget("id_1")

asyncio.run(main())
```
### `clear_memory()`

| Description | Inputs | Returns |
|---|---|---|
| Wipe vector store | `None` | `bool` |

```python
import narrative_ai as nai
import asyncio

async def main():
    await nai.rag.clear_memory()

asyncio.run(main())
```
## 👁️ OCR Engine (`nai.ocr`)

### `process_image()`

| Description | Inputs | Returns |
|---|---|---|
| Image to text | `image_path` | `OCRResult` |

```python
import narrative_ai as nai
import asyncio

async def main():
    res = await nai.ocr.process_image("i.jpg")
    print(res.text)

asyncio.run(main())
```
### `process_pdf()`

| Description | Inputs | Returns |
|---|---|---|
| PDF to text | `pdf_path` | `OCRResult` |

```python
import narrative_ai as nai
import asyncio

async def main():
    res = await nai.ocr.process_pdf("d.pdf")
    print(res.text)

asyncio.run(main())
```
## 🛠️ Input Processor (`nai.input_processor`)

### `process()`

| Description | Inputs | Returns |
|---|---|---|
| Smart processing | `source` | `StructuredDocument` |

```python
import narrative_ai as nai
import asyncio

async def main():
    doc = await nai.input_processor.process("data.zip")
    print(doc.text)

asyncio.run(main())
```
### `process_batch()`

| Description | Inputs | Returns |
|---|---|---|
| Batch process | `list` | `List[Doc]` |

```python
import narrative_ai as nai
import asyncio

async def main():
    docs = await nai.input_processor.process_batch(["f1", "f2"])

asyncio.run(main())
```
### `process_audio()`

| Description | Inputs | Returns |
|---|---|---|
| Explicit audio | `path` | `Doc` |

```python
import narrative_ai as nai
import asyncio

async def main():
    doc = await nai.input_processor.process_audio("a.wav")

asyncio.run(main())
```
### `process_document()`

| Description | Inputs | Returns |
|---|---|---|
| Explicit PDF/DOCX | `path` | `Doc` |

```python
import narrative_ai as nai
import asyncio

async def main():
    doc = await nai.input_processor.process_document("d.pdf")

asyncio.run(main())
```
## 🤖 Voice Mode (`nai.voice_mode`)

### `start_agent()`

| Description | Inputs | Returns |
|---|---|---|
| Run agent loop | `None` | `None` |

```python
import narrative_ai as nai

nai.voice_mode.start_agent()
```
### `set_livekit_config()`

| Description | Inputs | Returns |
|---|---|---|
| Set connection | `url`, `key`, `secret` | `None` |

```python
import narrative_ai as nai

nai.voice_mode.set_livekit_config("url", "key", "secret")
```
### `set_agent_name()`

| Description | Inputs | Returns |
|---|---|---|
| Set name | `name` | `None` |

```python
import narrative_ai as nai

nai.voice_mode.set_agent_name("Assistant")
```
## 🔍 Web Intelligence (`nai.web_intel`)

### `search()`

| Description | Inputs | Returns |
|---|---|---|
| Real-time search | `query` | `WebResult` |

```python
import narrative_ai as nai
import asyncio

async def main():
    res = await nai.web_intel.search("AI")

asyncio.run(main())
```
### `research()`

| Description | Inputs | Returns |
|---|---|---|
| Deep report | `topic` | `str` |

```python
import narrative_ai as nai
import asyncio

async def main():
    report = await nai.web_intel.research("Topic")

asyncio.run(main())
```
## 🎨 VLM Engine (`nai.vlm`)

### `analyze_image()`

| Description | Inputs | Returns |
|---|---|---|
| Image reasoning | `image`, `prompt` | `VLMResponse` |

```python
import narrative_ai as nai
import asyncio

async def main():
    res = await nai.vlm.analyze_image("i.jpg", "What is this?")

asyncio.run(main())
```
### `chat_with_image()`

| Description | Inputs | Returns |
|---|---|---|
| Multi-turn chat | `image`, `history` | `VLMResponse` |

```python
import narrative_ai as nai
import asyncio

async def main():
    res = await nai.vlm.chat_with_image("i.jpg", history=[])

asyncio.run(main())
```
## License

MIT License.