
An integration package connecting LindormIntegration and LangChain


This package contains the LangChain integration with LindormIntegration.

Installation

pip install -U langchain-lindorm-integration

Embeddings

The LindormAIEmbeddings class exposes embeddings from LindormIntegration.

import os
from langchain_lindorm_integration import LindormAIEmbeddings

query = "What is the meaning of life?"
embedding = LindormAIEmbeddings(
    endpoint=os.environ.get("AI_ENDPOINT", "<AI_ENDPOINT>"),
    username=os.environ.get("AI_USERNAME", "root"),
    password=os.environ.get("AI_PASSWORD", "<PASSWORD>"),
    model_name=os.environ.get("AI_DEFAULT_EMBEDDING_MODEL", "bge_m3_model"),
)  # type: ignore[call-arg]
output = embedding.embed_query(query)
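
For batches of texts, the standard LangChain Embeddings interface also provides embed_documents. The sketch below assumes LindormAIEmbeddings follows that interface and reuses the embedding object configured above; the sample texts are illustrative only.

# Batch embedding (assumes the standard LangChain Embeddings interface).
texts = [
    "Lindorm is a cloud-native multi-model database.",
    "LangChain provides a common interface for embedding models.",
]
vectors = embedding.embed_documents(texts)  # one vector per input text
print(len(vectors), len(vectors[0]))  # number of texts, embedding dimension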

Rerank

The LindormAIRerank class exposes reranking from LindormIntegration.

import os
from langchain_core.documents import Document
from langchain_lindorm_integration.reranker import LindormAIRerank

reranker = LindormAIRerank(
    endpoint=os.environ.get("AI_ENDPOINT", "<AI_ENDPOINT>"),
    username=os.environ.get("AI_USERNAME", "root"),
    password=os.environ.get("AI_PASSWORD", "<PASSWORD>"),
    model_name=os.environ.get("AI_DEFAULT_EMBEDDING_MODEL", "bge_m3_model"),
    max_workers=5,
    client=None,
)

docs = [
    # "Quantum computing is a frontier field of computing science"
    Document(page_content="量子计算是计算科学的一个前沿领域"),
    # "Advances in pretrained language models have brought new progress to text ranking models"
    Document(page_content="预训练语言模型的发展给文本排序模型带来了新的进展"),
    # "Text ranking models are widely used in search engines and recommender systems,
    # where they rank candidate texts by relevance"
    Document(
        page_content="文本排序模型广泛用于搜索引擎和推荐系统中,它们根据文本相关性对候选文本进行排序"
    ),
    Document(page_content="random text for nothing"),
]

for i, doc in enumerate(docs):
    doc.metadata = {"rating": i, "split_setting": str(i % 5)}
    doc.id = str(i)
# The query translates to "What is a text ranking model?"
results = reranker.compress_documents(
    query="什么是文本排序模型",
    documents=docs,
)
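
compress_documents returns the documents ordered by relevance. Other LangChain rerankers conventionally attach a relevance_score entry to each document's metadata; the sketch below assumes LindormAIRerank follows that convention, so verify the metadata keys in your environment.

# Inspect the reranked order; relevance_score is assumed based on the
# common LangChain reranker convention and may differ for LindormAIRerank.
for doc in results:
    print(doc.id, doc.metadata.get("relevance_score"), doc.page_content[:20])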

VectorStore

The LindormVectorStore class exposes the vector store from LindormIntegration.

import os
from langchain_lindorm_integration import LindormVectorStore
from langchain_lindorm_integration import LindormAIEmbeddings
from langchain_core.documents import Document

index_name = "langchain_test_index"
dimension = 1024
http_auth = (
    os.environ.get("SEARCH_USERNAME", "root"),
    os.environ.get("SEARCH_PASSWORD", "<PASSWORD>"),
)

def get_default_embedding():
    return LindormAIEmbeddings(
        endpoint=os.environ.get("AI_ENDPOINT", "<AI_ENDPOINT>"),
        username=os.environ.get("AI_USERNAME", "root"),
        password=os.environ.get("AI_PASSWORD", "<PASSWORD>"),
        model_name=os.environ.get("AI_DEFAULT_EMBEDDING_MODEL", "bge_m3_model"),
    )

BUILD_INDEX_PARAMS = {
    "lindorm_search_url": os.environ.get("SEARCH_ENDPOINT", "<SEARCH_ENDPOINT>"),
    "dimension": dimension,
    "embedding": get_default_embedding(),
    "http_auth": http_auth,
    "index_name": index_name,
    "use_ssl": False,
    "verify_certs": False,
    "ssl_assert_hostname": False,
    "ssl_show_warn": False,
    "bulk_size": 500,
    "timeout": 60,
    "max_retries": 3,
    "retry_on_timeout": True,
    "embed_thread_num": 2,
    "write_thread_num": 5,
    "pool_maxsize": 20,
    "engine": "lvector",
    "space_type": "l2",
}

vectorstore = LindormVectorStore(**BUILD_INDEX_PARAMS)
original_documents = [
    Document(page_content="foo", metadata={"id": 1}),
    Document(page_content="bar", metadata={"id": 2}),
]
ids = vectorstore.add_documents(original_documents)
documents = vectorstore.similarity_search("bar", k=2)
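
Because LindormVectorStore implements LangChain's VectorStore interface, it can also be wrapped as a retriever for use in chains. The sketch below relies only on the base-class as_retriever method and reuses the vectorstore built above.

# Wrap the store as a retriever (as_retriever comes from the base
# VectorStore class in langchain_core).
retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
retrieved = retriever.invoke("bar")
for doc in retrieved:
    print(doc.page_content, doc.metadata)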
