There are two requirements for running the examples in this file.
You must install and set up the JaguarDB server and its HTTP gateway server.
Please refer to the instructions in:
www.jaguardb.com
For quick setup in docker environment:
docker pull jaguardb/jaguardb
docker run -d -p 8888:8888 -p 8080:8080 --name jaguardb jaguardb/jaguardb
You must install the http client package for JaguarDB:
Copy
Ask AI
pip install -U jaguardb-http-client
You'll need to install langchain-community with `pip install -qU langchain-community` to use this integration.
This section demonstrates chatting with LLM together with Jaguar in the langchain software stack.
Copy
Ask AI
"""RAG example: chat with an LLM using JaguarDB as the vector store.

Flow: load a text file, split it into chunks, embed and store the chunks
in a Jaguar vector store, then answer a question with a retrieval-augmented
LCEL chain (retriever -> prompt -> chat model -> string output).

Requires a running JaguarDB server + HTTP gateway (see setup notes above)
and the OPENAI_API_KEY / JAGUAR_API_KEY credentials to be configured.
"""
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.jaguar import Jaguar
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

# --- Load a text file into a set of documents ---------------------------
loader = TextLoader("../../how_to/state_of_the_union.txt")
documents = loader.load()
# Overlapping chunks so retrieved context doesn't cut sentences at chunk edges.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=300)
docs = text_splitter.split_documents(documents)

# --- Instantiate a Jaguar vector store ----------------------------------
# Jaguar HTTP gateway endpoint (adjust host/port for your deployment).
url = "http://192.168.5.88:8080/fwww/"

# Use the OpenAI embedding model to vectorize text.
embeddings = OpenAIEmbeddings()

# A "pod" is a database for vectors in JaguarDB.
pod = "vdb"

# Vector store name and vector index name within the pod.
store = "langchain_rag_store"
vector_index = "v"

# Type of the vector index:
#   cosine   - distance metric
#   fraction - embedding vectors are decimal numbers
#   float    - values stored as floating-point numbers
vector_type = "cosine_fraction_float"

# Dimension of each embedding vector (1536 matches OpenAI's default model).
vector_dimension = 1536

vectorstore = Jaguar(
    pod, store, vector_index, vector_type, vector_dimension, url, embeddings
)

# Login must be performed to authorize the client. The environment variable
# JAGUAR_API_KEY or the file $HOME/.jagrc should contain the API key for
# accessing JaguarDB servers.
vectorstore.login()

# Create the vector store on the JaguarDB server. This should be done only
# once per store.
metadata = "category char(16)"  # extra metadata fields for the store
text_size = 4096                # max characters for the text field
vectorstore.create(metadata, text_size)

# Embed and add the split documents to the vector store.
vectorstore.add_documents(docs)
# Or tag the documents:
# vectorstore.add_documents(more_docs, text_tag="tags to these documents")

# --- Get the retriever object -------------------------------------------
retriever = vectorstore.as_retriever()
# Optionally filter retrieval by metadata:
# retriever = vectorstore.as_retriever(search_kwargs={"where": "m1='123' and m2='abc'"})

template = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:
"""
prompt = ChatPromptTemplate.from_template(template)

# --- Obtain a Large Language Model --------------------------------------
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# --- Create a chain for the RAG flow ------------------------------------
# The retriever fills {context}; the raw question passes through to {question}.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

resp = rag_chain.invoke("What did the president say about Justice Breyer?")
print(resp)