!pip install tiktoken
!pip install wikipedia
!pip install pypdf
!pip install faiss-cpu
!pip install pinecone-client
!pip install langchain
!pip install langgraph
!pip install langchain-core
!pip install unstructured
Hidden output
import os
import uuid
import pandas as pd
import matplotlib.pyplot as plt
from dotenv import load_dotenv

from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_community.document_loaders import PyPDFLoader, HNLoader
from langchain_community.document_loaders import UnstructuredHTMLLoader, BSHTMLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS, Pinecone
from transformers import GPT2TokenizerFast  # used only to count tokens when chunking
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent  # prebuilt agent for the memory example below
load_dotenv()  # read keys from a local .env file, if present

openai_api_key = os.environ["OPENAI_API_KEY"]
pinecone_api_key = os.environ["PINECONE_API"]
# pinecone_env_key = os.environ["PINECONE_ENV_KEY"]
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large", dimensions=1024)

parser = StrOutputParser()

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

high_level = """You're an assistant that helps users talk with PDF documents."""

template = ChatPromptTemplate.from_messages([("system", high_level), ("human", "{input}")])
# we define the template first,
# plug our input into it with format_messages,
# then invoke the LLM on the resulting message list

chat1 = template.format_messages(input="Hello, there!")

response = llm.invoke(chat1)

print(response.content)
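
# the same call can be written as an LCEL pipeline, which is the pattern the RAG
# chains below use; the parser reduces the response to a plain string
# (simple_chain is just an illustrative name)
simple_chain = template | llm | parser
print(simple_chain.invoke({"input": "Hello, there!"}))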
# later: embed the input instead, e.g. single_vector = embeddings.embed_query(text)

# creating memory, the new way via langgraph
memory = MemorySaver()

thread_id = str(uuid.uuid4())  # unique key identifying this conversation thread
config = {"configurable": {"thread_id": thread_id}}


# example of how to bind memory to a prebuilt agent:
# app = create_react_agent(
#     model,
#     tools=[get_user_age],
#     checkpointer=memory,
# )
# create a retriever tool over the documents first, then pass it to the app,
# as in the sketch below
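# a fuller sketch of that idea (hypothetical names; assumes the retriever
# built further below and langgraph's prebuilt agent):
# from langchain.tools.retriever import create_retriever_tool
# retriever_tool = create_retriever_tool(
#     retriever,  # e.g. db_book.as_retriever(), defined below
#     name="search_book",
#     description="Searches the loaded PDF for relevant passages.",
# )
# app = create_react_agent(llm, tools=[retriever_tool], checkpointer=memory)
# app.invoke({"messages": [("human", "Hello!")]}, config=config)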
#defining and loading our documents
loader = PyPDFLoader("Ivan_Savov_-_No_Bullshit_Guide_to_Math_and_Physics (1).pdf")
book = loader.load_and_split()
print(book[0])

hn_loader = HNLoader("https://news.ycombinator.com/item?id=34817881")
hn = hn_loader.load()

# create a tokenizer for splitting the docs into token-sized chunks
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def count_tokens(text: str) -> int:
    return len(tokenizer.encode(text))

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size = 500,
    chunk_overlap = 50,
    length_function = count_tokens
)

book_chunks = text_splitter.split_documents(book)
print(len(book_chunks))

hn_chunks = text_splitter.split_documents(hn)
print(len(hn_chunks))
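
# optional check: chunks should stay within the 500-token budget set above
# (the splitter can occasionally overshoot if a single segment is longer)
print(max(count_tokens(chunk.page_content) for chunk in book_chunks))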
#creating vectorstore with embeddings

db_book = FAISS.from_documents(book_chunks, embeddings)
db_hn = FAISS.from_documents(hn_chunks, embeddings)
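
# sanity check before wiring the full chain: raw similarity search on the book
# index (the query string is just an example)
for hit in db_book.similarity_search("product rule for derivatives", k=2):
    print(hit.metadata.get("page"), hit.page_content[:200])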
!pip install langchainhub
# testing: similarity-search retrieval plus the LLM to answer our questions
from langchain import hub
prompt = hub.pull("rlm/rag-prompt")


query1 = "key principles in learning physics"
query2 = "How to learn calculus, step by step"
query3 = "What does the product rule for derivatives stand for?"

retriever = db_book.as_retriever(search_type="similarity", search_kwargs={"k": 3})

def format_docs(docs):
    # join the retrieved chunks into one context string for the prompt
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | parser
)

#invoking the chain with multiple queries
print("INVOKING QUERY 1")
print(rag_chain.invoke(query1))
print("INVOKING QUERY 2")
print(rag_chain.invoke(query2))
print("INVOKING QUERY 3")
print(rag_chain.invoke(query3))
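
# the three calls above can also run as a single batch, the usual way to fan
# an LCEL chain out over several inputs
for answer in rag_chain.batch([query1, query2, query3]):
    print(answer)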
# answering in the style of Daenerys, via a custom prompt
from langchain_core.prompts import PromptTemplate


template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Use the style of Daenerys Targaryen to answer the questions.

{context}

Question: {question}

Helpful Answer:"""

custom_rag_prompt = PromptTemplate.from_template(template)

c_rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | custom_rag_prompt
    | llm
    | parser
)


c_query1 = "key principles in learning physics"
c_query2 = "How to learn calculus, step by step"
c_query3 = "What does the product rule for derivatives stand for?"

print("INVOKING QUERY 1")
print(c_rag_chain.invoke(c_query1))
print("INVOKING QUERY 2")
print(c_rag_chain.invoke(c_query2))
print("INVOKING QUERY 3")
print(c_rag_chain.invoke(c_query3))
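
# to avoid re-embedding the book on every run, the FAISS index can be persisted
# locally ("book_index" is an arbitrary folder name used here for illustration)
db_book.save_local("book_index")
# db_book = FAISS.load_local("book_index", embeddings,
#                            allow_dangerous_deserialization=True)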