# Install the openai package, locked to version 1.27
!pip install openai==1.27

# Install the pinecone-client package, locked to version 4.0.0
!pip install pinecone-client==4.0.0

# Install the langchain package, locked to version 0.1.19
!pip install langchain==0.1.19

# Install the langchain-openai package, locked to version 0.1.6
!pip install langchain-openai==0.1.6

# Update the langchain-pinecone package, locked to version 0.1.0
!pip install langchain-pinecone==0.1.0

# Update the tiktoken package, locked to version 0.7.0
!pip install tiktoken==0.7.0

# Install the datasets package, locked to version 2.19.1
!pip install datasets==2.19.1

# Update the typing_extensions package, locked to version 4.11.0
!pip install typing_extensions==4.11.0
import os
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
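
Before going any further, it is worth checking that both keys were actually picked up from the environment. A minimal sketch (it only confirms the variables are set, not that the keys are valid):

# Sanity check: fail early if either key is missing from the environment
assert OPENAI_API_KEY is not None, "OPENAI_API_KEY is not set"
assert PINECONE_API_KEY is not None, "PINECONE_API_KEY is not set"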
# From the langchain_openai package, import ChatOpenAI

from langchain_openai import ChatOpenAI
# Initialize a ChatOpenAI object with the gpt-3.5-turbo model. Assign to model.
model = ChatOpenAI(openai_api_key=OPENAI_API_KEY, model_name="gpt-3.5-turbo", temperature=0)
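
If you want to confirm the model is reachable before building up a multi-turn conversation, the invoke method also accepts a plain string. A minimal sketch (it makes a real API call and assumes the key above is valid):

# One-off smoke test with a plain string prompt
print(model.invoke("Say hello in one short sentence.").content)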
# From the langchain.schema module, import SystemMessage, HumanMessage, AIMessage
from langchain.schema import SystemMessage, HumanMessage, AIMessage

# Create a conversation as a list of messages. Assign to messages.
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hi AI, how are you today?"),
    AIMessage(content="I'm great thank you. How can I help you?"),
    HumanMessage(content="I'd like to understand string theory.")
]
# Invoke a chat with GPT, passing the messages
response = model.invoke(messages)
# Print the response
print(response)

Notice that the AIMessage object looks a bit like a dictionary. The most important element is content, which contains the chat text.
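
To see that dictionary-like structure for yourself, you can dump the message to a plain dict. A minimal sketch (the exact keys depend on the langchain-core version that langchain 0.1.19 pulls in):

# Inspect the AIMessage: its type and a dict view of its fields
print(type(response))
print(response.dict())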

# Print the contents of the response
print(response.content)
# Append the latest AI response to messages
messages.append(response)
print(messages)
# Create a new human message. Assign to prompt.
prompt=HumanMessage(content="Why do physicists believe it can produce a 'unified theory'?")

# Append the prompt to messages
messages.append(prompt)
# Sanity check before you send to GPT: what does messages contain?
messages
# Print the number of messages in the conversation
print(len(messages))

# Invoke the chat with the latest list of messages
response = model.invoke(messages)

# Append the latest AI response to messages
messages.append(response)

# Print the number of messages in the conversation again
print(len(messages))
messages
# Create a new human message about Llama 2. Assign to prompt.
prompt = HumanMessage(content="What is so special about Llama 2?")

# Append this message to the conversation
messages.append(prompt)

# Invoke the chat with the latest list of messages
response = model.invoke(messages)

# Print the contents of the response
print(response.content)
# Append the latest AI response to messages
messages.append(response)

# Create a new human message. Assign to prompt.
prompt=HumanMessage(content="Can you tell me about the LLMChain in LangChain?")

# Append the latest prompt to messages
messages.append(prompt)

# Invoke the chat with the latest list of messages
response = model.invoke(messages)

# Print the contents of the response
print(response.content)
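
The last three exchanges all repeat the same three steps: append a HumanMessage, invoke the model, and append the returned AIMessage. If you plan to keep the conversation going, a small helper keeps that pattern in one place. This is a hypothetical helper built on the objects above, not part of the original notebook:

# Hypothetical helper: one round trip of the append-invoke-append pattern used above
def ask(question):
    """Send one follow-up question and return the assistant's reply text."""
    messages.append(HumanMessage(content=question))  # add the user's turn to the history
    reply = model.invoke(messages)                   # call GPT with the full conversation
    messages.append(reply)                           # keep the AI's turn in the history
    return reply.content

# Example usage
print(ask("Can you summarise our conversation so far in two sentences?"))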