import os
import openai
import yfinance as yf
from IPython.display import display, Markdown

# Read the OpenAI API key from an environment variable
openai.api_key = os.environ["OPENAI"]

# Models: 'gpt-3.5-turbo', 'gpt-4'
# System messages tell the model how to behave
# User messages are the conversation with the model

# response = openai.ChatCompletion.create(
#     model="gpt-3.5-turbo",
#     messages=[
#         {"role": "system", "content": 'You are a useful assistant'},
#         {"role": "user", "content": 'Something useful to ask the AI'}
#     ]
# )

system_msg = "You are a helpful assistant who understands data science"
user_msg = 'Create a small dataset of data about people. The format of the dataset should be a dataframe with 5 rows and 2 columns. The columns should be called "name" and "height_cm". Provide Python code to generate the dataset, then provide the output in the format of a markdown table'

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": system_msg},
        {"role": "user", "content": user_msg}
    ]
)
# Check why the model stopped generating (should be "stop")
print(response["choices"][0]["finish_reason"])
# Print the raw message content
print(response["choices"][0]["message"]["content"])
# Render the message content as Markdown
display(Markdown(response["choices"][0]["message"]["content"]))
# Chat helper: combines a system message with alternating user/assistant messages
def chat(system, user_assistant):
    assert isinstance(system, str), "`system` should be a string"
    assert isinstance(user_assistant, list), "`user_assistant` should be a list"
    system_msg = [{"role": "system", "content": system}]
    # Even indices are user turns, odd indices are assistant turns
    user_assistant_msgs = [
        {"role": "assistant", "content": user_assistant[i]} if i % 2
        else {"role": "user", "content": user_assistant[i]}
        for i in range(len(user_assistant))
    ]
    msgs = system_msg + user_assistant_msgs
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=msgs
    )
    status_code = response["choices"][0]["finish_reason"]
    assert status_code == "stop", f"The status code was {status_code}."
    return response["choices"][0]["message"]["content"]
# Second call: "tersely" means fewer words; good for testing the function
response_fn_test = chat(
    "You are a machine learning expoert who writes tersely",
    ["Explain what a support vector machine model is"]
)

display(Markdown(response_fn_test))
# Reuse the assistant's reply from the dataset request as conversation history
assistant_msg = response["choices"][0]["message"]["content"]

# Define a new user message
user_msg2 = "Using the dataset you just created, write code to calculate the mean of the height_cm column. Also include the result of the calculation"

user_assistant_msgs = [user_msg, assistant_msg, user_msg2]
#Get the GPT response
response_about_calcs = chat(system_msg, user_assistant_msgs)

# Display the response
display(Markdown(response_about_calcs))

# You can make up a conversation to prime the AI; it doesn't have to be a real conversation.
# This is a good way to steer the model, as sketched below.

# GPT-3.5 can forget the system message, so you may have to break the workflow and resend it.
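
# Hedged sketch (not from the original notebook): priming chat() with a made-up
# exchange to steer the model. The fabricated assistant reply and the questions
# below are illustrative only.
primed_response = chat(
    "You are a machine learning expert who answers in one short paragraph",
    [
        "What does a confusion matrix show?",  # fabricated user turn
        "It tabulates predicted classes against actual classes for a classifier.",  # fabricated assistant turn
        "How do I read precision off it?"  # real follow-up question
    ]
)
display(Markdown(primed_response))
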
import os
import openai
from IPython.display import display, Markdown, Image

# Read the OpenAI API key from an environment variable
openai.api_key = os.environ["OPENAI"]

# Generate an image with the OpenAI Image endpoint (DALL·E)
response = openai.Image.create(
  prompt="Jason Bourne from The Bourne Identity",
  n=1,
  size="512x512"
)
# Get the URL of the generated image, display it inline, and print the link
image_url = response['data'][0]['url']
display(Image(url=image_url))
print(image_url)
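
# Optional sketch (not from the original notebook): save the generated image to disk.
# Assumes the requests package is installed; the filename is illustrative.
import requests

img_bytes = requests.get(image_url).content
with open("bourne_512.png", "wb") as f:
    f.write(img_bytes)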