Playing with the GPT-4.1 API
pip install -U openai -q
Using the GPT-4.1 Nano model for text-to-text generation
from openai import OpenAI
from IPython.display import Markdown, display

client = OpenAI()

response = client.responses.create(
    model="gpt-4.1-nano",
    input="Write a proper blog on getting rich.",
)

Markdown(response.output_text)
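To keep an eye on cost, the response object also reports token counts. A minimal sketch, assuming the same response as above and the Responses API usage fields (input_tokens, output_tokens, total_tokens):

# Inspect token usage for the request above (field names assumed per the Responses API).
usage = response.usage
print(f"Input tokens:  {usage.input_tokens}")
print(f"Output tokens: {usage.output_tokens}")
print(f"Total tokens:  {usage.total_tokens}")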
Using the GPT-4.1 Mini model for image understanding
response = client.responses.create(
    model="gpt-4.1-mini",
    input=[{
        "role": "user",
        "content": [
            {"type": "input_text", "text": "Please describe the image as a philosopher would."},
            {
                "type": "input_image",
                "image_url": "https://thumbs.dreamstime.com/b/lucha-de-dos-vacas-56529466.jpg",
            },
        ],
    }],
)

print(response.output_text)
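The same pattern works with a local image: instead of a public URL, you can pass a base64 data URL in the image_url field. A minimal sketch, assuming a local file named cows.jpg (the filename is only an example):

import base64

# Encode a local image as a base64 data URL and send it the same way as a remote URL.
with open("cows.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

response = client.responses.create(
    model="gpt-4.1-mini",
    input=[{
        "role": "user",
        "content": [
            {"type": "input_text", "text": "Please describe the image as a philosopher would."},
            {"type": "input_image", "image_url": f"data:image/jpeg;base64,{image_b64}"},
        ],
    }],
)

print(response.output_text)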
Using the GPT-4.1 (Full) model for code generation
import sys

# Request a streamed response from the model.
stream = client.responses.create(
    model="gpt-4.1",
    instructions="You are a machine learning engineer who is an expert in creating model inference services.",
    input="Create a FastAPI app for image classification",
    stream=True,
)

# Iterate over the stream events and print text as soon as it is received.
for event in stream:
    # Text-delta events carry a piece of the generated output.
    if hasattr(event, "delta") and event.delta:
        sys.stdout.write(event.delta)
        sys.stdout.flush()
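The hasattr check above is a catch-all: the stream also emits lifecycle events (response created, completed, and so on). A slightly stricter variant of the same loop, assuming the text-delta events are typed response.output_text.delta as in the Responses API docs, which also collects the full generated code for later use:

chunks = []
stream = client.responses.create(
    model="gpt-4.1",
    instructions="You are a machine learning engineer who is an expert in creating model inference services.",
    input="Create a FastAPI app for image classification",
    stream=True,
)
for event in stream:
    # Only text-delta events carry generated text; ignore lifecycle events.
    if event.type == "response.output_text.delta":
        chunks.append(event.delta)
        sys.stdout.write(event.delta)
        sys.stdout.flush()

full_code = "".join(chunks)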
Using the GPT-4.1 (Full) model for code generation with file inputs
import base64
from openai import OpenAI

client = OpenAI()

# Read the local script and encode it as base64 so it can be sent inline.
with open("main.py", "rb") as f:
    data = f.read()
base64_string = base64.b64encode(data).decode("utf-8")

response = client.responses.create(
    model="gpt-4.1",
    input=[
        {
            "role": "user",
            "content": [
                {
                    "type": "input_file",
                    "filename": "main.py",
                    "file_data": f"data:text/x-python;base64,{base64_string}",
                },
                {
                    "type": "input_text",
                    "text": "Enhance the code by incorporating additional features to improve the user experience.",
                },
            ],
        },
    ],
)

Markdown(response.output_text)
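To work with the suggested changes outside the notebook, the plain text is available on response.output_text and can simply be written to disk. A minimal sketch (the filename main_enhanced.py is only an example; the model may wrap the code in Markdown fences, so some cleanup may be needed):

# Save the model's suggested version next to the original script for review.
with open("main_enhanced.py", "w", encoding="utf-8") as f:
    f.write(response.output_text)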