Skip to content
Mistral Medium 3
Setting Up
%%capture
!pip install -U \
mistralai \
langchain langchain-mistralai \
langchain-experimental \
langgraph \
tavily-python

Text Generation with Streaming
import os
from mistralai import Mistral
# Read the API key from the environment (never hard-code credentials).
api_key = os.environ["MISTRAL_API_KEY"]
model = "mistral-medium-latest"
client = Mistral(api_key=api_key)
# Start a streaming chat completion: the reply arrives as a sequence of
# incremental delta chunks instead of one final message.
stream_response = client.chat.stream(
model = model,
messages = [
{
"role": "user",
"content": "What is the best course to take from DataCamp?",
},
]
)
for chunk in stream_response:
    print(chunk.data.choices[0].delta.content, end="" )

Image Understanding of Base64 Encoded Image
import base64
import requests
def encode_image(image_path):
    """Return the base64 (UTF-8 text) encoding of the file at *image_path*.

    Failures are handled best-effort: an error message is printed and
    ``None`` is returned instead of raising.
    """
    try:
        with open(image_path, "rb") as fh:
            raw_bytes = fh.read()
        return base64.b64encode(raw_bytes).decode('utf-8')
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # any other read/encode failure: report, don't crash
        print(f"Error: {e}")
        return None
# Path to your image
image_path = "datacamp_fp.png"
# Getting the base64 string
base64_image = encode_image(image_path)

# Define the messages for the chat
# Multimodal user message: a text instruction plus the image passed
# inline as a base64 data URL.
messages = [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Explain the image in a Donald Trump style."
},
{
"type": "image_url",
# NOTE(review): assumes base64_image is not None -- encode_image
# returns None on failure; confirm the file was read before sending.
"image_url": f"data:image/png;base64,{base64_image}"
}
]
}
]
# Stream the model's description of the image.
stream_response = client.chat.stream(
model = model,
messages = messages
)
for chunk in stream_response:
    print(chunk.data.choices[0].delta.content, end="" )

Building Agentic Application with Mistral Medium 3
from langchain_mistralai import ChatMistralAI

llm = ChatMistralAI(
    model="mistral-medium-latest",
    temperature=0.2,
    streaming=True
)

from langchain_community.tools import TavilySearchResults
from langchain_experimental.tools.python.tool import PythonREPLTool
# Web-search tool (Tavily): up to 5 results, plus a synthesized answer.
search_tool = TavilySearchResults(max_results=5, include_answer=True)
# REPL tool that executes Python the agent generates (arbitrary code runs
# locally -- use with care on untrusted prompts).
code_tool = PythonREPLTool()
tools = [search_tool, code_tool]

from langgraph.prebuilt import create_react_agent
# Prebuilt ReAct-style agent that decides when to call the tools above.
agent = create_react_agent(
model=llm, # any ChatModel works
tools=tools, # list[BaseTool]
)
def extract_tool_names(conversation: dict) -> list[str]:
    """
    Given a conversation dict with a 'messages' list (where each message
    may be a dict or a Pydantic model), extract all unique tool names
    used in any tool call.

    Message shapes handled:
      * objects exposing a ``tool_calls`` attribute (LangChain messages),
      * plain dicts with a top-level ``tool_calls`` key or one nested
        under ``additional_kwargs``,
      * other objects carrying ``additional_kwargs``.

    Tool-call entries may be dicts ({'name': ...} or OpenAI-style
    {'function': {'name': ...}}) or objects with a ``name`` attribute
    (or ``.function.name``); object-style entries were previously
    silently ignored.

    Returns:
        Sorted list of unique tool names (empty if none were called).
    """
    tool_names = set()
    for msg in conversation.get('messages', []):
        calls = []
        # 1) Direct attribute access (Pydantic / LangChain message objects).
        if hasattr(msg, 'tool_calls'):
            calls = getattr(msg, 'tool_calls') or []
        # 2) Plain-dict messages: top-level key first, then additional_kwargs.
        elif isinstance(msg, dict):
            calls = msg.get('tool_calls')
            if not calls and isinstance(msg.get('additional_kwargs'), dict):
                calls = msg['additional_kwargs'].get('tool_calls')
        # 3) Other objects: fall back to additional_kwargs.
        else:
            ak = getattr(msg, 'additional_kwargs', None)
            if isinstance(ak, dict):
                calls = ak.get('tool_calls', [])
        for call in calls or []:
            if isinstance(call, dict):
                # dict-style tool call: top-level 'name' ...
                if 'name' in call:
                    tool_names.add(call['name'])
                # ... or nested under 'function' (OpenAI schema).
                elif 'function' in call and isinstance(call['function'], dict):
                    fn = call['function']
                    if 'name' in fn:
                        tool_names.add(fn['name'])
            else:
                # Object-style tool call: .name, or OpenAI-style .function.name.
                name = getattr(call, 'name', None)
                if name is None:
                    fn = getattr(call, 'function', None)
                    name = getattr(fn, 'name', None) if fn is not None else None
                if isinstance(name, str):
                    tool_names.add(name)
    return sorted(tool_names)


question = "What are the top 5 breaking news stories?"
def run_agent(question):
    """Send *question* to the ReAct agent and collect the outcome.

    Returns a tuple ``(tool_names, answer_text)``: the sorted list of
    tools the agent invoked, and the final reply with any non-string
    content parts dropped.
    """
    payload = {"messages": [{"role": "user", "content": question}]}
    result = agent.invoke(payload)
    tool_name = extract_tool_names(result)
    # The LLM's final answer is always in the last message.
    raw_answer = result["messages"][-1].content
    # content may be a string or a list of parts; keep only string pieces.
    clean_text = "".join(p for p in raw_answer if isinstance(p, str))
    return tool_name, clean_text
# Run the sample question and report which tools the agent chose.
tool_name, clean_text = run_agent(question)
print("Tool used ⫸", tool_name, "\n")
print(clean_text)

question = "Write a code to display the stars in triangle. Please execute the code too."
# Code-writing task: the agent should reach for the Python REPL tool.
tool_name, clean_text = run_agent(question)
print("Tool used ⫸", tool_name, "\n")
print(clean_text)

question = "Get the latest gold price data for the past 7 days and use it to generate a line plot"
# Combined task (fetch data, then plot): may use both search and code tools.
tool_name, clean_text = run_agent(question)
print("Tool used ⫸", tool_name, "\n")
print(clean_text)