
Traffic data fluctuates constantly and is heavily influenced by time. Predicting it can be challenging, but this task will help sharpen your time-series skills. With deep learning, you can exploit abstract patterns in the data that help boost predictive accuracy.

Your task is to build a system that predicts traffic volume, that is, the number of vehicles passing a specific point at a specific time. Predicting this can help reduce road congestion, support new designs for roads or intersections, improve safety, and more! You can even use it to plan your commute and avoid traffic!

The dataset provided contains the hourly traffic volume on an interstate highway in Minnesota, USA. It also includes weather features and holidays, which often impact traffic volume.

Time to predict some traffic!

The data:

The dataset is collected and maintained by the UCI Machine Learning Repository. The target variable is traffic_volume. The data has already been normalized and saved into training and test sets, and it contains the following columns:

train_scaled.csv, test_scaled.csv

Column | Type | Description
--- | --- | ---
temp | Numeric | Average temperature in Kelvin
rain_1h | Numeric | Amount of rain that occurred in the hour (mm)
snow_1h | Numeric | Amount of snow that occurred in the hour (mm)
clouds_all | Numeric | Percentage of cloud cover
date_time | DateTime | Hour the data was collected, in local CST
holiday_ (11 columns) | Categorical | US national holidays plus a regional holiday, the Minnesota State Fair
weather_main_ (11 columns) | Categorical | Short textual description of the current weather
weather_description_ (35 columns) | Categorical | Longer textual description of the current weather
hour_of_day | Numeric | The hour of the day
day_of_week | Numeric | The day of the week (0 = Monday, 6 = Sunday)
day_of_month | Numeric | The day of the month
month | Numeric | The number of the month
traffic_volume | Numeric | Hourly I-94 ATR 301 reported westbound traffic volume
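
Before the modeling code below, an optional, self-contained check in pandas can confirm that the file's columns match this table (this assumes the CSVs sit in your working directory, as the loading code below also does):

# Optional: list the columns of the training file and compare them with the table above
import pandas as pd
print(pd.read_csv('train_scaled.csv').columns.tolist())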
# Import the relevant libraries
import numpy as np
import pandas as pd

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
# Read the traffic data from the CSV training and test files
train_scaled_df = pd.read_csv('train_scaled.csv')
test_scaled_df = pd.read_csv('test_scaled.csv')

# Convert the DataFrame to NumPy arrays
train_scaled = train_scaled_df.to_numpy()
test_scaled = test_scaled_df.to_numpy()
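
# Optional sanity check: the feature/target split below assumes traffic_volume is the
# last column, so confirm the column order and the array shapes before moving on.
print(train_scaled_df.columns[-1])      # expected: 'traffic_volume'
print(train_scaled.shape, test_scaled.shape)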

from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error

# Separate features and target variable from the training and test data
X_train = train_scaled[:, :-1]  # All columns except the last one (traffic_volume)
y_train = train_scaled[:, -1]   # Only the last column (traffic_volume)

X_test = test_scaled[:, :-1]    # All columns except the last one (traffic_volume)
y_test = test_scaled[:, -1]    # Only the last column (traffic_volume)

# Check for categorical (object-typed) columns
categorical_columns = train_scaled_df.select_dtypes(include=['object']).columns

# If there are categorical columns, proceed with one-hot encoding
if len(categorical_columns) > 0:
    print("Categorical columns detected:", categorical_columns)

    # Extract categorical data for encoding
    categorical_train = train_scaled_df[categorical_columns].to_numpy()
    categorical_test = test_scaled_df[categorical_columns].to_numpy()

    # Encode categorical columns using OneHotEncoder
    # (sparse_output requires scikit-learn >= 1.2; older versions use sparse=False)
    encoder = OneHotEncoder(sparse_output=False, drop='first')
    encoded_train = encoder.fit_transform(categorical_train)
    encoded_test = encoder.transform(categorical_test)

    # np.delete expects integer positions, not column names, so look them up first.
    # X_train/X_test exclude only the last column (traffic_volume), so the remaining
    # columns sit at the same positions as in the DataFrame.
    cat_idx = [train_scaled_df.columns.get_loc(col) for col in categorical_columns]
    X_train = np.delete(X_train, cat_idx, axis=1)
    X_test = np.delete(X_test, cat_idx, axis=1)

    # Append the one-hot encoded categorical features to the feature matrices
    # and cast to float so the arrays convert cleanly to tensors below
    X_train = np.concatenate([X_train, encoded_train], axis=1).astype(np.float32)
    X_test = np.concatenate([X_test, encoded_test], axis=1).astype(np.float32)
else:
    print("No categorical columns found, continuing with numerical columns only.")

# Convert the data to PyTorch tensors
X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)

X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32).view(-1, 1)
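
# Optional sketch: TensorDataset and DataLoader are imported above but the loop below
# trains on the full tensors at once. This is how mini-batch loading could be set up
# instead (the batch size of 64 is an arbitrary choice).
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)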


# Define an LSTM network
class TrafficVolume(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        # Define the LSTM layer
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True
        )
        # Define the activation function (LeakyReLU)
        self.relu = nn.LeakyReLU()
        
        # Define the fully connected layer
        self.fc1 = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # Capture the final hidden state returned by the LSTM
        _, (h_n, _) = self.lstm(x)
        # Take the hidden state from the last LSTM layer
        out = h_n[-1]
        # Apply the fully connected layer followed by LeakyReLU
        return self.relu(self.fc1(out))
    
# Set-up for training 
n_features = X_train.shape[1]
hidden_size = 64
num_layers = 2

# Initialize the model, saving it to traffic_model
traffic_model = TrafficVolume(input_size=n_features, hidden_size=hidden_size, num_layers=num_layers)
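
# Optional sanity check with random data (arbitrary batch size of 8): the model should
# map inputs of shape (batch, seq_len=1, n_features) to outputs of shape (batch, 1).
with torch.no_grad():
    dummy = torch.randn(8, 1, n_features)
    print(traffic_model(dummy).shape)  # expected: torch.Size([8, 1])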

loss_function = nn.MSELoss()  # Mean Squared Error loss function
optimizer = optim.Adam(traffic_model.parameters(), lr=0.001)  # Adam optimizer

# Train the Model
num_epochs = 100
final_training_loss = None

for epoch in range(num_epochs):
    traffic_model.train()

    # Forward pass: unsqueeze adds a sequence-length dimension of 1,
    # giving inputs of shape (batch, seq_len=1, n_features)
    y_pred = traffic_model(X_train_tensor.unsqueeze(1))

    # Compute the loss
    loss = loss_function(y_pred, y_train_tensor)

    # Backward pass and optimization
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 10 == 0:
        print(f'Epoch [{epoch}/{num_epochs}], Loss: {loss.item()}')

    final_training_loss = loss.item()

# Save the model
torch.save(traffic_model.state_dict(), 'traffic_model.pth')
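
# Optional sketch: to reuse the saved weights later (e.g. in a separate inference script),
# load them back into a freshly constructed model with the same hyperparameters as above.
loaded_model = TrafficVolume(input_size=n_features, hidden_size=hidden_size, num_layers=num_layers)
loaded_model.load_state_dict(torch.load('traffic_model.pth'))
loaded_model.eval()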

# Evaluate the model on the Test Set
traffic_model.eval()  # Set the model to evaluation mode
with torch.no_grad():
    y_test_pred = traffic_model(X_test_tensor.unsqueeze(1))  # Predict the traffic volume for the test set
    test_mse = F.mse_loss(y_test_tensor, y_test_pred)  # Calculate MSE for the test set

test_mse_tensor = torch.tensor(test_mse.item(), dtype=torch.float32)  # Store the test MSE as a detached scalar tensor
print(f"Test MSE: {test_mse_tensor.item()}")

# Save the final training loss as a tensor variable
final_training_loss_tensor = torch.tensor(final_training_loss, dtype=torch.float32)
print(f"Final Training Loss: {final_training_loss_tensor.item()}")