Skip to content

A DVD rental company needs your help! They want to figure out how many days a customer will rent a DVD for based on some features and has approached you for help. They want you to try out some regression models which will help predict the number of days a customer will rent a DVD for. The company wants a model which yields an MSE of 3 or less on a test set. The model you make will help the company become more efficient at inventory planning.

The data they provided is in the csv file rental_info.csv. It has the following features:

  • "rental_date": The date (and time) the customer rents the DVD.
  • "return_date": The date (and time) the customer returns the DVD.
  • "amount": The amount paid by the customer for renting the DVD.
  • "amount_2": The square of "amount".
  • "rental_rate": The rate at which the DVD is rented for.
  • "rental_rate_2": The square of "rental_rate".
  • "release_year": The year the movie being rented was released.
  • "length": Length of the movie being rented, in minutes.
  • "length_2": The square of "length".
  • "replacement_cost": The amount it will cost the company to replace the DVD.
  • "special_features": Any special features, for example trailers/deleted scenes that the DVD also has.
  • "NC-17", "PG", "PG-13", "R": These columns are dummy variables of the rating of the movie. A column takes the value 1 if the movie is rated as the column name and 0 otherwise. For your convenience, the reference dummy has already been dropped.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split, KFold, RandomizedSearchCV
from sklearn.metrics import mean_squared_error as MSE
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor

SEED = 9

# Import data from CSV, parsing the two timestamp columns up front
rental_info = pd.read_csv('rental_info.csv', parse_dates=['rental_date', 'return_date'])

# EDA
print(rental_info.info())
print(rental_info.head())
# Sanity check: no column may contain missing values.
# BUG FIX: the original `assert ~df.isna().any().all()` only failed when EVERY
# column had NaNs (and relied on bitwise ~); we want to fail if ANY cell is NaN.
assert not rental_info.isna().any().any()

# Preprocessing data

# Target: rental duration in whole days, from return and rental timestamps
rental_info['rental_length_days'] = (rental_info['return_date'] - rental_info['rental_date']).dt.days

# Dummy variables extracted from the free-text "special_features" column
rental_info['deleted_scenes'] = np.where(rental_info['special_features'].str.contains('Deleted Scenes'), 1, 0)
rental_info['behind_the_scenes'] = np.where(rental_info['special_features'].str.contains('Behind the Scenes'), 1, 0)

# Features and target: drop the raw dates (leakage via return_date), the
# target itself, and the raw text column we already encoded above
X = rental_info.drop(columns=['rental_date', 'return_date', 'rental_length_days', 'special_features'])
y = rental_info['rental_length_days']
feature_names = X.columns

print(X.info())

# Split into train and test sets (80/20, reproducible via SEED)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
# Feature selection via L1 regularization: coefficients shrunk to zero
# indicate features the Lasso considers uninformative.
# (Removed redundant mid-file re-imports of pandas/Lasso — both are already
# imported at the top of the file.)
lasso = Lasso(alpha=0.1, random_state=SEED)

# Fit the model on the training split only
lasso.fit(X_train, y_train)

# Get the coefficients from the Lasso model
lasso_coefficients = lasso.coef_

# Pair each feature name with its coefficient for inspection
feature_importance = pd.DataFrame({
    'Feature': feature_names,
    'Coefficient': lasso_coefficients
})

# Sort by absolute coefficient magnitude (most influential first)
feature_importance = feature_importance.reindex(
    feature_importance['Coefficient'].abs().sort_values(ascending=False).index
)

# Display the feature importance (explicit print — a bare expression only
# renders in a notebook, not when run as a script)
print(feature_importance)
# Find the best model: compare a linear baseline against tree-based models
models = {
    'lr': LinearRegression(),
    'dt': DecisionTreeRegressor(random_state=SEED),
    'rf': RandomForestRegressor(random_state=SEED)
}

param_distributions = {
    'lr': {},  # LinearRegression has no hyperparameter to be tuned here
    'dt': {
        'max_depth': [None, 5, 10, 20, 50],
        'min_samples_split': [2, 5, 10],
        'min_samples_leaf': [1, 2, 4]
    },
    'rf': {
        'n_estimators': [50, 100, 200],
        'max_depth': [None, 5, 10, 20],
        'min_samples_split': [2, 5, 10],
        'min_samples_leaf': [1, 2, 4]
    }
}

# Dictionaries to store the fitted best estimator and test-set MSE per model
best_models = {}
mse_scores = {}

# Tune each model with RandomizedSearchCV (5-fold CV on the training set),
# then evaluate the selected estimator once on the held-out test set.
for name, model in models.items():
    print(f"\n🔍 Tuning {name.upper()}...")
    if param_distributions[name]:
        search = RandomizedSearchCV(
            model,
            param_distributions[name],
            n_iter=10,
            scoring='neg_mean_squared_error',
            cv=5,
            random_state=SEED,
            n_jobs=-1
        )
        search.fit(X_train, y_train)
        best_models[name] = search.best_estimator_
        print("Best params:", search.best_params_)
    else:
        # No search space: just fit the model as-is
        model.fit(X_train, y_train)
        best_models[name] = model
        print("No hyperparameter tuning needed.")

    # Predict values on test set and compute MSE
    y_pred = best_models[name].predict(X_test)
    mse = MSE(y_test, y_pred)
    mse_scores[name] = mse
    print(f"{name.upper()} - MSE: {mse:.2f}")

# Save the best model.
# BUG FIX: `best_model_name` was referenced but never defined (NameError),
# and the trailing bare `min(mse_scores)` returned the alphabetically-first
# key rather than the lowest-MSE model. Compute the winning name once and
# derive everything from it.
best_model_name = min(mse_scores, key=mse_scores.get)
best_model = best_models[best_model_name]
best_mse = mse_scores[best_model_name]

print(f"\n🏆 Meilleur modèle : {best_model_name.upper()} avec MSE = {best_mse:.2f}")