
A DVD rental company needs your help! They want to figure out how many days a customer will rent a DVD for, based on some features, and have approached you for help. They want you to try out some regression models that will help predict the number of days a customer will rent a DVD for. The company wants a model that yields an MSE of 3 or less on a test set. The model you build will help the company become more efficient at inventory planning.

The data they provided is in the csv file rental_info.csv. It has the following features:

  • "rental_date": The date (and time) the customer rents the DVD.
  • "return_date": The date (and time) the customer returns the DVD.
  • "amount": The amount paid by the customer for renting the DVD.
  • "amount_2": The square of "amount".
  • "rental_rate": The rate at which the DVD is rented for.
  • "rental_rate_2": The square of "rental_rate".
  • "release_year": The year the movie being rented was released.
  • "length": Lenght of the movie being rented, in minuites.
  • "length_2": The square of "length".
  • "replacement_cost": The amount it will cost the company to replace the DVD.
  • "special_features": Any special features, for example trailers/deleted scenes that the DVD also has.
  • "NC-17", "PG", "PG-13", "R": These columns are dummy variables of the rating of the movie. It takes the value 1 if the move is rated as the column name and 0 otherwise. For your convinience, the reference dummy has already been dropped.
import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Lasso
#1.- Getting the number of rental days.
# Read the CSV file
df = pd.read_csv("rental_info.csv")

# Calculate rental length in days
df["rental_length"] = pd.to_datetime(df["return_date"]) - pd.to_datetime(df["rental_date"])
df["rental_length_days"] = df["rental_length"].dt.days
#2.- Adding dummy variables using the special features column.
# Create a binary column for 'Deleted Scenes' in special features
df["deleted_scenes"] = np.where(df["special_features"].str.contains("Deleted Scenes"), 1, 0)
df["behind_the_scenes"] = np.where(df["special_features"].str.contains("Behind the Scenes"), 1, 0)

# Drop columns that are not needed for the model
X = df.drop(columns=['rental_date', 'return_date', 'rental_length', 'special_features', 'rental_length_days'])
y = df['rental_length_days']
#3.- Executing a train-test split
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
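# Quick sanity checks on the split (informational only): shapes, plus a
# missing-value count since the linear models below cannot handle NaNs.
print(X_train.shape, X_test.shape)
print("Missing values in X_train:", X_train.isna().sum().sum())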
#4.- Performing feature selection
# Initialize the Lasso model (Lasso is already imported above)
seed = 9
lasso = Lasso(alpha=0.4, random_state=seed)
# Fit the model to the training data
lasso.fit(X_train, y_train)

# Access the coefficients after fitting the model
lasso_coef = lasso.coef_ 

# Subset the training and test features to columns with non-zero coefficients
# (negative coefficients are kept too; only exact zeros are dropped)
X_train_l = X_train.iloc[:, lasso_coef != 0]
X_test_l = X_test.iloc[:, lasso_coef != 0]
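# Inspect which features Lasso kept (non-zero coefficients) before refitting
# OLS below -- purely a sanity check:
selected = pd.Series(lasso_coef, index=X_train.columns)
print(selected[selected != 0].sort_values())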
from sklearn.linear_model import LinearRegression

# Fit an OLS model on the Lasso-selected features
ols = LinearRegression()
ols.fit(X_train_l, y_train)
y_test_pred = ols.predict(X_test_l)
mse_lin_reg_lasso = mean_squared_error(y_test, y_test_pred)

mse_lin_reg_lasso
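# Optional baseline (a sketch, not part of the original workflow): OLS on all
# features, to see how much the Lasso-based selection changes the test MSE.
ols_all = LinearRegression().fit(X_train, y_train)
mse_lin_reg_all = mean_squared_error(y_test, ols_all.predict(X_test))
mse_lin_reg_all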
#5.- Choosing models and performing hyperparameter tuning
from sklearn.model_selection import RandomizedSearchCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score, KFold 


# Define hyperparameter distributions for each model
param_distributions_lr = {}
param_distributions_dt = {
    'min_samples_leaf': [1, 2, 4, 6, 8, 10],
    'max_depth': [None, 10, 20, 30, 40, 50]
}
param_distributions_rf = {
    'n_estimators': np.arange(1, 201),
    'max_depth': np.arange(1, 21),
    # 'auto' was removed for RandomForestRegressor in recent scikit-learn
    # releases; None uses all features, matching the old 'auto' behaviour.
    'max_features': ['sqrt', 'log2', None],
    #'min_samples_split': [28, 30, 33],
    #'min_samples_leaf': [1, 2, 3]
}
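# RandomizedSearchCV also accepts scipy.stats distributions, which sample the
# space rather than enumerating it -- a sketch of an equivalent specification
# (param_distributions_rf_alt is illustrative only and not used below):
from scipy.stats import randint
param_distributions_rf_alt = {
    'n_estimators': randint(1, 201),   # integers in [1, 200]
    'max_depth': randint(1, 21),       # integers in [1, 20]
    'max_features': ['sqrt', 'log2', None],
}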

# Initialize models
models = {
#    'Linear Regression': (LinearRegression(), param_distributions_lr),
#    'Decision Tree': (DecisionTreeRegressor(random_state=seed), param_distributions_dt),
    'Random Forest': (RandomForestRegressor(random_state=seed), param_distributions_rf)
}

kf = KFold(n_splits=5, shuffle=True, random_state=9)
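# cross_val_score (imported above but otherwise unused) gives a quick baseline
# with the same folds -- a sketch using the OLS model on the Lasso-selected
# features from step 4:
baseline_cv_mse = -cross_val_score(LinearRegression(), X_train_l, y_train,
                                   cv=kf, scoring='neg_mean_squared_error')
print("Baseline OLS CV MSE:", baseline_cv_mse.mean())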

# Fit models using RandomizedSearchCV
best_models = {}
best_scores = {}
best_params = {}
for model_name, (model, param_dist) in models.items():
    random_search = RandomizedSearchCV(estimator=model,
                                       param_distributions=param_dist,
                                       #n_iter=50,
                                       # Without an explicit scoring argument,
                                       # candidates are ranked by the regressor
                                       # default (R^2), not by MSE.
                                       #scoring='neg_mean_squared_error',
                                       cv=kf,
                                       random_state=seed,
                                       n_jobs=-1)
    
    random_search.fit(X_train, y_train)
    best_models[model_name] = random_search.best_estimator_
    best_scores[model_name] = random_search.best_score_
    best_params[model_name] = random_search.best_params_

# Display the best models
print(best_models)
print(best_scores)
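# A short sketch for inspecting the last search in more detail (with only the
# Random Forest enabled above, random_search is its fitted search object):
# cv_results_ holds every sampled candidate with its mean cross-validation
# score, ranked by rank_test_score.
cv_results = pd.DataFrame(random_search.cv_results_).sort_values("rank_test_score")
print(cv_results[["params", "mean_test_score", "std_test_score"]].head())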

#6.- Predicting values on test set
# RandomizedSearchCV already refits best_estimator_ on the full training data
# (refit=True by default), so the tuned models can be used directly.
predictions = {}
for model_name, model in best_models.items():
    predictions[model_name] = model.predict(X_test)

# Display predictions
predictions
#7.- Computing mean squared error
best_mse = float("inf")
best_model_name = None

for model_name, y_pred in predictions.items():
    mse = mean_squared_error(y_test, y_pred)
    print(model_name, mse)
    if mse <= best_mse:
        best_mse = mse
        best_model_name = model_name

print('Best model is:', best_model_name, ', with mean squared error of:', best_mse)

# Keep the fitted estimator of the winning model (not just its name)
best_model = best_models[best_model_name]
best_model
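# Final check against the project target (test MSE of 3 or less), plus an
# optional sketch for persisting the chosen model with joblib; the filename
# below is an assumption, not part of the original brief.
if best_mse <= 3:
    print(f"Target met: test MSE {best_mse:.3f} <= 3")
else:
    print(f"Target not met yet: test MSE {best_mse:.3f} > 3")

import joblib
joblib.dump(best_model, "best_rental_model.joblib")  # hypothetical filename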