Skip to content
Project: Predicting Movie Rental Durations
A DVD rental company needs your help! They want to figure out how many days a customer will rent a DVD for based on some features and have approached you for help. They want you to try out some regression models which will help predict the number of days a customer will rent a DVD for. The company wants a model which yields an MSE of 3 or less on a test set. The model you make will help the company become more efficient in inventory planning.
The data they provided is in the csv file rental_info.csv. It has the following features:
"rental_date": The date (and time) the customer rents the DVD. "return_date": The date (and time) the customer returns the DVD. "amount": The amount paid by the customer for renting the DVD. "amount_2": The square of "amount". "rental_rate": The rate at which the DVD is rented for. "rental_rate_2": The square of "rental_rate". "release_year": The year the movie being rented was released. "length": Length of the movie being rented, in minutes. "length_2": The square of "length". "replacement_cost": The amount it will cost the company to replace the DVD. "special_features": Any special features, for example trailers/deleted scenes that the DVD also has. "NC-17", "PG", "PG-13", "R": These columns are dummy variables of the rating of the movie. It takes the value 1 if the movie is rated as the column name and 0 otherwise. For your convenience, the reference dummy has already been dropped.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Import any additional modules and start coding below
# Read the rentals dataset.
rental_info = pd.read_csv('rental_info.csv')

# Sanity checks: missing values, dtypes, and a peek at the raw timestamps.
print(rental_info.isna().sum())
print(rental_info.dtypes)
print(rental_info['rental_date'].head())

# Parse the timestamp columns (format includes a UTC offset, hence %z).
rental_info['rental_date'] = pd.to_datetime(rental_info['rental_date'], format='%Y-%m-%d %H:%M:%S%z')
rental_info['return_date'] = pd.to_datetime(rental_info['return_date'], format='%Y-%m-%d %H:%M:%S%z')

# Target: rental length in whole days. `.dt.days` already yields int64,
# so the original's extra astype('int64') cast was redundant and is dropped.
rental_info['rental_length_days'] = (rental_info['return_date'] - rental_info['rental_date']).dt.days

# Binary flags for the two special features of interest; regex=False because
# we match the literal substring, not a regular expression.
rental_info['deleted_scenes'] = np.where(
    rental_info['special_features'].str.contains('Deleted Scenes', regex=False), 1, 0)
rental_info['behind_the_scenes'] = np.where(
    rental_info['special_features'].str.contains('Behind the Scenes', regex=False), 1, 0)
# Feature set for the models. The raw date columns are deliberately excluded:
# the target (rental_length_days) is derived directly from them, so including
# them would leak the answer. The unused `columns` list from the original
# draft has been removed.
features = [
    'release_year',
    'replacement_cost',
    'NC-17',
    'PG',
    'PG-13',
    'R',
    'amount_2',
    'length_2',
    'rental_rate_2',
    'deleted_scenes',
    'behind_the_scenes',
    'amount',
    'length',
    'rental_rate',
]

X = rental_info[features]
y = rental_info['rental_length_days']
print(X.shape)

# 80/20 train/test split with a fixed seed for reproducibility.
SEED = 9
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
# Baseline attempt: treat the integer day counts as class labels and fit a
# logistic-regression classifier, scoring it by plain accuracy.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

model = LogisticRegression(random_state=SEED)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# accuracy_score is symmetric in its arguments, so the conventional
# (y_true, y_pred) order gives exactly the same number as the original.
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# Compare several models on test-set MSE. Note: three of the four are
# classifiers scored with a regression metric; this "works" only because the
# target is a small set of integer day counts. The original loop body had
# lost its indentation (an IndentationError as written) — restored here.
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error as MSE

lr = LogisticRegression(random_state=SEED)
knn = KNN()
dt = DecisionTreeClassifier(random_state=SEED)
rf = RandomForestRegressor(random_state=SEED)

classifiers = [('Logistic Regression', lr),
               ('K Nearest Neighbours', knn),
               ('Classification Tree', dt),
               ('Random Forest', rf)]

for name, clf in classifiers:
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print(f"{name} : {MSE(y_test, y_pred):.3f}")
# Random Forest performs best.
import matplotlib.pyplot as plt

# Visualise which features the fitted random forest leans on most,
# smallest importance at the top of the horizontal bar chart.
importances = pd.Series(rf.feature_importances_, index=X.columns)
importances.sort_values().plot(kind='barh', color='lightgreen')
plt.show()
# Tune n_estimators with 5-fold cross-validation. The original loop body had
# lost its indentation (an IndentationError as written) — restored here.
# NOTE(review): cross-validating on the full X/y includes the held-out test
# rows, which mildly leaks test information into the choice of n — consider
# running this on X_train/y_train only.
from sklearn.model_selection import cross_val_score

n = [10, 50, 100, 150, 200]
cv_scores = []
for i in n:
    rf = RandomForestRegressor(n_estimators=i, random_state=SEED)
    scores = cross_val_score(rf, X, y, cv=5, scoring='neg_mean_squared_error')
    # Scores are negated MSE, so flip the sign back.
    cv_scores.append(-scores.mean())

plt.plot(n, cv_scores, marker='o')
plt.xlabel('Number of Trees')
plt.ylabel('Cross-validated MSE')
plt.show()

# Not much difference, but 50 over 10 looks right by elbow analysis.
estimators = 50
# Fit the chosen configuration on the training set and report test MSE.
rf = RandomForestRegressor(n_estimators=estimators, min_samples_split=5, random_state=SEED)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
# MSE is symmetric, but use the conventional (y_true, y_pred) order.
mse = MSE(y_test, y_pred)

best_model = rf
best_mse = mse
print(f"Best Model : {best_model}\nBest MSE: {best_mse}")

# Side-by-side summary statistics of actual vs predicted rental lengths.
# The original fused this assignment onto the print line (a SyntaxError) and
# used `display`, which is IPython-only and a NameError in a plain script.
comparison_df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
print(comparison_df.describe())