Credit Card Approval Prediction

Commercial banks receive a lot of applications for credit cards. Many of them get rejected for many reasons, like high loan balances, low income levels, or too many inquiries on an individual's credit report, for example. Manually analyzing these applications is mundane, error-prone, and time-consuming (and time is money!). Luckily, this task can be automated with the power of machine learning and pretty much every commercial bank does so nowadays. In this notebook, we will build an automatic credit card approval predictor using machine learning techniques, just like real banks do.

You have been provided with a small subset of the credit card applications a bank receives. The dataset has been loaded as a Pandas DataFrame for you. You will start from there.

# Import necessary libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV

# Load the dataset
cc_apps = pd.read_csv("cc_approvals.data", header=None) 
cc_apps.head()
# Start coding here
# Use as many cells as you need
# Summarize data with .describe() and get information with .info()
print(cc_apps.describe())
print('\n')
print(cc_apps.info())
# 1 - Splitting the dataset into train and test sets before preprocessing to prevent data leakage

# Drop columns 11 and 13, as they are non-essential for the project
cc_apps = cc_apps.drop([11, 13], axis=1)
# Encode the target: 1 for approved ('+'), 0 for declined
cc_apps[15] = np.where(cc_apps[15] == '+', 1, 0)
# Replace the '?' placeholder with real NaN missing values.
# Use np.nan: the np.NaN alias was removed in NumPy 2.0.
cc_apps = cc_apps.replace('?', np.nan)
# Separate features (X) from the target label (y)
X = cc_apps.drop(15, axis=1)
y = cc_apps[15]

# Fixed random_state keeps the 67/33 split reproducible across runs
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
cc_apps.head()
# 2 - Handling the missing values

# Impute missing values in numeric columns with the TRAIN-set mean.
# Fix: the test set must be filled with train statistics too — using the
# test set's own mean (as before) leaks test information into preprocessing.
# numeric_only=True is required because object columns cannot be averaged.
train_means = X_train.mean(numeric_only=True)
X_train_imputed = X_train.fillna(train_means)
X_test_imputed = X_test.fillna(train_means)

print(X_train_imputed.isna().sum())
print(X_test_imputed.isna().sum())
# Iterate through the train set's object (categorical) columns and impute
# with the most frequent train value.
# Bug fix: the original called fillna on the WHOLE DataFrame with one
# column's mode, spraying that value into every still-missing cell of
# every column; fill only the column being processed.
for col in X_train_imputed.columns:
    if X_train_imputed[col].dtype == 'object':
        most_frequent = X_train_imputed[col].value_counts().index[0]
        X_train_imputed[col] = X_train_imputed[col].fillna(most_frequent)
        X_test_imputed[col] = X_test_imputed[col].fillna(most_frequent)

# Check for missing values in train and test sets
print(X_train_imputed.isna().sum())
print(X_test_imputed.isna().sum())
# 3 - Preprocessing the data

# One-hot encode the categorical features of both sets
X_train_cat_encoding = pd.get_dummies(X_train_imputed)
X_test_cat_encoding = pd.get_dummies(X_test_imputed)

# Align the test matrix to the train columns; categories unseen during
# training become all-zero columns so both sets share the same shape
X_test_cat_encoding = X_test_cat_encoding.reindex(
    columns=X_train_cat_encoding.columns, fill_value=0
)

print(X_train_cat_encoding.shape, X_test_cat_encoding.shape)
X_train_cat_encoding.iloc[:5, :10]
# 4 - Segregating features and labels and feature rescaling

# MinMaxScaler squeezes every feature into the [0, 1] interval
scaler = MinMaxScaler(feature_range=(0, 1))

# Fit the scaler on the training features only, then apply the same
# learned transform to the test features
rescaledX_train = scaler.fit_transform(X_train_cat_encoding.values)
rescaledX_test = scaler.transform(X_test_cat_encoding.values)
# 5 - Training and evaluating a logistic regression model

# Fit a baseline logistic regression classifier on the scaled train set
logreg = LogisticRegression()
logreg.fit(rescaledX_train, y_train)

# Evaluate on the held-out test set: confusion matrix plus mean accuracy
y_pred = logreg.predict(rescaledX_test)
print(confusion_matrix(y_test, y_pred))
print(logreg.score(rescaledX_test, y_test))
# 6 - Hyperparameter search and making the model perform better

# Candidate values for the solver tolerance and the iteration budget
tol = [0.01, 0.001, 0.0001]
max_iter = [100, 150, 200]

# Assemble the search space as a parameter-grid dictionary
param_grid = {'tol': tol, 'max_iter': max_iter}
# 5-fold cross-validated grid search over the logistic regression model
grid_model = GridSearchCV(logreg, param_grid, cv=5)
grid_model_result = grid_model.fit(rescaledX_train, y_train)

# Unpack the winning estimator, its parameters, and its CV score
best_model = grid_model_result.best_estimator_
best_params = grid_model_result.best_params_
best_score = grid_model_result.best_score_

print('Best model score: {}'.format(best_model.score(rescaledX_test, y_test)))