
Commercial banks receive a lot of credit card applications. Many of them are rejected for reasons such as high loan balances, low income levels, or too many inquiries on an individual's credit report. Manually analyzing these applications is mundane, error-prone, and time-consuming (and time is money!). Luckily, this task can be automated with the power of machine learning, and nowadays pretty much every commercial bank does exactly that. In this workbook, you will build an automatic credit card approval predictor using machine learning techniques, just like real banks do.

The Data

The data is a small subset of the Credit Card Approval dataset from the UCI Machine Learning Repository showing the credit card applications a bank receives. This dataset has been loaded as a pandas DataFrame called cc_apps. The last column in the dataset is the target value.

# Import necessary libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV

# Load the dataset
cc_apps = pd.read_csv("cc_approvals.data", header=None) 
cc_apps.head()
# Adding column names to the DataFrame
column_names = [
    'Gender', 'Age', 'Debt', 'Marital_Status', 'Employment',
    'Residence_Type', 'Years_Employed', 'Credit_Amount', 'Prior_Default',
    'Bankruptcies', 'Credit_Score', 'Approval_Risk', 'Income', 'Approval_Status'
]

# Reassign column names
cc_apps.columns = column_names

# Display the first few rows to confirm
cc_apps.head()
# Inspect the unique values in the 'Gender' column to spot placeholder entries such as '?'
gender_values = cc_apps['Gender'].unique().tolist()
gender_values
# Replace all '?' values in the DataFrame with NaN
cc_apps.replace('?', np.nan, inplace=True)

# Verify the replacement by checking for missing values
print(cc_apps.isna().sum())
from sklearn.impute import SimpleImputer

# Identify the numerical and categorical columns that contain missing values
numerical_columns = ['Age']
categorical_columns = ['Gender', 'Marital_Status', 'Employment', 'Residence_Type', 'Years_Employed']

# Initialize imputers
imputer_num = SimpleImputer(strategy='mean')  # For numerical columns
imputer_cat = SimpleImputer(strategy='most_frequent')  # For categorical columns

# Apply imputers to handle missing values
cc_apps[numerical_columns] = imputer_num.fit_transform(cc_apps[numerical_columns])
cc_apps[categorical_columns] = imputer_cat.fit_transform(cc_apps[categorical_columns])

# Verify that there are no more missing values
print(cc_apps.isna().sum())

# Separate features (X) and target (y); copy X so later edits don't trigger chained-assignment warnings
X = cc_apps.iloc[:, :-1].copy()
y = cc_apps.iloc[:, -1]

from sklearn.preprocessing import LabelEncoder

# Initialize LabelEncoder
le = LabelEncoder()

# Encode the target column
y = le.fit_transform(y)

# Identify categorical columns
categorical_columns = ['Gender', 'Marital_Status', 'Employment', 
                       'Residence_Type', 'Years_Employed', 
                       'Prior_Default', 'Bankruptcies', 'Approval_Risk']

# Apply label encoding to the categorical features in X
# (a fresh encoder is fitted per column, since each column has its own set of categories)
for col in categorical_columns:
    X[col] = LabelEncoder().fit_transform(X[col])

# Check X to confirm encoding
X.head()
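
Label encoding maps each category to an arbitrary integer, which implicitly imposes an ordering on nominal features; logistic regression can still work with this, but one-hot encoding is the usual way to avoid that artificial ordering. Below is a minimal sketch of the alternative, meant to run in place of the encoding loop above while X still holds the raw string categories (X_encoded is a hypothetical name, not part of the workflow used here):

# Alternative to the label-encoding loop: one-hot encode the nominal features
X_encoded = pd.get_dummies(X, columns=categorical_columns, drop_first=True)
X_encoded.head()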
# Identify numerical columns
numerical_columns = ['Age', 'Debt', 'Credit_Amount', 'Credit_Score', 'Income']

# Initialize StandardScaler
scaler = StandardScaler()

# Apply scaling to numerical columns in X
X[numerical_columns] = scaler.fit_transform(X[numerical_columns])

# Check X to confirm scaling
X.head()
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Confirm the split
print("Training features shape:", X_train.shape)
print("Testing features shape:", X_test.shape)
print("Training target shape:", y_train.shape)
print("Testing target shape:", y_test.shape)
# LogisticRegression and GridSearchCV are already imported above
from sklearn.metrics import accuracy_score

# Initialize the Logistic Regression model
log_reg = LogisticRegression(max_iter=1000)

# Define the hyperparameter grid
param_grid = {
    'C': [0.1, 1, 10, 100],           # Inverse of regularization strength (smaller = stronger regularization)
    'solver': ['liblinear', 'lbfgs']  # Solvers for optimization
}

# Perform Grid Search with Cross-Validation
grid_search = GridSearchCV(log_reg, param_grid, cv=5, scoring='accuracy')
grid_search.fit(X_train, y_train)

# Get the best model and parameters
best_model = grid_search.best_estimator_
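
# For reference, grid_search.best_score_ holds the mean cross-validated accuracy of the
# best parameter combination; it can be compared with the held-out test accuracy computed below.
print("Best CV Accuracy:", grid_search.best_score_)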

# Evaluate the model on the test set
y_pred = best_model.predict(X_test)

# Save the test accuracy to 'best_score'
best_score = accuracy_score(y_test, y_pred)

# Print results
print("Best Hyperparameters:", grid_search.best_params_)
print(f"Test Set Accuracy: {best_score:.4f}")