Personal loans are a lucrative revenue stream for banks. The typical interest rate on a two-year loan in the United Kingdom is around 10%. That might not sound like a lot, but in September 2022 alone UK consumers borrowed around £1.5 billion, which would mean approximately £300 million in interest for banks over two years (roughly £1.5 billion × 10% per year × 2 years)!
You have been asked to work with a bank to clean the data it collected as part of a recent marketing campaign, which aimed to get customers to take out a personal loan. The bank plans to conduct more marketing campaigns going forward, so it would like you to ensure the data conforms to the specific structure and data types it specifies. The cleaned data you provide will be used to set up a PostgreSQL database, which will store this campaign's data and allow data from future campaigns to be imported easily.
The bank has supplied you with a csv file called "bank_marketing.csv", which you will need to clean, reformat, and split, saving three final csv files. Specifically, the three files should have the names and contents outlined below:
client.csv
| column | data type | description | cleaning requirements |
|---|---|---|---|
| client_id | integer | Client ID | N/A |
| age | integer | Client's age in years | N/A |
| job | object | Client's type of job | Change "." to "_" |
| marital | object | Client's marital status | N/A |
| education | object | Client's level of education | Change "." to "_" and "unknown" to np.NaN |
| credit_default | bool | Whether the client's credit is in default | Convert to boolean data type: 1 if "yes", otherwise 0 |
| mortgage | bool | Whether the client has an existing mortgage (housing loan) | Convert to boolean data type: 1 if "yes", otherwise 0 |
campaign.csv
| column | data type | description | cleaning requirements |
|---|---|---|---|
| client_id | integer | Client ID | N/A |
| number_contacts | integer | Number of contact attempts to the client in the current campaign | N/A |
| contact_duration | integer | Last contact duration in seconds | N/A |
| previous_campaign_contacts | integer | Number of contact attempts to the client in the previous campaign | N/A |
| previous_outcome | bool | Outcome of the previous campaign | Convert to boolean data type: 1 if "success", otherwise 0 |
| campaign_outcome | bool | Outcome of the current campaign | Convert to boolean data type: 1 if "yes", otherwise 0 |
| last_contact_date | datetime | Last date the client was contacted | Create from a combination of day, month, and a newly created year column (which should have a value of 2022); Format = "YYYY-MM-DD" |
economics.csv
| column | data type | description | cleaning requirements |
|---|---|---|---|
| client_id | integer | Client ID | N/A |
| cons_price_idx | float | Consumer price index (monthly indicator) | N/A |
| euribor_three_months | float | Euro Interbank Offered Rate (euribor) three-month rate (daily indicator) | N/A |
import datetime
import logging
import numpy as np
import os
import pandas as pd
from pandas.api.types import is_datetime64_ns_dtype
EXPECTED_INPUT_COLS = [
'client_id', 'age', 'job', 'marital',
'education', 'credit_default', 'mortgage',
'month', 'day', 'contact_duration',
    'number_contacts', 'previous_campaign_contacts',
'previous_outcome', 'cons_price_idx',
'euribor_three_months', 'campaign_outcome'
]
FINAL_DATA_TYPES = {
    'client_id': int,
    'number_contacts': int,
    'contact_duration': int,
    'previous_campaign_contacts': int,
    'previous_outcome': bool,
    'campaign_outcome': bool,
    # Datetime dtypes can't be checked with a simple equality comparison here;
    # validate_data() handles datetime columns separately.
    # 'last_contact_date': datetime.date,
    'cons_price_idx': float,
    'euribor_three_months': float,
    'age': int,
    # The text columns remain pandas 'object' dtype after cleaning (per the spec tables above).
    'job': object,
    'marital': object,
    'education': object,
    'credit_default': bool,
    'mortgage': bool
}
DATETIME_COLS = ['last_contact_date']
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
Define ETL Functions
def extract(file_path: str) -> pd.DataFrame | None:
    '''
    Reads in data from source. Returns None if the file cannot be read.
    '''
try:
df = pd.read_csv(file_path)
logging.info(f"Extracted file from {file_path}")
return df
    except FileNotFoundError:
        logging.error(f"ERROR: File not found at {file_path}.")
        return None
    except Exception as e:
        logging.error(f"ERROR: Unexpected error occurred: {e}")
        return None
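As a quick illustration of the error path (the file name below is hypothetical and only used to trigger the FileNotFoundError branch), extract() logs the problem and returns None, which is why pipeline() later checks for None before continuing:

missing = extract('a_file_that_does_not_exist.csv')  # hypothetical path
missing is None  # True: the error was logged and None returned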
def transform(df: pd.DataFrame) -> pd.DataFrame:
'''
Cleans dataframe columns based on requirements.
'''
# Checkpoint: Expected input?
assert set(EXPECTED_INPUT_COLS).issubset(df.columns), f"Missing columns: {set(EXPECTED_INPUT_COLS)-set(df.columns)}"
bool_cols = ['credit_default', 'mortgage', 'campaign_outcome','previous_outcome']
df['job'] = df['job'].str.replace('.', "_", regex=False)
    df['education'] = df['education'].str.replace('.', "_", regex=False).replace('unknown', np.nan)
logging.info('String column(s) transformed.')
for col in bool_cols:
df[col] = np.where(df[col].isin(["yes",'success']), 1, 0)
df[col] = df[col].astype(bool)
logging.info('Bool column(s) transformed.')
# Checkpoint: Boolean conversion
assert df[bool_cols].dtypes.eq(bool).all(), "bool conversion failed."
df['last_contact_date'] = '2022-'+df['month'].astype(str)+'-'+df['day'].astype(str)
    # errors='coerce' turns any unparseable dates into NaT so the checkpoint below can flag them
    df['last_contact_date'] = pd.to_datetime(df['last_contact_date'], errors='coerce')
logging.info('DateTime column(s) transformed.')
# Checkpoint: DateTime conversion
if df['last_contact_date'].isna().any():
logging.warning("Some DateTime entries in 'last_contact_date' could not be parsed.")
assert not df.drop('education',axis=1).isna().any().any(), 'Nulls found at end of transformation'
logging.info("Transform complete. No nulls found (column 'education' was not checked).")
return df
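To see the cleaning rules end to end, here is a minimal sketch that runs transform() on a single hand-made row. The values are illustrative placeholders rather than rows from bank_marketing.csv, but the columns match EXPECTED_INPUT_COLS:

sample = pd.DataFrame({
    'client_id': [1], 'age': [42], 'job': ['admin.'], 'marital': ['married'],
    'education': ['university.degree'], 'credit_default': ['no'], 'mortgage': ['yes'],
    'month': ['may'], 'day': [13], 'contact_duration': [210],
    'number_contacts': [1], 'previous_campaign_contacts': [0],
    'previous_outcome': ['nonexistent'], 'cons_price_idx': [93.994],
    'euribor_three_months': [4.857], 'campaign_outcome': ['no'],
})
cleaned_sample = transform(sample.copy())
# Expected results per the cleaning requirements:
# job               -> 'admin_'            ("." replaced with "_")
# education         -> 'university_degree' ("." replaced with "_")
# mortgage          -> True                ("yes" mapped to True)
# previous_outcome  -> False               (only "success" maps to True)
# last_contact_date -> 2022-05-13          (built from year 2022, month, and day)
cleaned_sample[['job', 'education', 'mortgage', 'previous_outcome', 'last_contact_date']]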
def load(output_dir: str, df: pd.DataFrame) -> None:
    '''
    Subsets the transformed DataFrame and saves the three tables to .csv files.
    '''
campaign_cols = [
'client_id', 'number_contacts',
'contact_duration', 'previous_campaign_contacts',
'previous_outcome', 'campaign_outcome',
'last_contact_date'
]
economic_cols = [
'client_id', 'cons_price_idx', 'euribor_three_months']
client_cols = [
'client_id', 'age', 'job',
'marital', 'education',
'credit_default', 'mortgage'
]
    pth = os.path.join(output_dir, 'campaign.csv')
    validate_data(df[campaign_cols])
    df[campaign_cols].to_csv(pth, sep=',', index=False)
    logging.info('Campaign data successfully loaded to .csv file.')
    pth = os.path.join(output_dir, 'economics.csv')
    validate_data(df[economic_cols])
    df[economic_cols].to_csv(pth, sep=',', index=False)
    logging.info('Economics data successfully loaded to .csv file.')
    pth = os.path.join(output_dir, 'client.csv')
    validate_data(df[client_cols])
    df[client_cols].to_csv(pth, sep=',', index=False)
    logging.info('Client data successfully loaded to .csv file.')
return
def validate_data(df: pd.DataFrame) -> None:
    '''
    Checks that every column is expected and has the expected dtype.
    Datetime columns are checked separately via is_datetime64_ns_dtype.
    '''
    keys = FINAL_DATA_TYPES.keys()
for col in df.columns:
if is_datetime64_ns_dtype(df[col].dtype):
logging.info(f"{col} datetime column is datetime64_ns_dtype.")
else:
assert col in keys, f"Unexpected column {col}"
assert df[col].dtype == FINAL_DATA_TYPES[col], f"{col} is of type {df[col].dtype}, not type {FINAL_DATA_TYPES[col]}"
logging.info('VALIDATION: All columns are expected, and of the expected dtype.')
return
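A brief illustration of what validate_data() catches (the one-column frames below are contrived examples, not project data): a correctly typed column passes, while an unexpected dtype trips the assertion:

validate_data(pd.DataFrame({'client_id': [1, 2, 3]}))        # int64 == int, passes
try:
    validate_data(pd.DataFrame({'client_id': [1.0, 2.0]}))   # float64 != int
except AssertionError as err:
    logging.error(f"Validation rejected the frame: {err}")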
def pipeline(load_dir: str, extract_path: str) -> None:
    '''
    Extracts data from a .csv file at 'extract_path', transforms
    it per requirements, splitting it into three separate tables,
    which are then saved in the 'load_dir' directory.
    '''
    raw_data = extract(extract_path)
if raw_data is None:
logging.error("Extraction failed. Exiting pipeline.")
return
    cleaned_data = transform(raw_data)
load(load_dir, cleaned_data)
return
if __name__ == "__main__":
output_dir = ''
file_name = 'bank_marketing.csv'
pipeline(output_dir, file_name)
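The brief says the cleaned files will eventually feed a PostgreSQL database, but the import tooling is not specified. Below is only a rough sketch of one possible approach, using psycopg2 to load client.csv; the table definition mirrors the client.csv spec above, the connection string is a placeholder, and the exact SQL types and constraints are assumptions.

import psycopg2  # assumed to be available; any other Postgres client would work similarly

conn = psycopg2.connect("dbname=bank user=postgres password=postgres host=localhost")  # placeholder credentials
cur = conn.cursor()

# Table mirrors the client.csv spec; exact SQL types/constraints are an assumption.
cur.execute("""
    CREATE TABLE IF NOT EXISTS client (
        client_id INTEGER PRIMARY KEY,
        age INTEGER,
        job TEXT,
        marital TEXT,
        education TEXT,
        credit_default BOOLEAN,
        mortgage BOOLEAN
    );
""")

# Stream the cleaned csv straight into the table; empty fields (e.g. unknown education) load as NULL.
with open('client.csv', 'r') as f:
    cur.copy_expert("COPY client FROM STDIN WITH (FORMAT csv, HEADER true)", f)

conn.commit()
cur.close()
conn.close()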
Double Check Data Types of Loaded Data
Manual check of pipeline validity.
client_df = pd.read_csv('client.csv', sep=',')
client_df.dtypes
campaign_df = pd.read_csv('campaign.csv', sep=',')
campaign_df.dtypes
econ_df = pd.read_csv('economics.csv', sep=',')
econ_df.dtypes
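Note that the csv round trip drops most pandas dtype information: the checks above will show the text columns and last_contact_date as object, while the numeric and boolean columns are re-inferred on read. If last_contact_date needs to come back as a datetime for further checks, one option (a sketch) is to re-parse it explicitly:

campaign_check = pd.read_csv('campaign.csv', sep=',', parse_dates=['last_contact_date'])
campaign_check['last_contact_date'].dtype  # datetime64[ns]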