
Insurance companies invest a lot of time and money into optimizing their pricing and accurately estimating the likelihood that customers will make a claim. In many countries it is a legal requirement to have car insurance in order to drive a vehicle on public roads, so the market is very large!

Knowing all of this, On the Road car insurance have requested your services in building a model to predict whether a customer will make a claim on their insurance during the policy period. As they have very little expertise and infrastructure for deploying and monitoring machine learning models, they've asked you to identify the single feature that results in the best-performing model, as measured by accuracy, so they can start with a simple model in production.

They have supplied you with their customer data as a CSV file called car_insurance.csv, along with a table detailing the column names and descriptions below.

The dataset

Column: Description

id: Unique client identifier
age: Client's age
  • 0: 16-25
  • 1: 26-39
  • 2: 40-64
  • 3: 65+
gender: Client's gender
  • 0: Female
  • 1: Male
driving_experience: Years the client has been driving
  • 0: 0-9
  • 1: 10-19
  • 2: 20-29
  • 3: 30+
education: Client's level of education
  • 0: No education
  • 1: High school
  • 2: University
income: Client's income level
  • 0: Poverty
  • 1: Working class
  • 2: Middle class
  • 3: Upper class
credit_score: Client's credit score (between zero and one)
vehicle_ownership: Client's vehicle ownership status
  • 0: Does not own their vehicle (paying off finance)
  • 1: Owns their vehicle
vehicle_year: Year of vehicle registration
  • 0: Before 2015
  • 1: 2015 or later
married: Client's marital status
  • 0: Not married
  • 1: Married
children: Client's number of children
postal_code: Client's postal code
annual_mileage: Number of miles driven by the client each year
vehicle_type: Type of car
  • 0: Sedan
  • 1: Sports car
speeding_violations: Total number of speeding violations received by the client
duis: Number of times the client has been caught driving under the influence of alcohol
past_accidents: Total number of previous accidents the client has been involved in
outcome: Whether the client made a claim on their car insurance (response variable)
  • 0: No claim
  • 1: Made a claim
# Import required modules
import pandas as pd
import numpy as np
from statsmodels.formula.api import logit

# Start coding!
df = pd.read_csv('car_insurance.csv')
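
# The helper functions below assemble LLM-ready data-audit prompts covering
# data types, categorical values, null counts, and outlier flags.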



def get_data_type_prompt(df):
    get_dtypes = dict(df.dtypes)
    store_prompt = []

    # Loop through columns and data types
    for column_name, data_type in get_dtypes.items():
        store_prompt.append(f"The column '{column_name}' has a data type of '{data_type}'.")

   
    data_type_prompt = "Data Type:\n" + "\n".join(store_prompt) + \
                   "\n\nOverview: If all data types are appropriate based on the column names, no changes are needed. " \
                   "Please provide an overview of the columns and their respective data types. " \
                   "If there are any mismatches, kindly suggest corrections or fixes based on the column names and expected data types."

    return data_type_prompt

 
def get_categorical_prompt(df):
    category_suggestions = []
    for column in df.columns:
        if df[column].dtype == 'object' or df[column].dtype.name == 'category':
            unique_values = df[column].unique()  
            num_unique_values = len(unique_values)

            if 1 <= num_unique_values <= 30:
                unique_string = ', '.join(map(str,unique_values))
                prompt = (f"The column '{column}' contains the following unique values: {unique_string}. ")

                category_suggestions.append(prompt)
        
    category_prompt = (
        "Category:\n" + "\n".join(category_suggestions) +
        "\nIf these unique values are correctly spelled and show no abnormalities, "
        "please skip this column. Otherwise, suggest corrections or improvements "
        "for any potential misspellings or abnormalities in the data."
    )

    return category_prompt


def get_null_prompt(df):
    columns_with_nulls = df.columns[df.isnull().any()].tolist()
    null_counts = df.isnull().sum()

    # Short-circuit when the data has no missing values
    if not columns_with_nulls:
        return "Null Values:\nNo columns contain null values."

    null_prompt = (
        "Null Values:\n" +
        "\n".join(f"{column}: {null_counts[column]} null values" for column in columns_with_nulls) +
        "\n1. Suggest filling null values with appropriate substitutes (mean, median, mode, etc.).\n"
        "2. If the null values are insignificant, consider dropping those rows or columns.\n"
        "3. Document any assumptions or decisions made regarding null values."
    )

    return null_prompt

def get_outliers_prompt(df):
    numeric_columns = df.select_dtypes(include=['number']).columns
    outliers = {}
    outlier_data = []

    for column in numeric_columns:
        # Tukey's rule: flag values more than 1.5 * IQR outside the quartiles
        Q1 = df[column].quantile(0.25)
        Q3 = df[column].quantile(0.75)
        IQR = Q3 - Q1
        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR
        lower_outliers = (df[column] < lower_bound).sum()
        upper_outliers = (df[column] > upper_bound).sum()
        outlier_count = lower_outliers + upper_outliers
        if outlier_count > 0:
            outliers[column] = {
                "lower_outlier": lower_outliers,
                "upper_outlier": upper_outliers,
                "total": outlier_count,
            }

    for column, values in outliers.items():
        outlier_data.append(
            f"{column} has a total of {values['total']} outliers: "
            f"{values['lower_outlier']} lower outliers and {values['upper_outlier']} upper outliers."
        )

    outlier_prompt = (
        "Outliers:\n" + "\n".join(outlier_data) +
        "\nReview the total number of outliers in the numerical columns. Ask the user to "
        "verify the data and add suggestions on how to address the outliers."
    )

    return outlier_prompt


data_type_prompt = get_data_type_prompt(df)
categorical_prompt = get_categorical_prompt(df)
null_prompt = get_null_prompt(df)
outlier_prompt = get_outliers_prompt(df)

final_prompt = (
    data_type_prompt + "\n\n" +
    categorical_prompt + "\n\n" +
    null_prompt + "\n\n" +
    outlier_prompt
)



f" Assess the information provide and follow the prompt carefully. Generate a Data Here is the provided prompt {prompt} Also dont generate tables only text"

f"Summarize Data Types: For each column, provide the data type and indicate if there are any mismatches. Suggest appropriate corrections based on the column names.\n" \
    "Categorical Value Review: Analyze the unique values in categorical columns and suggest any necessary corrections or improvements for misspellings or abnormalities.\n" \
    "Null Value Analysis: List columns with null values and provide recommendations for handling these values, including suggestions for filling or removing them.\n" \
    "Outlier Evaluation: Review the outlier data, detailing the counts of lower and upper outliers for each column. Provide suggestions on how to address these outliers, including verifying data accuracy and deciding whether to retain, transform, or remove them.\n" \
    f"Here is the prompt: {prompt}. Dont add Tables"
def get_outlier_objects(df):
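    # Collect box-plot statistics (the five-number summary) for each numeric column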
    numeric_columns = df.select_dtypes(include=['number']).columns
    labels = []
    numeric_data = []
    
    for columns in numeric_columns:
        
        min_value = df[columns].min()
        Q1 = df[columns].quantile(0.25)
        Q2 = df[columns].quantile(0.50)
        Q3 = df[columns].quantile(0.75)
        max_value = df[columns].max()
        get_data = {"min":min_value,"q1":Q1,"median":Q2,"q3":Q3,"max":max_value}
        labels.append(columns)
        numeric_data.append(get_data)
        
    return labels, numeric_data

labels, numeric_data = get_outlier_objects(df)
print(labels)
print(numeric_data)
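
# Returning to On the Road's actual request: find the single feature that
# yields the most accurate claim-prediction model. A minimal sketch, assuming
# nulls are filled with column means first (the audit prompts above flag the
# affected columns) and using the logit import from the top of the script.
df_model = df.fillna(df.mean(numeric_only=True))
features = [col for col in df_model.columns if col not in ('id', 'outcome')]
accuracies = {}

for feature in features:
    # Fit a one-feature logistic regression and classify at a 0.5 threshold
    model = logit(f"outcome ~ {feature}", data=df_model).fit(disp=0)
    predictions = model.predict(df_model) >= 0.5
    accuracies[feature] = (predictions == df_model['outcome']).mean()

best_feature = max(accuracies, key=accuracies.get)
best_feature_df = pd.DataFrame(
    {"best_feature": [best_feature], "best_accuracy": [accuracies[best_feature]]}
)
print(best_feature_df)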