# Kaggle competition script: train several classifiers on the tabular
# training data and write a per-class probability submission for the
# binary 'Class' target.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import accuracy_score, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import xgboost as xgb
# --- Data loading -----------------------------------------------------------
# Load the competition training set, test set, and supplemental metadata
# (greeks.csv shares an 'Id' column with train.csv).
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
metadata = pd.read_csv('greeks.csv')

# Attach metadata to the training rows; a left join keeps every training row
# even when no metadata entry exists for its Id.
train_data = train_data.merge(metadata, on='Id', how='left')

# Keep only numeric columns.  This drops 'Id' and every other object-dtype
# column (including any string/date-like metadata fields).
train_data1 = train_data.select_dtypes(exclude='object')
test_data1 = test_data.select_dtypes(exclude='object')

# Impute missing values with 0.  Assigning the result (rather than calling
# fillna(..., inplace=True) on a derived frame) avoids pandas
# SettingWithCopy warnings and is the idiomatic form.
train_data1 = train_data1.fillna(0)
test_data1 = test_data1.fillna(0)
# --- Feature preparation ----------------------------------------------------
# Separate features and target variable in the training set.
X_train = train_data1.drop(['Class'], axis=1)
y_train = train_data1['Class']

# Encode the target labels as integers 0..n_classes-1.
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(y_train)

# The metadata merge may have introduced numeric columns that exist in the
# training frame but not in the test frame; keep only the feature columns
# shared by both so the scaler sees an identical column layout.  This is a
# no-op when the frames already match.
common_cols = [col for col in X_train.columns if col in test_data1.columns]
X_train = X_train[common_cols]
test_features = test_data1[common_cols]

# Hold out 20% of the training data for validation (fixed seed for
# reproducibility).
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

# Standardize features.  The scaler is fitted on the training split only,
# then applied to the validation and test sets, so no validation/test
# statistics leak into the training transform.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(test_features)
# --- Model training and validation ------------------------------------------
# Fit each candidate classifier on the scaled training split and report its
# accuracy plus per-class precision/recall/F1 on the held-out validation
# split.  Insertion order of the dict fixes the training/report order.
models = {
    "Logistic Regression": LogisticRegression(),
    "Decision Tree": DecisionTreeClassifier(),
    "Random Forest": RandomForestClassifier(),
    "SVM": SVC(),
    "XGBoost": xgb.XGBClassifier(),
}
for model_name, model in models.items():
    model.fit(X_train_scaled, y_train)
    val_predictions = model.predict(X_val_scaled)
    print(model_name + " Accuracy:", accuracy_score(y_val, val_predictions))
    print(classification_report(y_val, val_predictions))

# The fitted XGBoost model is used downstream to score the test set.
xgb_model = models["XGBoost"]
# --- Test-set predictions and submission ------------------------------------
# The competition expects per-class probabilities for every test row.
# predict_proba returns an (n_samples, 2) array: column 0 is P(class 0)
# and column 1 is P(class 1).
test_probabilities = xgb_model.predict_proba(X_test_scaled)

# Build the submission from the test Ids and the predicted probabilities.
# (The previous version overwrote the probability frame with the first five
# *training labels* via head(5), producing a meaningless, mostly-empty
# submission.)
submission = pd.DataFrame({
    'Id': test_data['Id'],
    'Class_0': test_probabilities[:, 0],
    'Class_1': test_probabilities[:, 1],
})
submission.to_csv('submission.csv', index=False)