import os
from time import time

import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import datasets, transforms
from torch import nn, optim

# Preprocessing pipeline: convert PIL images to tensors, then
# normalize pixel values from [0, 1] to [-1, 1].
tensor = transforms.ToTensor()
normalized = transforms.Normalize((0.5,), (0.5,))
transformation = transforms.Compose([tensor, normalized])
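As a quick sanity check (illustrative only, not part of the original pipeline), Normalize((0.5,), (0.5,)) rescales ToTensor's [0, 1] output to [-1, 1] via (x - 0.5) / 0.5:

# Illustrative check: a fake 1x1x3 "image" with values 0, 0.5, 1.
sample = torch.tensor([[[0.0, 0.5, 1.0]]])
print(normalized(sample))  # tensor([[[-1.,  0.,  1.]]])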

# Define the directory to store the MNIST dataset
data_dir = './data'

# Create the directory if it does not exist
os.makedirs(data_dir, exist_ok=True)

# Load the MNIST dataset
training_dataset = datasets.MNIST(data_dir, download=True, train=True, transform=transformation)
testing_dataset = datasets.MNIST(data_dir, download=True, train=False, transform=transformation)

train_data = torch.utils.data.DataLoader(training_dataset, batch_size=64, shuffle=True)
test_data = torch.utils.data.DataLoader(testing_dataset, batch_size=64, shuffle=True)
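A quick shape check (a minimal sketch, assuming the standard MNIST splits): 60,000 training and 10,000 test images, served in batches of 64 grayscale 1x28x28 tensors.

# Sanity check of dataset sizes and batch shapes (illustrative).
print(len(training_dataset), len(testing_dataset))   # 60000 10000
sample_images, sample_labels = next(iter(train_data))
print(sample_images.shape, sample_labels.shape)      # torch.Size([64, 1, 28, 28]) torch.Size([64])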
# Display the first 30 training images in a 3x10 grid.
images, labels = next(iter(train_data))
for i in range(30):
    plt.subplot(3, 10, i+1)
    plt.subplots_adjust(wspace=0.3)
    plt.axis('off')
    plt.imshow(images[i].numpy().squeeze(), cmap='gray')
plt.show()
# Network architecture: 784 -> 64 -> 32 -> 10 fully connected layers.
input_layer = 784
hidden_layer1 = 64
hidden_layer2 = 32
output_layer = 10
model = nn.Sequential(nn.Linear(input_layer, hidden_layer1),
                      nn.ReLU(),
                      nn.Linear(hidden_layer1, hidden_layer2),
                      nn.ReLU(),
                      nn.Linear(hidden_layer2, output_layer))
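For reference, this 784-64-32-10 network has 784*64+64 + 64*32+32 + 32*10+10 = 52,650 trainable parameters; a one-liner to confirm (illustrative):

# Count trainable parameters; should print 52650 for this architecture.
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable parameters:", num_params)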
# Sanity-check forward pass: flatten one batch to 784-dim vectors,
# compute the untrained network's loss, and set up the SGD optimizer.
images = images.view(images.shape[0], -1)
outputs = model(images)
lossFunction = nn.CrossEntropyLoss()
loss = lossFunction(outputs, labels)
gradient_descent = optim.SGD(model.parameters(), lr=0.1)
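Note that nn.CrossEntropyLoss expects raw logits: it applies log-softmax and negative log-likelihood internally. A quick equivalence check (illustrative only):

# Illustrative check: CrossEntropyLoss(logits) equals NLLLoss(log_softmax(logits)).
manual_loss = nn.NLLLoss()(nn.functional.log_softmax(outputs, dim=1), labels)
print(loss.item(), manual_loss.item())  # the two values should match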

epochs = 10
for epoch in range(epochs):
    running_loss = 0.0
    for images, labels in train_data:
        images = images.view(images.shape[0], -1)
        # Feed-Forward
        gradient_descent.zero_grad()
        loss = lossFunction(model(images), labels)
        # Back Propagation
        loss.backward()
        # Optimize the weights
        gradient_descent.step()
        running_loss += loss.item() * images.size(0)
    # Average the accumulated loss over the whole training set,
    # not just the last batch.
    epoch_loss = running_loss / len(training_dataset)
    print("Epoch:", epoch+1, end="\t")
    print("Loss:", epoch_loss)
def get_predicted_label(image):
    """Return the predicted digit (0-9) for a single 28x28 image."""
    image = image.view(1, 28*28)
    with torch.no_grad():
        prediction_score = model(image)
    return torch.argmax(prediction_score).item()
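get_predicted_label classifies one image at a time; an equivalent batched variant (a sketch using the same model) avoids the per-image Python loop:

# Batched variant (illustrative): classify a whole batch in one forward pass.
def get_predicted_labels(batch):
    batch = batch.view(batch.shape[0], -1)
    with torch.no_grad():
        scores = model(batch)
    return torch.argmax(scores, dim=1)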


images, labels = next(iter(test_data))
print("Predicted label: ", get_predicted_label(images[0]))
print("Actual label: ", labels.numpy()[0])
# Evaluate accuracy over the whole test set, displaying the first
# correct and the first incorrect prediction encountered.
totalCount = 0
accurateCount = 0
wrong = 0
for images, labels in test_data:
    for i in range(len(labels)):
        predictedLabel = get_predicted_label(images[i])
        actualLabel = labels.numpy()[i]
        # print("Predicted Label: ", predictedLabel, " / Actual Label: ", actualLabel)
        if predictedLabel == actualLabel:
            accurateCount += 1
            if accurateCount == 1:
                plt.imshow(images[i].squeeze(), cmap='gray')
                plt.title(f"Sample for RIGHT \n Predicted: {predictedLabel}, Actual: {actualLabel}")
                plt.axis('off')
                plt.show()
        else:
            wrong += 1
            if wrong == 1:
                plt.imshow(images[i].squeeze(), cmap='gray')
                plt.title(f"Sample for WRONG \n Predicted: {predictedLabel}, Actual: {actualLabel}")
                plt.axis('off')
                plt.show()
    totalCount += len(labels)
print("Total images tested: : ", totalCount)
print("Accurate predictions: ", accurateCount)
print("Accuracy percentage: ", ((accurateCount/totalCount)*100), "%")