Shallow Neural Network for Classification

In [1]:
import gzip
import numpy as np
import math    
import matplotlib.pyplot as plt
import pandas as pd

We define a Neural Network class that implements a shallow neural network

In [2]:
class NeuralNetwork:
    
    def __init__(self,n_x,n_h,n_y,hidden_activation,learning_rate,num_iterations):
        self.n_x = n_x
        self.n_h = n_h
        self.n_y = n_y
        self.hidden_activation = hidden_activation
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        self.initialize_parameters()
        
    # Weight Initialization
    def initialize_parameters(self):
        """
        Argument:
        n_x -- size of the input layer
        n_h -- size of the hidden layer
        n_y -- size of the output layer

        Returns:
        params -- python dictionary containing your parameters:
                        W1 -- weight matrix of shape (n_h, n_x)
                        b1 -- bias vector of shape (n_h, 1)
                        W2 -- weight matrix of shape (n_y, n_h)
                        b2 -- bias vector of shape (n_y, 1)
        """   

        W1 = np.random.randn(self.n_h,self.n_x)*0.01
        b1 = np.zeros((self.n_h,1))
        W2 = np.random.randn(self.n_y,self.n_h)*0.01
        b2 = np.zeros((self.n_y,1))

        self.parameters = {"W1": W1,
                      "b1": b1,
                      "W2": W2,
                      "b2": b2}
        
    # Define activation functions
    
    def sigmoid(self,z):
        return 1/(1+np.exp(-z))
    
    def relu(self,z):
        return np.maximum(0,z)
    
    def softmax(self,z):
        e = np.exp(z)
        e_sum = np.sum(e,axis=0)    
        return e/e_sum
    
    def swish(self,z):
        return z/(1+np.exp(-z))
        
    
    # Define derivatives
    
    def sigmoid_derivative(self,z):
        return self.sigmoid(z) * (1 - self.sigmoid(z))
    
    def relu_derivative(self,z):
        z[z<=0] = 0
        z[z>0] = 1    
        return z
    
    def swish_derivative(self,z):        
        return self.swish(z)+self.sigmoid(z) * (1 - self.swish(z))
        
    
    # Define loss
    
    def cross_entropy(self,predictions, targets, epsilon = 1e-12):
        """
        Computes cross entropy between targets (encoded as one-hot vectors)
        and predictions. 

        Arguments:
        predictions -- A numpy array of size (k, N)  
        targets     -- A numpy array of size (k, N) 
        N: number of examples
        k: number of classes

        Return: cross entropy loss

        """    
        predictions = predictions.T
        targets = targets.T

        predictions = np.clip(predictions, epsilon, 1. - epsilon) 
        #N = predictions.shape[0]
        ce = -np.sum(targets*np.log(predictions+1e-9))

        return ce
    
    
    # forward and backward pass
    
    def forward_propagation(self,X):
        #X = self.inputs
        #parameters = self.parameters
        """
        Argument:
        X -- input data of size (n_x, m)
        parameters -- python dictionary containing our parameters (output of initialization function)

        Returns:
        A2 -- The sigmoid output of the second activation
        neuron_values -- a dictionary containing "Z1", "A1", "Z2" and "A2"
        """
        # Retrieve each parameter from the dictionary "parameters"
        W1 = self.parameters["W1"]
        b1 = self.parameters["b1"]
        W2 = self.parameters["W2"]
        b2 = self.parameters["b2"]

        # Implement Forward Propagation to calculate A2 (probabilities)
        Z1 = np.dot(W1,X)+b1
        if self.hidden_activation=='sigmoid':
            A1 = self.sigmoid(Z1)
        elif self.hidden_activation=='swish':
            A1 = self.swish(Z1)
        else:
            A1 = self.relu(Z1)
            
        Z2 = np.dot(W2,A1)+b2
        A2 = self.softmax(Z2)


        neuron_values = {"Z1": Z1,
                 "A1": A1,
                 "Z2": Z2,
                 "A2": A2}

        return A2, neuron_values
    
    # backward propagation
    
    def backward_propagation(self, neuron_values, X, Y):
        """

        Arguments:
        parameters -- python dictionary containing our parameters 
        neuron_values -- a dictionary containing "Z1", "A1", "Z2" and "A2".
        X -- input data of shape (784, number of examples)
        Y -- one-hot vector of shape (10, number of examples)

        Returns:
        grads -- python dictionary containing your gradients with respect to different parameters

        """
        m = X.shape[1]

        # First, retrieve W1 and W2 from the dictionary "parameters".
        W1 = self.parameters["W1"]
        W2 = self.parameters["W2"]

        # Retrieve also A1 and A2 from dictionary "neuron_values".
        A1 = neuron_values["A1"]
        A2 = neuron_values["A2"]


        # Backward propagation: calculate dW1, db1, dW2, db2. 
        dZ2 = A2-Y
        dW2 = (1/m)*np.dot(dZ2,A1.T)
        db2 = (1/m)*np.sum(dZ2,axis=1,keepdims=True)
        
        if self.hidden_activation=='sigmoid':
            dZ1 = np.multiply(np.dot(W2.T,dZ2),(self.sigmoid_derivative(A1)))
        elif self.hidden_activation=='swish':
            dZ1 = np.multiply(np.dot(W2.T,dZ2),(self.swish_derivative(A1)))
        else:
            dZ1 = np.multiply(np.dot(W2.T,dZ2),(self.relu_derivative(A1)))   

        
        dW1 = (1/m)*np.dot(dZ1,X.T)
        db1 = (1/m)*np.sum(dZ1,axis=1,keepdims=True)

        grads = {"dW1": dW1,
                 "db1": db1,
                 "dW2": dW2,
                 "db2": db2}

        return grads
    
    # Update parameters
    def update_parameters(self, grads):
        """
        Updates parameters using the gradient descent update rule

        Arguments:
        parameters -- python dictionary containing our parameters 
        grads -- python dictionary containing our gradients 

        Returns:
        parameters -- python dictionary containing our updated parameters 
        """
        # Retrieve each parameter from the dictionary "parameters"

        W1 = self.parameters["W1"]
        b1 = self.parameters["b1"]
        W2 = self.parameters["W2"]
        b2 = self.parameters["b2"]


        # Retrieve each gradient from the dictionary "grads"

        dW1 = grads["dW1"]
        db1 = grads["db1"]
        dW2 = grads["dW2"]
        db2 = grads["db2"]


        # Update rule for each parameter

        W1 = W1 - self.learning_rate*dW1
        b1 = b1 - self.learning_rate*db1
        W2 = W2 - self.learning_rate*dW2
        b2 = b2 - self.learning_rate*db2

        self.parameters = {"W1": W1,
                      "b1": b1,
                      "W2": W2,
                      "b2": b2}
        
        
    # dividing into minibatches
    def random_mini_batches(self,X, Y, mini_batch_size = 64):
        """
        Creates a list of random minibatches from (X, Y)

        Arguments:
        X -- input data, of shape (input size, number of examples)
        Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (10, number of examples)
        mini_batch_size -- size of the mini-batches, integer

        Returns:
        mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
        """

        m = X.shape[1]                  # number of training examples
        mini_batches = []

        # Step 1: Shuffle (X, Y)
        permutation = list(np.random.permutation(m))
        shuffled_X = X[:, permutation]
        shuffled_Y = Y[:, permutation].reshape((self.n_y,m))

        # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
        num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
        for k in range(0, num_complete_minibatches):

            mini_batch_X = shuffled_X[:, k*mini_batch_size : (k+1)*mini_batch_size]
            mini_batch_Y = shuffled_Y[:, k*mini_batch_size : (k+1)*mini_batch_size]

            mini_batch = (mini_batch_X, mini_batch_Y)
            mini_batches.append(mini_batch)

        # Handling the end case (last mini-batch < mini_batch_size)
        if m % mini_batch_size != 0:

            mini_batch_X = shuffled_X[:, num_complete_minibatches*mini_batch_size : ]
            mini_batch_Y = shuffled_Y[:, num_complete_minibatches*mini_batch_size : ]

            mini_batch = (mini_batch_X, mini_batch_Y)
            mini_batches.append(mini_batch)

        return mini_batches
        
        
    # Fitting the model
    def nn_model(self,X, Y,mini_batch_size = 64, print_cost=False):
        """
        Arguments:
        X -- dataset of shape (784, number of examples)
        Y -- labels of shape (10, number of examples)
        n_h -- size of the hidden layer
        num_iterations -- Number of iterations in gradient descent loop
        print_cost -- if True, print the cost every 1000 iterations

        Returns:
        parameters -- parameters learnt by the model. They can then be used to predict.
        """
        n_x = self.n_x
        n_y = self.n_y
        num_iterations = self.num_iterations
        costs = []                       # to keep track of the cost
        m = X.shape[1]                   # number of training examples
        train_accuracy = []
        
       

        for i in range(num_iterations):
            # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
            minibatches = self.random_mini_batches(X, Y, mini_batch_size)
            cost_total = 0

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # Forward propagation
                predictions,neuron_values  = self.forward_propagation(minibatch_X)

                # Compute cost and add to the cost total
                cost_total += self.cross_entropy(predictions,minibatch_Y)

                # Backward propagation
                grads = self.backward_propagation(neuron_values, minibatch_X, minibatch_Y)

                # Update parameters
                self.update_parameters(grads)


            cost_avg = cost_total / m
            # Print the cost every 1000 epoch
            if num_iterations<200:
                if print_cost and i % 10 == 0:
                    print ("Cost after epoch %i: %f" %(i, cost_avg))
                if print_cost and i % 1 == 0:
                    costs.append(cost_avg)
                    train_accuracy.append(self.Accuracy(X,Y))
            else:
                if print_cost and i % 500 == 0:
                    print ("Cost after epoch %i: %f" %(i, cost_avg))
                if print_cost and i % 50 == 0:
                    costs.append(cost_avg)
                    train_accuracy.append(self.Accuracy(X,Y))
                
        return costs,train_accuracy
    
    # prediction
    
    def Accuracy(self,X,Y):
        predictions,neuron_values  = self.forward_propagation(X)
        predicted_class = np.argmax(predictions,axis=0)
        target_class = np.argmax(Y,axis=0)
        accuracy = (np.mean(predicted_class == target_class)*100)

        return accuracy  
    
    # plot loss Vs Epoch
    
    def loss_v_epoch(self,costs):
        # plot the cost
        plt.plot(costs)
        plt.ylabel('cost')
        if self.num_iterations<200:
            plt.xlabel('epochs (per 10)')
            plt.title('Training loss vs Epoch (MNIST)')
        else:
            plt.xlabel('epochs (per 500)')
            plt.title('Training loss vs Epoch (Madison County)')
        plt.grid()
        plt.show()
        
    
    # plot accuracy vs epoch
    
    def accuracy_v_epoch(self,train_accuracy):
        # plot the cost
        plt.plot(train_accuracy)
        plt.ylabel('accuracy')
        if self.num_iterations<200:
            plt.xlabel('epochs (per 10)')
            plt.title('Accuracy vs Epoch (MNIST)')
        else:
            plt.xlabel('epochs (per 500)')
            plt.title('Accuracy vs Epoch (Madison County)')
        plt.grid()
        plt.show()
        

1. MNIST Dataset

  • Read the images
  • Flatten the 2-d images
  • One-hot code the labels
  • Perform min-max normalization on images
In [3]:
def preprocess(filename_images, filename_labels,image_size,num_images):
    
    # read images
    f = gzip.open(filename_images,'r')
    f.read(16)
    buf = f.read(image_size * image_size * num_images)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    x = data.reshape(num_images, image_size, image_size, 1)
    
    # flatten the images 
    X = x.flatten().reshape(num_images,image_size*image_size).T

    # read labels
    f = gzip.open(filename_labels,'r')
    f.read(8)
    y = np.empty([num_images,1],dtype=int)

    for i in range(num_images):   
        buf = f.read(1)
        labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
        y[i,:] = labels
    
    # one hot encoding of y's
    digits = 10
    examples = y.shape[0]
    y = y.reshape(1, examples)
    Y = np.eye(digits)[y.astype('int32')]
    Y = Y.T.reshape(digits, examples)
    
    # Min-Max normalization
    X /= 255
    
    return X,Y

The following code takes in the zipped files downloaded from http://yann.lecun.com/exdb/mnist/

In [4]:
# Read Training Images
filename_images = 'G:/My Drive/CS637-Deep Learning/HW1/MNIST/train-images-idx3-ubyte.gz'
filename_labels = 'G:/My Drive/CS637-Deep Learning/HW1/MNIST/train-labels-idx1-ubyte.gz'
X,Y = preprocess(filename_images,filename_labels, image_size = 28, num_images = 60000)

# Read Testing Images
filename_images = 'G:/My Drive/CS637-Deep Learning/HW1/MNIST/t10k-images-idx3-ubyte.gz'
filename_labels = 'G:/My Drive/CS637-Deep Learning/HW1/MNIST/t10k-labels-idx1-ubyte.gz'
X_test,Y_test = preprocess(filename_images,filename_labels, image_size = 28, num_images = 10000)

$X_{f\times m}$ and $Y_{c\times m}$ is the training set

where,
f : no. of features
m : no. on examples
c : no. of classes

Now, let's use the class to train the model for MNIST dataset

In [5]:
if __name__ == "__main__":
    f = X.shape[0]
    m = X.shape[1]
    c = Y.shape[0]
    
    network = NeuralNetwork(n_x = f, n_h = 100, n_y = c, hidden_activation = 'swish', learning_rate = 0.1,num_iterations=80)
    
    costs,train_accuracy = network.nn_model(X, Y ,mini_batch_size = 128, print_cost=True)
    # plotting loss Vs epoch
    network.loss_v_epoch(costs)
    network.accuracy_v_epoch(train_accuracy)
    
    # check Accuracies
    print ('training accuracy: %.2f' % network.Accuracy(X,Y))
    print ('testing accuracy: %.2f' % network.Accuracy(X_test,Y_test))

# Gives highest accuracy with- n_h = 100, hidden_activation = 'swish', learning_rate = 0.1, num_iterations = 80
Cost after epoch 0: 0.941367
Cost after epoch 10: 0.165392
Cost after epoch 20: 0.100283
Cost after epoch 30: 0.071334
Cost after epoch 40: 0.054441
Cost after epoch 50: 0.042844
Cost after epoch 60: 0.034031
Cost after epoch 70: 0.027636
training accuracy: 99.57
testing accuracy: 97.37

2. Madison County Dataset

  • Read the data from xls files
  • Put negative and positive examples together and shuffle
  • One-hot code the labels
  • Take of the missing values
  • Perform Z normalization
  • Divide data into training and testing data
In [6]:
def preprocess(Madison_Irrigated,Madison_Rainfed,train_percent):
    X_pos = ((pd.read_excel(Madison_Irrigated)).values).T
    X_neg = ((pd.read_excel(Madison_Rainfed)).values).T

    Y_pos = np.ones((1,X_pos.shape[1]))
    Y_neg = np.zeros((1,X_neg.shape[1]))

    X_tot = np.hstack((X_pos,X_neg))
    Y_tot = np.hstack((Y_pos,Y_neg))

    Dataset = np.vstack((X_tot,Y_tot))

    # shuffling positive and negative points
    np.random.shuffle(np.transpose(Dataset))

    Y_tot = Dataset[-1,:].reshape((1,X_tot.shape[1])) # last row has labels

    # one-hot encoding of y's
    classes = 2
    Y_tot = np.eye(classes)[Y_tot.astype('int32')]
    Y_tot = Y_tot.T.reshape(classes, X_tot.shape[1])


    X_tot = np.delete(Dataset,-1,axis=0) # all rows except last has data

    # make all nan values to 0
    X_tot[np.isnan(X_tot)] = 0

    # normalizing X column wise since each column is a feature
    X_tot = (X_tot-np.mean(X_tot,axis=0))/np.std(X_tot,axis=0)

    # divide the data into training and test set
    train_size = round(train_percent*X_tot.shape[1])
    #test_size = X_tot.shape[1]-round(0.9*X_tot.shape[1])

    X,Y,X_test,Y_test = X_tot[:,:train_size],Y_tot[:,:train_size],X_tot[:,train_size:],Y_tot[:,train_size:]
    
    return X,Y,X_test,Y_test

Provide the link to the directory that has the files

In [7]:
# Read Data
Madison_Irrigated = 'G:/My Drive/CS637-Deep Learning/HW1/Madison_Irrigated.xls'
Madison_Rainfed = 'G:/My Drive/CS637-Deep Learning/HW1/Madison_Rainfed.xls'
X,Y,X_test,Y_test = preprocess(Madison_Irrigated,Madison_Rainfed,train_percent = 0.85)

$X_{f\times m}$ and $Y_{c\times m}$ is the training set

where,
f : no. of features
m : no. on examples
c : no. of classes

Now, let's use the class to train the model for Madison county dataset

In [8]:
if __name__ == "__main__":
    f = X.shape[0]
    m = X.shape[1]
    c = Y.shape[0]
    
    network = NeuralNetwork(n_x = f, n_h = 20, n_y = c, hidden_activation = 'relu', learning_rate = 0.01,num_iterations = 10000)
    
    costs,train_accuracy = network.nn_model(X, Y,mini_batch_size = 64, print_cost=True)
    
    # plotting loss Vs epoch
    network.loss_v_epoch(costs)
    network.accuracy_v_epoch(train_accuracy)
    
    # check Accuracies
    print ('training accuracy: %.2f' % network.Accuracy(X,Y))
    print ('testing accuracy: %.2f' % network.Accuracy(X_test,Y_test))
    
# # Gives highest accuracy with- n_h = 20, hidden_activation = 'relu', learning_rate = 0.01, num_iterations = 10000
Cost after epoch 0: 0.634192
Cost after epoch 500: 0.322544
Cost after epoch 1000: 0.302083
Cost after epoch 1500: 0.287644
Cost after epoch 2000: 0.278487
Cost after epoch 2500: 0.273434
Cost after epoch 3000: 0.266052
Cost after epoch 3500: 0.262721
Cost after epoch 4000: 0.256676
Cost after epoch 4500: 0.252680
Cost after epoch 5000: 0.249602
Cost after epoch 5500: 0.246361
Cost after epoch 6000: 0.243695
Cost after epoch 6500: 0.243028
Cost after epoch 7000: 0.240407
Cost after epoch 7500: 0.239123
Cost after epoch 8000: 0.234879
Cost after epoch 8500: 0.229648
Cost after epoch 9000: 0.224742
Cost after epoch 9500: 0.223212
training accuracy: 91.50
testing accuracy: 87.07
In [ ]: