Practical - 8 : Neural Networks


AND GATE
In [ ]: # import Python libraries
import numpy as np
from matplotlib import pyplot as plt

# Sigmoid activation function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Initialization of the neural network parameters:
# weights are drawn from a standard normal distribution, biases are initialized to 0
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    b1 = np.zeros((neuronsInHiddenLayers, 1))
    b2 = np.zeros((outputFeatures, 1))

    parameters = {"W1": W1, "b1": b1,
                  "W2": W2, "b2": b2}
    return parameters

# Forward propagation
def forwardPropagation(X, Y, parameters):
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)

    # binary cross-entropy cost
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
    cost = -np.sum(logprobs) / m
    return cost, cache, A2

# Backward propagation
def backwardPropagation(X, Y, cache):
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, A1 * (1 - A1))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m

    gradients = {"dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients

# Updating the weights based on the negative gradients
def updateParameters(parameters, gradients, learningRate):
    parameters["W1"] = parameters["W1"] - learningRate * gradients["dW1"]
    parameters["W2"] = parameters["W2"] - learningRate * gradients["dW2"]
    parameters["b1"] = parameters["b1"] - learningRate * gradients["db1"]
    parameters["b2"] = parameters["b2"] - learningRate * gradients["db2"]
    return parameters

# Model to learn the AND truth table
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])  # AND inputs (one column per example)
Y = np.array([[0, 0, 0, 1]])                # AND outputs

# Define model parameters
neuronsInHiddenLayers = 2   # number of hidden layer neurons
inputFeatures = X.shape[0]  # number of input features (2)
outputFeatures = Y.shape[0] # number of output features (1)
parameters = initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures)
epoch = 100000
learningRate = 0.01
losses = np.zeros((epoch, 1))

for i in range(epoch):
    losses[i, 0], cache, A2 = forwardPropagation(X, Y, parameters)
    gradients = backwardPropagation(X, Y, cache)
    parameters = updateParameters(parameters, gradients, learningRate)

# Testing on the columns (1,0), (1,1), (0,0), (0,1)
X = np.array([[1, 1, 0, 0], [0, 1, 0, 1]])  # AND test inputs
cost, _, A2 = forwardPropagation(X, Y, parameters)
prediction = (A2 > 0.5) * 1.0
# print(A2)
print(prediction)

[[0. 1. 0. 0.]]
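
The cell above implements the standard forward and backward passes of a 2-2-1 sigmoid network trained with the binary cross-entropy cost. Written out, with m the number of training columns and ⊙ the element-wise product:

$$Z_1 = W_1 X + b_1,\quad A_1 = \sigma(Z_1),\quad Z_2 = W_2 A_1 + b_2,\quad A_2 = \sigma(Z_2)$$

$$J = -\frac{1}{m}\sum\Big[\,Y \log A_2 + (1 - Y)\log(1 - A_2)\,\Big]$$

$$dZ_2 = A_2 - Y,\quad dW_2 = \frac{1}{m}\, dZ_2 A_1^{\top},\quad dZ_1 = \big(W_2^{\top} dZ_2\big)\odot A_1 (1 - A_1),\quad dW_1 = \frac{1}{m}\, dZ_1 X^{\top}$$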

OR GATE
In [ ]: # Sigmoid activation function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Initialization of the neural network parameters:
# weights are drawn from a standard normal distribution, biases are initialized to 0
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    b1 = np.zeros((neuronsInHiddenLayers, 1))
    b2 = np.zeros((outputFeatures, 1))

    parameters = {"W1": W1, "b1": b1,
                  "W2": W2, "b2": b2}
    return parameters

# Forward propagation
def forwardPropagation(X, Y, parameters):
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)

    # binary cross-entropy cost
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
    cost = -np.sum(logprobs) / m
    return cost, cache, A2

# Backward propagation
def backwardPropagation(X, Y, cache):
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, A1 * (1 - A1))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m

    gradients = {"dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients

# Updating the weights based on the negative gradients
def updateParameters(parameters, gradients, learningRate):
    parameters["W1"] = parameters["W1"] - learningRate * gradients["dW1"]
    parameters["W2"] = parameters["W2"] - learningRate * gradients["dW2"]
    parameters["b1"] = parameters["b1"] - learningRate * gradients["db1"]
    parameters["b2"] = parameters["b2"] - learningRate * gradients["db2"]
    return parameters

# Model to learn the OR truth table
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])  # OR inputs (one column per example)
Y = np.array([[0, 1, 1, 1]])                # OR outputs

# Define model parameters
neuronsInHiddenLayers = 2   # number of hidden layer neurons
inputFeatures = X.shape[0]  # number of input features (2)
outputFeatures = Y.shape[0] # number of output features (1)
parameters = initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures)
epoch = 100000
learningRate = 0.01
losses = np.zeros((epoch, 1))

for i in range(epoch):
    losses[i, 0], cache, A2 = forwardPropagation(X, Y, parameters)
    gradients = backwardPropagation(X, Y, cache)
    parameters = updateParameters(parameters, gradients, learningRate)

# Testing on the columns (1,0), (1,1), (0,0), (0,1)
X = np.array([[1, 1, 0, 0], [0, 1, 0, 1]])  # OR test inputs
cost, _, A2 = forwardPropagation(X, Y, parameters)
prediction = (A2 > 0.5) * 1.0
# print(A2)
print(prediction)

[[1. 1. 0. 1.]]
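
The training loop stores the cost for every epoch in losses, so convergence can be visualized. A minimal sketch, assuming it is run in the same kernel immediately after the cell above so that losses is still in scope:

plt.figure()
plt.plot(losses)                  # per-epoch binary cross-entropy cost
plt.xlabel("EPOCHS")
plt.ylabel("Loss value")
plt.title("Loss curve for the OR model")
plt.show()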

NOT GATE
In [ ]: # define unit step function
def unitStep(v):
    if v >= 0:
        return 1
    else:
        return 0

# design perceptron model
def perceptronModel(x, w, b):
    v = np.dot(w, x) + b
    y = unitStep(v)
    return y

# NOT logic function: w = -1, b = 0.5
def NOT_logicFunction(x):
    w = -1
    b = 0.5
    return perceptronModel(x, w, b)

# testing the perceptron model
test1 = np.array(1)
test2 = np.array(0)

print("NOT({}) = {}".format(1, NOT_logicFunction(test1)))
print("NOT({}) = {}".format(0, NOT_logicFunction(test2)))

NOT(1) = 0
NOT(0) = 1
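
These results follow directly from the chosen parameters w = -1 and b = 0.5:

$$v = w\,x + b:\qquad x = 1 \Rightarrow v = -1 + 0.5 = -0.5 < 0 \Rightarrow \text{NOT}(1) = 0,\qquad x = 0 \Rightarrow v = 0.5 \ge 0 \Rightarrow \text{NOT}(0) = 1$$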

XNOR GATE
In [ ]: # Sigmoid activation function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Initialization of the neural network parameters:
# weights are drawn from a standard normal distribution, biases are initialized to 0
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    b1 = np.zeros((neuronsInHiddenLayers, 1))
    b2 = np.zeros((outputFeatures, 1))

    parameters = {"W1": W1, "b1": b1,
                  "W2": W2, "b2": b2}
    return parameters

# Forward propagation
def forwardPropagation(X, Y, parameters):
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)

    # binary cross-entropy cost
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
    cost = -np.sum(logprobs) / m
    return cost, cache, A2

# Backward propagation
def backwardPropagation(X, Y, cache):
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, A1 * (1 - A1))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m

    gradients = {"dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients

# Updating the weights based on the negative gradients
def updateParameters(parameters, gradients, learningRate):
    parameters["W1"] = parameters["W1"] - learningRate * gradients["dW1"]
    parameters["W2"] = parameters["W2"] - learningRate * gradients["dW2"]
    parameters["b1"] = parameters["b1"] - learningRate * gradients["db1"]
    parameters["b2"] = parameters["b2"] - learningRate * gradients["db2"]
    return parameters

# Model to learn the XNOR truth table
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])  # XNOR inputs (one column per example)
Y = np.array([[1, 0, 0, 1]])                # XNOR outputs

# Define model parameters
neuronsInHiddenLayers = 2   # number of hidden layer neurons
inputFeatures = X.shape[0]  # number of input features (2)
outputFeatures = Y.shape[0] # number of output features (1)
parameters = initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures)
epoch = 100000
learningRate = 0.01
losses = np.zeros((epoch, 1))

for i in range(epoch):
    losses[i, 0], cache, A2 = forwardPropagation(X, Y, parameters)
    gradients = backwardPropagation(X, Y, cache)
    parameters = updateParameters(parameters, gradients, learningRate)

# Testing on the columns (1,0), (1,1), (0,0), (0,1)
X = np.array([[1, 1, 0, 0], [0, 1, 0, 1]])  # XNOR test inputs
cost, _, A2 = forwardPropagation(X, Y, parameters)
prediction = (A2 > 0.5) * 1.0
# print(A2)
print(prediction)

[[0. 1. 1. 0.]]
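
Since the test columns are (1,0), (1,1), (0,0), (0,1), the printed row matches XNOR. A minimal sketch for checking the trained network against the full truth table, assuming the cell above has just been run so that forwardPropagation and parameters are still defined (X_truth and Y_truth are names introduced here only for illustration):

X_truth = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])  # all four input pairs
Y_truth = np.array([[1, 0, 0, 1]])                # expected XNOR outputs
_, _, A2 = forwardPropagation(X_truth, Y_truth, parameters)
pred = (A2 > 0.5) * 1.0
print(pred, np.array_equal(pred, Y_truth))        # True when the network reproduces XNOR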

Activation Functions

Binary Step

In [ ]: def binaryStep(x):
    return np.heaviside(x, 1)

x = np.linspace(-10, 10)
plt.plot(x, binaryStep(x))
plt.axis('tight')
plt.title('Activation Function :binaryStep')
plt.show()

Linear Activation

In [ ]: def linear(x):
    return x

x = np.linspace(-10, 10)
plt.plot(x, linear(x))
plt.axis('tight')
plt.title('Activation Function :Linear')
plt.show()

Sigmoid Activation

In [ ]: def sigmoid(x):
    return 1 / (1 + np.exp(-x))

x = np.linspace(-10, 10)
plt.plot(x, sigmoid(x))
plt.axis('tight')
plt.title('Activation Function :Sigmoid')
plt.show()
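
The backpropagation code earlier relies on the convenient derivative of this function, which is exactly the A1 * (1 - A1) factor in dZ1:

$$\sigma(x) = \frac{1}{1 + e^{-x}},\qquad \sigma'(x) = \sigma(x)\,\big(1 - \sigma(x)\big)$$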

Tanh Activation

In [ ]: def tanh(x):
    return np.tanh(x)

x = np.linspace(-10, 10)
plt.plot(x, tanh(x))
plt.axis('tight')
plt.title('Activation Function :Tanh')
plt.show()

RELU Activation

In [ ]: def RELU(x):
    x1 = []
    for i in x:
        if i < 0:
            x1.append(0)
        else:
            x1.append(i)
    return x1

x = np.linspace(-10, 10)
plt.plot(x, RELU(x))
plt.axis('tight')
plt.title('Activation Function :RELU')
plt.show()
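
The loop-based RELU above works element by element on a 1-D array; a vectorized sketch using np.maximum gives the same values for any array shape (the name relu_vec is introduced here only for illustration):

def relu_vec(x):
    # element-wise max(0, x), same output as the loop version for 1-D input
    return np.maximum(0, x)

plt.plot(x, relu_vec(x))   # produces the same plot as the cell above
plt.show()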

Softmax Activation

In [ ]: def softmax(x):
    return np.exp(x) / np.sum(np.exp(x), axis=0)

x = np.linspace(-10, 10)
plt.plot(x, softmax(x))
plt.axis('tight')
plt.title('Activation Function :Softmax')
plt.show()
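
Unlike the other activations, softmax normalizes the whole input vector, so its outputs always sum to 1. A quick check, assuming the cell above has just been run so that softmax and x are defined:

print(np.sum(softmax(x)))   # prints 1.0 (up to floating-point rounding)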
