Neuron
import numpy as np
np.set_printoptions(precision=None, suppress=True)
# define input as x: feature values for one training example, shape (3,)
x = np.array([2.3,4.5,1.3])
# define weights as w: one weight per input feature, shape (3,)
w = np.array([3.2,-1.9,2.5])
# define bias: stored as a 1-element array, so broadcasting adds it elementwise
b = np.array([-1])
# define relu function
def relu(z):
    """Rectified linear unit: clamp all negative pre-activations to zero."""
    # clip with no upper bound is equivalent to max(z, 0) elementwise
    return np.clip(z, 0, None)
# define sigmoid function
def sigmoid(z):
    """Logistic sigmoid activation: squashes z elementwise into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
# define neuron function
def neuron_output(x,weights,bias,phi):
    """Forward pass of one neuron: return phi(w . x + b)."""
    pre_activation = weights @ x.T + bias
    return phi(pre_activation)
Neuron with ReLU Activation Function
# forward pass for one example with ReLU; cell output reproduced below
neuron_output(x,w,b,relu)
array([1.06])
Neuron with Sigmoid Activation Function
# same example with sigmoid; cell output reproduced below
neuron_output(x,w,b,sigmoid)
array([0.74269055])
Single Neuron, Multiple Training Examples
# define input as X: five training examples (rows), three features (columns)
X = np.array([[2.3,4.5,1.3],[-1.1,3.2,-2],[-.2,1.9,3],[-.2,-.1,.4],[-.1,-.3,-.2]])
def neuron_output2(X,weights,bias,phi):
    """Vectorized forward pass: one activation per row (example) of X."""
    # X @ weights gives the same (n_examples,) vector as dot(weights, X.T)
    pre_activation = X @ weights + bias
    return phi(pre_activation)
# one sigmoid activation per training example; cell output reproduced below
neuron_output2(X,w,b,sigmoid)
array([0.74269055, 0.00000017, 0.90465054, 0.38936077, 0.22270014])
# inspect the transposed input (features x examples) and the weight vector
X.T
array([[ 2.3, -1.1, -0.2, -0.2, -0.1],
[ 4.5, 3.2, 1.9, -0.1, -0.3],
[ 1.3, -2. , 3. , 0.4, -0.2]])
w
array([ 3.2, -1.9, 2.5])
# create Neuron class
class Neuron:
    """A single artificial neuron: output = phi(w . X^T + b).

    Stores the input, weight vector, bias and activation function at
    construction time; ``calc_output`` runs the forward pass.
    """

    def __init__(self, neuron_input, weights, bias, activation_function):
        self.w = weights                  # weight vector, shape (n_features,)
        self.b = bias                     # bias (1-element array or scalar)
        self.X = neuron_input             # one example (n_features,) or batch (n_examples, n_features)
        self.phi = activation_function    # elementwise activation callable

    def calc_output(self):
        """Forward pass: return self.phi(self.w . self.X^T + self.b).

        Bug fix: the original read the module-level globals ``w``, ``X``
        and ``b`` here instead of the instance attributes, so whatever
        was passed to the constructor was silently ignored.
        """
        z = np.dot(self.w, self.X.T) + self.b
        return self.phi(z)
Neuron with ReLU Activation Function
# instantiate neuron class and calculate output for relu activation function
# NOTE(review): calc_output reads the module globals w/X/b, not the
# constructor arguments — the output below is right only because they match.
Neuron_relu = Neuron(X,w,b,relu)
Neuron_relu.calc_output()
array([1.06, 0. , 2.25, 0. , 0. ])
Neuron with Sigmoid Activation Function
# instantiate neuron class and calculate output for sigmoid activation function
# NOTE(review): w.T is a no-op here — transposing a 1-D NumPy array returns it
# unchanged, so this is the same weight vector as above.
Neuron_sigmoid = Neuron(X,w.T,b,sigmoid)
Neuron_sigmoid.calc_output()
array([0.74269055, 0.00000017, 0.90465054, 0.38936077, 0.22270014])