mirror of
https://github.com/guezoloic/neural-network.git
synced 2026-01-25 09:34:23 +00:00
feat(main.py): add comments for neuron class
This commit is contained in:
30
main.py
30
main.py
@@ -12,6 +12,11 @@ def sigmoid_deriv(x):
|
|||||||
|
|
||||||
# neuron class
|
# neuron class
|
||||||
class Neuron:
|
class Neuron:
|
||||||
|
"""
|
||||||
|
z: linear combination of inputs and weights plus bias (pre-activation)
|
||||||
|
y : output of the activation function (sigmoid(z))
|
||||||
|
w : list of weights, one for each input
|
||||||
|
"""
|
||||||
def __init__(self, isize):
|
def __init__(self, isize):
|
||||||
# number of inputs to this neuron
|
# number of inputs to this neuron
|
||||||
self.isize = isize
|
self.isize = isize
|
||||||
@@ -21,24 +26,39 @@ class Neuron:
|
|||||||
self.bias = random.uniform(-1, 1)
|
self.bias = random.uniform(-1, 1)
|
||||||
|
|
||||||
# last z (linear combination) value
|
# last z (linear combination) value
|
||||||
self.last_z = 0
|
self.z = 0
|
||||||
# last output sigmoid(z)
|
# last output sigmoid(z)
|
||||||
self.last_output = 0
|
self.last_output = 0
|
||||||
|
|
||||||
def forward(self, x):
|
def forward(self, x):
|
||||||
z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias
|
# compute the weighted sum of inputs and add the bias
|
||||||
self.last_z = z
|
self.z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias
|
||||||
self.last_output = sigmoid(z)
|
# squash the output to a value between 0 and 1 via sigmoid
|
||||||
|
self.last_output = sigmoid(self.z)
|
||||||
return self.last_output
|
return self.last_output
|
||||||
|
|
||||||
|
# adjust weight and bias
|
||||||
def backward(self, x, dcost_dy, learning_rate):
|
def backward(self, x, dcost_dy, learning_rate):
|
||||||
dy_dz = sigmoid_deriv(self.last_z)
|
"""
|
||||||
|
x : list of input values to the neuron
|
||||||
|
dcost_dy : derivative of the cost function `(2 * (output - target))`
|
||||||
|
learning_rate : learning factor (adjusts the speed of weight/bias change during training)
|
||||||
|
|
||||||
|
weight -= learning_rate * dC/dy * dy/dz * dz/dw
|
||||||
|
bias -= learning_rate * dC/dy * dy/dz * dz/db
|
||||||
|
"""
|
||||||
|
# dy/dz: derivative of the sigmoid activation
|
||||||
|
dy_dz = sigmoid_deriv(self.z)
|
||||||
|
# dz/dw = x
|
||||||
dz_dw = x
|
dz_dw = x
|
||||||
|
# dz/db = 1
|
||||||
dz_db = 1
|
dz_db = 1
|
||||||
|
|
||||||
for i in range(self.isize):
|
for i in range(self.isize):
|
||||||
|
# update all weights by `learning_rate * cost * derivative of sigmoid * dz/dw`
|
||||||
self.weight[i] -= learning_rate * dcost_dy * dy_dz * dz_dw[i]
|
self.weight[i] -= learning_rate * dcost_dy * dy_dz * dz_dw[i]
|
||||||
|
|
||||||
|
# update bias by `learning_rate * cost * derivative of sigmoid * dz/db`
|
||||||
self.bias -= learning_rate * dcost_dy * dy_dz * dz_db
|
self.bias -= learning_rate * dcost_dy * dy_dz * dz_db
|
||||||
|
|
||||||
# def forward(self, inputs: list[float]) -> float:
|
# def forward(self, inputs: list[float]) -> float:
|
||||||
|
|||||||
Reference in New Issue
Block a user