feat(main.py): add layer class

2025-06-01 08:29:24 +02:00
parent cb7a82ba9f
commit f8ab6cf4ea

main.py (65 lines changed)

@@ -31,13 +31,16 @@ class Neuron:
         self.last_output = 0

     def forward(self, x):
+        """
+        x : list of input values to the neuron
+        """
         # computes the weighted sum of inputs and add the bias
         self.z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias
         # normalize the output between 0 and 1
         self.last_output = sigmoid(self.z)
         return self.last_output

-    # adjust weight and bias
+    # adjust weight and bias of neuron
     def backward(self, x, dcost_dy, learning_rate):
         """
         x : list of input values to the neuron
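Both hunks lean on a `sigmoid` helper that the diff does not show. A minimal sketch of what main.py presumably defines earlier (an assumption, not part of this commit), together with the derivative identity behind the `dy_dz` factor used in `backward`:

    import math

    def sigmoid(z):
        # squash z into the open interval (0, 1)
        return 1.0 / (1.0 + math.exp(-z))

    # For y = sigmoid(z), dy/dz = y * (1 - y), so backward can
    # presumably reuse self.last_output instead of recomputing sigmoid.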
@@ -55,44 +58,44 @@ class Neuron:
         dz_db = 1
         for i in range(self.isize):
-            # update all weights by `learning_rate * cost * derivative of sigmoid * dz/dw`
+            # update each weight: `weight -= learning_rate * dC/dy * dy/dz * x_i`
             self.weight[i] -= learning_rate * dcost_dy * dy_dz * dz_dw[i]
-        # update bias by `learning_rate * cost * derivative of sigmoid * dz/db`
+        # update bias: bias -= learning_rate * dC/dy * dy/dz * dz/db
         self.bias -= learning_rate * dcost_dy * dy_dz * dz_db
+        # return the gradient with respect to the inputs (len(input) dimension)
+        return [dcost_dy * dy_dz * w for w in self.weight]

-    # def forward(self, inputs: list[float]) -> float:
-    #     assert len(inputs) == self.isize, "error: incorrect inputs number"
-    #     total = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
-    #     return sigmoid(total)

-    # def train(self, inputs: list[float], target: float, learning_rate: float = 0.1):
-    #     assert len(inputs) == self.isize, "error: incorrect inputs number"
-    #     z = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
-    #     output = sigmoid(z)
-    #     error = output - target
-    #     d_sigmoid = output * (1 - output)
-    #     dz = error * d_sigmoid
-    #     for i in range(self.isize):
-    #         self.weight[i] -= learning_rate * dz * inputs[i]
-    #     self.bias -= learning_rate * dz
+
+class Layer:
+    def __init__(self, input_size, output_size):
+        """
+        input_size : size of each neuron's input
+        output_size : number of neurons in the layer
+        """
+        self.size = output_size
+        # list of neurons
+        self.neurons = [Neuron(input_size) for _ in range(output_size)]

-    # class Layer:
-    #     def __init__(self, input_size, output_size):
-    #         self.size = output_size
-    #         self.neurons = [Neuron(output_size) for _ in range(input_size)]
-    #     def forward(self, inputs):
-    #         return [n.forward(inputs) for n in self.neurons]
-    #     def train(self, inputs: list[float], targets: list[float], learning_rate: float = 0.1):
-    #         outputs = self.forward(inputs)
-    #         errors = [outputs[i] - targets[i] for i in range(self.size)]
-    #         for i in range(self.neurons):
-    #             self.neurons[i].train(inputs, errors[i], learning_rate)
+    def forward(self, inputs):
+        self.inputs = inputs
+        # compute and return the outputs of all neurons in the layer
+        return [neuron.forward(inputs) for neuron in self.neurons]
+
+    # adjust weight and bias of the layer (all neurons)
+    def backward(self, dcost_dy_list, learning_rate=0.1):
+        # init the layer gradient vector, len(input) dimension
+        input_gradients = [0.0] * len(self.inputs)
+        for i, neuron in enumerate(self.neurons):
+            dcost_dy = dcost_dy_list[i]
+            grad_to_input = neuron.backward(self.inputs, dcost_dy, learning_rate)
+            # accumulate the input gradients from all neurons
+            for j in range(len(grad_to_input)):
+                input_gradients[j] += grad_to_input[j]
+        # return the layer gradient
+        return input_gradients
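Because `Neuron.backward` now returns its input gradient and `Layer.backward` accumulates them, layers can be chained: each layer's input gradient becomes the `dcost_dy_list` of the layer below it. A hypothetical wiring sketch (the names `hidden`, `out`, `x`, and `target` are illustrative, not from the commit; a squared-error cost is assumed):

    # two stacked layers: 2 inputs -> 3 hidden neurons -> 1 output neuron
    hidden = Layer(input_size=2, output_size=3)
    out = Layer(input_size=3, output_size=1)

    x = [0.5, -0.2]
    target = [1.0]

    for _ in range(1000):
        h = hidden.forward(x)
        y = out.forward(h)
        # dC/dy for a squared-error cost C = (y - t)^2, per output
        dcost_dy = [2.0 * (yi - ti) for yi, ti in zip(y, target)]
        # each backward call updates its own neurons and hands the
        # input gradient down to the previous layer
        grad_h = out.backward(dcost_dy, learning_rate=0.1)
        hidden.backward(grad_h, learning_rate=0.1)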