mirror of
https://github.com/guezoloic/neural-network.git
synced 2026-01-25 03:34:21 +00:00
feat(main.py): add Layer class
main.py (36 lines changed)
@@ -1,24 +1,26 @@
 import math
 import random
-
+def sigmoid(x: float) -> float:
+    return 1/(1 + math.exp(-x))
+
+
 class Neuron:
     def __init__(self, isize: int) -> None:
         self.isize = isize
-        self.weight = [random.uniform(0, 1) for _ in range(self.isize)]
-        self.bias = random.uniform(0, 1)
+        self.weight = [random.uniform(-1, 1) for _ in range(self.isize)]
+        self.bias = random.uniform(-1, 1)
 
-    def forward(self, inputs: list) -> float:
+    def forward(self, inputs: list[float]) -> float:
         assert len(inputs) == self.isize, "error: incorrect inputs number"
         total = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
-        return self.sigmoid(total)
+        return sigmoid(total)
 
-    def sigmoid(self, x: float) -> float:
-        return 1/(1 + math.exp(-x))
+    def train(self, inputs: list[float], target: float, learning_rate: float = 0.1):
+        assert len(inputs) == self.isize, "error: incorrect inputs number"
 
     # target needs to be between 0 and 1
-    def train(self, inputs: list, target: float, learning_rate: float = 0.1):
         z = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
-        output = self.sigmoid(z)
+        output = sigmoid(z)
 
         error = output - target
         d_sigmoid = output * (1 - output)
@@ -28,3 +30,19 @@ class Neuron:
             self.weight[i] -= learning_rate * dz * inputs[i]
 
         self.bias -= learning_rate * dz
+
+class Layer:
+    def __init__(self, input_size, output_size):
+        self.size = output_size
+        self.neurons = [Neuron(input_size) for _ in range(output_size)]
+
+    def forward(self, inputs):
+        return [n.forward(inputs) for n in self.neurons]
+
+    def train(self, inputs: list[float], targets: list[float], learning_rate: float = 0.1):
+        outputs = self.forward(inputs)
+
+        errors = [outputs[i] - targets[i] for i in range(self.size)]
+
+        for i in range(self.size):
+            self.neurons[i].train(inputs, targets[i], learning_rate)
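
For reference, a minimal usage sketch of the new Layer class (not part of the commit), assuming it is appended to main.py below the definitions above; the OR dataset, iteration count, and learning rate here are illustrative choices only:

if __name__ == "__main__":
    # Train a 2-input, 1-output layer on the OR truth table; targets stay in [0, 1]
    # as Neuron.train expects.
    layer = Layer(2, 1)
    dataset = [([0.0, 0.0], [0.0]), ([0.0, 1.0], [1.0]),
               ([1.0, 0.0], [1.0]), ([1.0, 1.0], [1.0])]

    for _ in range(5000):
        inputs, targets = random.choice(dataset)  # random is imported at the top of main.py
        layer.train(inputs, targets, learning_rate=0.5)

    # Outputs should roughly approach [0, 1, 1, 1] after training.
    print([round(layer.forward(x)[0], 2) for x, _ in dataset])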