fix: rework partial main.py (might change soon)

2025-05-29 19:17:41 +02:00
parent ee9662f37f
commit ab2ee46422
2 changed files with 60 additions and 185 deletions

main.py

@@ -1,48 +1,78 @@
```python
import math
import random


# squashes any number into a value between 0 and 1
def sigmoid(x):
    return 1 / (1 + math.exp(-x))


# sigmoid's derivative
def sigmoid_deriv(x):
    y = sigmoid(x)
    return y * (1 - y)


# neuron class
class Neuron:
    def __init__(self, isize):
        # number of inputs to this neuron
        self.isize = isize
        # importance of each input
        self.weight = [random.uniform(-1, 1) for _ in range(self.isize)]
        # constant offset of the neuron
        self.bias = random.uniform(-1, 1)
        # last z (linear combination) value
        self.last_z = 0
        # last output, sigmoid(z)
        self.last_output = 0

    def forward(self, x):
        z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias
        self.last_z = z
        self.last_output = sigmoid(z)
        return self.last_output

    def backward(self, x, dcost_dy, learning_rate):
        dy_dz = sigmoid_deriv(self.last_z)
        dz_dw = x
        dz_db = 1
        for i in range(self.isize):
            self.weight[i] -= learning_rate * dcost_dy * dy_dz * dz_dw[i]
        self.bias -= learning_rate * dcost_dy * dy_dz * dz_db

    # def forward(self, inputs: list[float]) -> float:
    #     assert len(inputs) == self.isize, "error: incorrect number of inputs"
    #     total = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
    #     return sigmoid(total)

    # def train(self, inputs: list[float], target: float, learning_rate: float = 0.1):
    #     assert len(inputs) == self.isize, "error: incorrect number of inputs"
    #     z = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
    #     output = sigmoid(z)
    #     error = output - target
    #     d_sigmoid = output * (1 - output)
    #     dz = error * d_sigmoid
    #     for i in range(self.isize):
    #         self.weight[i] -= learning_rate * dz * inputs[i]
    #     self.bias -= learning_rate * dz


# class Layer:
#     def __init__(self, input_size, output_size):
#         self.size = output_size
#         self.neurons = [Neuron(input_size) for _ in range(output_size)]
#
#     def forward(self, inputs):
#         return [n.forward(inputs) for n in self.neurons]
#
#     def train(self, inputs: list[float], targets: list[float], learning_rate: float = 0.1):
#         outputs = self.forward(inputs)
#         errors = [outputs[i] - targets[i] for i in range(self.size)]
#         for i in range(len(self.neurons)):
#             self.neurons[i].train(inputs, errors[i], learning_rate)
```
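The reworked `backward` applies the chain rule: with output y = sigmoid(z) and z = w·x + b, each weight is nudged by dC/dw_i = dC/dy · dy/dz · x_i, and the caller supplies dC/dy. A minimal usage sketch (not part of this commit), assuming a squared-error cost C = (y - t)^2 so that dC/dy = 2(y - t):

```python
from main import Neuron

# toy task: teach a single neuron the AND function
data = [([0, 0], 0), ([0, 1], 0), ([1, 0], 0), ([1, 1], 1)]

n = Neuron(2)
for _ in range(5000):
    for x, t in data:
        y = n.forward(x)
        dcost_dy = 2 * (y - t)  # derivative of (y - t)**2 w.r.t. y
        n.backward(x, dcost_dy, learning_rate=0.5)

for x, t in data:
    print(x, "->", round(n.forward(x), 3), "target", t)
```

AND is linearly separable, so one sigmoid neuron is enough; the outputs should land near 0, 0, 0, 1.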

the notebook (.ipynb; filename not shown in this view)

@@ -1,160 +1,5 @@
The commit deletes every cell (the new file keeps an empty "cells" list plus the metadata shown at the end). The removed cells:

# Neural Network

## What is a *Neuron* (artificial)?

First of all, **I'm not a neurologist, so I might say some nonsense; I only researched this online.**

An artificial *neuron* works similarly to a biological *neuron* in the way it processes information. In a brain, like yours, a *neuron* receives signals from other *neurons*, processes them, and sends an *output*.

An artificial *neuron* takes **inputs** (such as numbers), applies an adjustable value called a **weight** to each *input*, adds a constant called a **bias**, passes the result through a normalizing **activation function** (such as **sigmoid**, **ReLU**, etc.), and then `returns` that function's *output*.

## Vocabulary / key components

1. **inputs**: usually a single list of numbers; simply the values sent to a neuron, which then processes them.

2. **weights**: also a list of numbers, the same size as the inputs. A weight determines how much its input matters: if the weight is high, that input counts more; if it is low, the input counts less.

3. **bias**: a constant added after all the inputs have been multiplied by their weights; it shifts the result up or down.
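A quick worked example (not in the original notebook): with inputs [1, 0, 1], weights [0.5, -0.2, 0.8], and bias 0.1, the neuron computes z = 1(0.5) + 0(-0.2) + 1(0.8) + 0.1 = 1.4, and sigmoid(1.4) = 1 / (1 + e^-1.4) ≈ 0.802, so this neuron outputs roughly 0.8.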
```python
import random

# Neuron 1
class Neuron:
    def __init__(self, input_size: int) -> None:
        self.input_size = input_size
        self.weight = [random.uniform(0, 1) for _ in range(self.input_size)]
        self.bias = random.uniform(0, 1)
```
# 2

```python
import math
import random

# Neuron 2
class Neuron:
    def __init__(self, input_size: int) -> None:
        self.input_size = input_size
        self.weight = [random.uniform(0, 1) for _ in range(self.input_size)]
        self.bias = random.uniform(0, 1)

    def sigmoid(self, x: float) -> float:
        return 1 / (1 + math.exp(-x))

    def forward(self, inputs: list) -> float:
        assert len(inputs) == self.input_size, "error: incorrect number of inputs"
        total = sum(self.weight[i] * inputs[i] for i in range(self.input_size)) + self.bias
        return self.sigmoid(total)
```
```python
# 8 for 8 bits (1 byte)
nbits: int = 8
neuron = Neuron(nbits)
inputs: list = [1, 0, 1, 0, 0, 1, 1, 0]

output = neuron.forward(inputs)
print("Neuron output :", output)
```

Output:

    Neuron output : 0.9001175686881125
# 3

```python
import math
import random

# Neuron 3
class Neuron:
    def __init__(self, isize: int) -> None:
        self.isize = isize
        self.weight = [random.uniform(0, 1) for _ in range(self.isize)]
        self.bias = random.uniform(0, 1)

    def forward(self, inputs: list) -> float:
        assert len(inputs) == self.isize, "error: incorrect number of inputs"
        total = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
        return self.sigmoid(total)

    def sigmoid(self, x: float) -> float:
        return 1 / (1 + math.exp(-x))

    # target needs to be between 0 and 1
    def train(self, inputs: list, target: float, learning_rate: float = 0.1):
        z = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias
        output = self.sigmoid(z)

        error = output - target
        d_sigmoid = output * (1 - output)
        dz = error * d_sigmoid

        for i in range(self.isize):
            self.weight[i] -= learning_rate * dz * inputs[i]

        self.bias -= learning_rate * dz
```
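The removed notebook defined `train` but never called it. A minimal sketch of how it could have been exercised (assumed usage, not from the notebook), pulling the output toward a target of 0:

```python
n = Neuron(4)
x = [1, 0, 1, 1]

print("before:", n.forward(x))  # weights start in (0, 1), so this is well above 0
for _ in range(1000):
    n.train(x, target=0.0)
print("after:", n.forward(x))   # should now be close to 0
```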
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": ".venv", "display_name": ".venv",