mirror of
https://github.com/guezoloic/neural-network.git
synced 2026-01-25 09:34:23 +00:00
feat(nnetwork.ipynb): Step 1, 2, 3
- Initialization - Activation Functions - Forward Pass Function also change `self.last_output` to `last_output` in network.py
This commit is contained in:
12
network.py
12
network.py
@@ -25,22 +25,16 @@ class Neuron:
|
|||||||
# importance of the neuron
|
# importance of the neuron
|
||||||
self.bias = random.uniform(-1, 1)
|
self.bias = random.uniform(-1, 1)
|
||||||
|
|
||||||
# last z (linear combination) value
|
|
||||||
self.z = 0
|
|
||||||
# last output sigmoid(z)
|
|
||||||
self.last_output = 0
|
|
||||||
|
|
||||||
def forward(self, x, activate=True):
|
def forward(self, x, activate=True):
|
||||||
"""
|
"""
|
||||||
x : list of input values to the neuron
|
x : list of input values to the neuron
|
||||||
"""
|
"""
|
||||||
# computes the weighted sum of inputs and add the bias
|
# computes the weighted sum of inputs and add the bias
|
||||||
self.z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias
|
self.z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias
|
||||||
# normalize the output between 0 and 1
|
# normalize the output between 0 and 1 if activate
|
||||||
if activate: self.last_output = sigmoid(self.z)
|
last_output = sigmoid(self.z) if activate else self.z
|
||||||
else: self.last_output = self.z
|
|
||||||
|
|
||||||
return self.last_output
|
return last_output
|
||||||
|
|
||||||
# adjust weight and bias of neuron
|
# adjust weight and bias of neuron
|
||||||
def backward(self, x, dcost_dy, learning_rate):
|
def backward(self, x, dcost_dy, learning_rate):
|
||||||
|
|||||||
148
nnetwork.ipynb
148
nnetwork.ipynb
@@ -13,31 +13,24 @@
|
|||||||
"id": "478651c8",
|
"id": "478651c8",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## What is a *Neuron* (artifical)\n",
|
"## What is a *Neuron* (artificial)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"> **disclaimer**: I'm not an Neurologist. This is only based on online research.\n",
|
"> **disclaimer**: I'm no neurologist. This explanation below is only based on online research.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"An **artificial neuron** works *similary* to a **biological neron** in the way it process information.\n",
|
"An **artificial neuron** works *similarly* to a **biological neuron** in the way it processes information.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In a brain, like yours, a neuron receive **electrical signals** from others, process them and sends an output signal.\n",
|
"In a brain (like yours), a **biological neuron** receives **electrical signals** from others, processes them, and sends an output signal.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"An **artifical neuron** countrary to biological ones:\n",
|
"An **artificial neuron** contrary to biological ones, follows these steps:\n",
|
||||||
"1. **Takes inputs** (usually numbers between 0 and 1).\n",
|
"1. **Takes inputs** (usually numbers between 0 and 1).\n",
|
||||||
"2. **Multiplies** each by a corresponding **weight** (importance of that input).\n",
|
"2. **Multiplies** each by a corresponding **weight** (representing the importance of that input).\n",
|
||||||
"3. **Adds a bias**, which shifts the result up or down.\n",
|
"3. **Adds a bias**, which shifts the result up or down.\n",
|
||||||
"4. **Applies an activation function**, which normalizes or squashes the output (commonly: **sigmoid**, **ReLU**, etc.).\n",
|
"4. **Applies an activation function**, which normalizes or squashes the output (commonly: **sigmoid**, **ReLU**, etc.).\n",
|
||||||
"5. **Returns the final output**, often a value between 0 and 1. \n",
|
"5. **Returns the final output**, often a value between 0 and 1. \n",
|
||||||
"\n",
|
"\n",
|
||||||
"---\n",
|
"---\n",
|
||||||
"\n",
|
"\n",
|
||||||
"## Vocabulary / key components\n",
|
"## Vocabulary / Key Components\n",
|
||||||
"1. **inputs**: inputs are usually a unique list of numbers, they are simply values sent to a neuron, which then process them.\n",
|
|
||||||
"2. **weights**: weights are also a list of numbers that has the same size of inputs. The weight determines how important de the number of the input is. If it's high, the input matters. Else, if the weight is low, the number matters less.\n",
|
|
||||||
"3. **bias**: the bias are constant that are added after all the inputs are multiplied by the weight. it helps shift the resultat up or down.\n",
|
|
||||||
"\n",
|
|
||||||
"---\n",
|
|
||||||
"\n",
|
|
||||||
"## 🔑 Vocabulary / Key Components\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"| Term | Meaning |\n",
|
"| Term | Meaning |\n",
|
||||||
"|----------|---------|\n",
|
"|----------|---------|\n",
|
||||||
@@ -48,21 +41,21 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"---\n",
|
"---\n",
|
||||||
"\n",
|
"\n",
|
||||||
"## ⚙️ Minimal Neuron Implementation\n",
|
"## Minimal Neuron Implementation\n",
|
||||||
"\n",
|
"\n",
|
||||||
"### Step 1 – Initialization"
|
"### Step 1 – Initialization"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": 18,
|
||||||
"id": "7d9d6072",
|
"id": "7d9d6072",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import random\n",
|
"import random\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# neuron class\n",
|
"# neuron class 1\n",
|
||||||
"class Neuron:\n",
|
"class Neuron:\n",
|
||||||
" \"\"\"\n",
|
" \"\"\"\n",
|
||||||
" z : linear combination of inputs and weights plus bias (pre-activation)\n",
|
" z : linear combination of inputs and weights plus bias (pre-activation)\n",
|
||||||
@@ -82,45 +75,120 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"id": "6dd28c51",
|
"id": "6dd28c51",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": []
|
"source": [
|
||||||
|
"On their own, you can't do much yet, but they form a good starting point to illustrate how a neuron behaves: \n",
|
||||||
|
"it takes an input size as a parameter, generates a corresponding list of random weights, and assigns a random bias."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "0c47647c",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Step 2 – Activation Functions"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 17,
|
||||||
|
"id": "ee0fdb39",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import math\n",
|
||||||
|
"\n",
|
||||||
|
"# transform all numbers between 0 and 1\n",
|
||||||
|
"def sigmoid(x):\n",
|
||||||
|
" return 1 / (1 + math.exp(-x))\n",
|
||||||
|
"\n",
|
||||||
|
"# sigmoid's derivation\n",
|
||||||
|
"def sigmoid_deriv(x): \n",
|
||||||
|
" y = sigmoid(x)\n",
|
||||||
|
" return y * (1 - y)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "79e011c2",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"These functions are called activation functions. Their goal is to transform any raw values (which can be any number) into a more reasonable range, usually between 0 and 1. The most well-known ones are:\n",
|
||||||
|
"- sigmoid \n",
|
||||||
|
"- ReLU (Rectified Linear Unit)\n",
|
||||||
|
"- Tanh\n",
|
||||||
|
"\n",
|
||||||
|
"### Sigmoid Graphical Representation\n",
|
||||||
|
""
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"id": "1aff9ee6",
|
"id": "1aff9ee6",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# 2"
|
"## Step 3 - Forward Pass Function"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": 19,
|
||||||
"id": "7ca39a42",
|
"id": "7ca39a42",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import math\n",
|
"# neuron class 2\n",
|
||||||
"import random\n",
|
|
||||||
"\n",
|
|
||||||
"# Neuron 2\n",
|
|
||||||
"class Neuron:\n",
|
"class Neuron:\n",
|
||||||
" def __init__(self, input_size: int) -> None:\n",
|
" \"\"\"\n",
|
||||||
" self.input_size = input_size\n",
|
" z : linear combination of inputs and weights plus bias (pre-activation)\n",
|
||||||
" self.weight = [random.uniform(0, 1) for _ in range(self.input_size)]\n",
|
" y : output of the activation function (sigmoid(z))\n",
|
||||||
" self.bias = random.uniform(0, 1)\n",
|
" w : list of weights, one for each input\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" def __init__(self, isize):\n",
|
||||||
|
" # number of inputs to this neuron\n",
|
||||||
|
" self.isize = isize\n",
|
||||||
|
" # importance to each input\n",
|
||||||
|
" self.weight = [random.uniform(-1, 1) for _ in range(self.isize)]\n",
|
||||||
|
" # importance of the neuron\n",
|
||||||
|
" self.bias = random.uniform(-1, 1)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" def sigmoid(x: float) -> float:\n",
|
" def forward(self, x, activate=True):\n",
|
||||||
" return 1/(1 + math.exp(-x))\n",
|
" \"\"\"\n",
|
||||||
|
" x : list of input values to the neuron\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" # computes the weighted sum of inputs and add the bias\n",
|
||||||
|
" self.z = sum(w * xi for w, xi in zip(self.weight, x)) + self.bias\n",
|
||||||
|
" # normalize the output between 0 and 1 if activate\n",
|
||||||
|
" output = sigmoid(self.z) if activate else self.z\n",
|
||||||
"\n",
|
"\n",
|
||||||
" def forward(self, inputs: list) -> float:\n",
|
" return output"
|
||||||
" assert len(inputs) == self.input_size, \"error: misnumber inputs number\"\n",
|
]
|
||||||
" total = sum(self.weight[i] * inputs[i] for i in range(self.isize)) + self.bias\n",
|
},
|
||||||
" return self.sigmoid(total)"
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "3e7b79fa",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The `forward()` method simulates how a neuron processes its inputs:\n",
|
||||||
|
"1. **Weighted Sum and Bias** (z variable): \n",
|
||||||
|
" \n",
|
||||||
|
" Each input is multiplied by its corresponding weight, then all are summed and the bias added.\n",
|
||||||
|
" ```z = w1 * x1 + w2 * x2 + .... + bias```\n",
|
||||||
|
"\n",
|
||||||
|
"2. **Activation**: \n",
|
||||||
|
"\n",
|
||||||
|
" The z output is then passed through an **activation function** (like sigmoid). This squashes the output between 0 and 1.\n",
|
||||||
|
" However, it can be disabled with `activate=False`. It's useful for **output neurons** in some tasks.\n",
|
||||||
|
"\n",
|
||||||
|
"3. **Returns the output**:\n",
|
||||||
|
"\n",
|
||||||
|
" This value becomes the neuron's final output.\n",
|
||||||
|
"\n",
|
||||||
|
"#### Test - Forward Pass"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": 20,
|
||||||
"id": "6709c5c7",
|
"id": "6709c5c7",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
@@ -128,7 +196,7 @@
|
|||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"Neuron output : 0.9001175686881125\n"
|
"Neuron output : 0.7539649973230405\n"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -142,6 +210,14 @@
|
|||||||
"print(\"Neuron output :\", output)"
|
"print(\"Neuron output :\", output)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "5593a84a",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The test result is a bit random due to the randomly initialized weights and bias in each Neuron. None of the neurons has been trained for this input."
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"id": "aa57ae8e",
|
"id": "aa57ae8e",
|
||||||
|
|||||||
Reference in New Issue
Block a user