* Fix a bug and add a quick multiplayer mode

* Fix a bug

* Update the main method

* Program roughly finished for the AI

* Fix a bug

* Robot (#3)

* Robot that does not work yet

* Dumb robot, but it runs fine; works very well locally, but there is a problem over the network

* Robot finished, but there is still a problem over the network

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

---------

Co-authored-by: Cpt-Adok <theo.faria@laposte.net>
Co-authored-by: Cpt-Adok <126670243+Cpt-Adok@users.noreply.github.com>

* Fix a bug and add learn.ser

---------

Co-authored-by: Cpt-Adok <theo.faria@laposte.net>
Co-authored-by: Cpt-Adok <126670243+Cpt-Adok@users.noreply.github.com>
Author: Loïc GUEZO
Date: 2024-05-26 23:33:12 +02:00
Committed by: GitHub
Parent: 79178886b6
Commit: 6cced603df

20 changed files with 1093 additions and 43 deletions

src/tests/IATest.java (new file, 149 lines)

@@ -0,0 +1,149 @@
package tests;

import java.io.File;

import IA.QTable;
import IA.State;
import environnements.Grid;
import environnements.Map;
import personnages.IAQLearning;
import personnages.Personnage;
import types.Mouvement;

public class IATest {

    private final static String path = "res" + File.separator +
                                       "save" + File.separator +
                                       "learn.ser";

    /** Trains a single Q-learning agent alone on a 20x20 map. */
    public static void learnIA() {
        double alpha = 0.1;       // learning rate
        double gamma = 0.9;       // discount factor
        double epsilon = 1.0;     // exploration rate, decayed each episode
        double decayRate = 0.995;
        double minEpsilon = 0.01;
        int totalEpisodes = 1000;
        Personnage.n = 2;

        for (int episode = 0; episode < totalEpisodes; episode++) {
            QTable qTable = new QTable();
            IAQLearning iaqLearning = new IAQLearning(new int[] {0, 0}, qTable, alpha, gamma, epsilon);
            Map map = new Map(20, 20);
            // Resume from the Q-values saved by previous episodes.
            qTable.getValues(path);

            while (true) {
                // Work on a copy of the grid so the next state can be observed
                // after the move without mutating the real map first.
                Map mapIA = new Map(map.getGrid()[0].length, map.getGrid().length);
                mapIA.replaceGrid(map.getGrid());
                map.placePersonnages(iaqLearning);

                State currentState = iaqLearning.getCurrentState(map.getGrid());
                Mouvement mouvement = iaqLearning.bestMouvement(currentState);
                iaqLearning.moveSnake(mouvement);

                int[] coordinate = iaqLearning.getHeadCoordinate();
                if (map.isGameOver(coordinate) || iaqLearning.applyEffects(map.getEffect(coordinate))) {
                    // Terminal state: punish the losing move and end the episode.
                    iaqLearning.receiveReward(currentState, mouvement, -1.0, currentState);
                    break;
                }

                mapIA.placePersonnages(iaqLearning);
                State nextState = iaqLearning.getCurrentState(mapIA.getGrid());
                // Small positive reward for surviving one more step.
                iaqLearning.receiveReward(currentState, mouvement, 0.1, nextState);
                iaqLearning.increaseRound();
                mapIA.clearMap();
                map.clearMap();
            }

            qTable.save(path);
            // Decay exploration so the agent exploits more as training progresses.
            epsilon = Math.max(minEpsilon, epsilon * decayRate);
            System.out.println("Episode: " + episode + " | Robot 1 States: " + qTable.getqValues().size());
        }
    }

    /** Trains two Q-learning agents against each other on a shared Q-table. */
    public static void learnIAvsIA() {
        double alpha = 0.1;
        double gamma = 0.9;
        // One exploration rate per agent. The original {1.0,} held a single
        // element, so reading epsilon[1] below threw ArrayIndexOutOfBoundsException.
        double[] epsilon = new double[] {1.0, 1.0};
        double decayRate = 0.995;
        double minEpsilon = 0.01;
        int totalEpisodes = 1000;
        Personnage.n = 4;

        for (int episode = 0; episode < totalEpisodes; episode++) {
            QTable qTable = new QTable();
            IAQLearning[] iaqLearnings = new IAQLearning[] {
                new IAQLearning(new int[] {2, 2}, qTable, alpha, gamma, epsilon[0]),
                new IAQLearning(new int[] {9, 19}, qTable, alpha, gamma, epsilon[1])
            };
            Map map = new Map(12, 22);
            boolean isGameOver = false;
            qTable.getValues(path);

            while (!isGameOver) {
                for (int i = 0; i < iaqLearnings.length; i++) {
                    IAQLearning iaqLearning = iaqLearnings[i];
                    Grid[][] gridMap = map.getGrid();
                    Map mapIA = new Map(gridMap[0].length, gridMap.length);
                    mapIA.replaceGrid(gridMap);
                    for (IAQLearning value : iaqLearnings) {
                        map.placePersonnages(value);
                    }

                    State currentState = iaqLearning.getCurrentState(map.getGrid());
                    Mouvement mouvement = iaqLearning.bestMouvement(currentState);
                    iaqLearning.moveSnake(mouvement);

                    // Colliding with the other snake ends the episode: the mover
                    // is punished, its opponent rewarded.
                    int[] coordinate = iaqLearning.getHeadCoordinate();
                    for (int[] snakeCoordinate : iaqLearnings[(i + 1) % 2].getCoordinate()) {
                        if (coordinate[0] == snakeCoordinate[0] && coordinate[1] == snakeCoordinate[1]) {
                            iaqLearning.receiveReward(currentState, mouvement, -10.0, currentState);
                            iaqLearnings[(i + 1) % 2].receiveReward(currentState, mouvement, 10.0, currentState);
                            // The original never set this flag, so the outer
                            // loop could not terminate.
                            isGameOver = true;
                            break;
                        }
                    }
                    if (isGameOver) break;

                    mapIA.placePersonnages(iaqLearning);
                    State nextState = iaqLearning.getCurrentState(mapIA.getGrid());
                    // Small negative reward per step pushes agents to act rather than stall.
                    iaqLearning.receiveReward(currentState, mouvement, -0.1, nextState);
                    iaqLearning.increaseRound();
                    mapIA.clearMap();
                    map.clearMap();
                }
            }

            qTable.save(path);
            for (int i = 0; i < epsilon.length; i++) {
                epsilon[i] = Math.max(minEpsilon, epsilon[i] * decayRate);
            }
            System.out.println("Episode: " + episode + " | Robot 1 States: " + qTable.getqValues().size());
        }
    }
}
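Note: the Q-value update itself lives in IA.QTable and receiveReward, which are not part of this diff. The stand-alone sketch below shows the textbook Q-learning update those calls presumably perform; the class and member names here (QUpdateSketch, update, maxValue, key) are hypothetical stand-ins, not the project's actual API.

import java.util.HashMap;

/**
 * Minimal sketch, assuming a tabular Q-learning update of the usual form
 * Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)).
 * States and actions are plain strings to keep the example self-contained.
 */
public class QUpdateSketch {
    private final HashMap<String, Double> qValues = new HashMap<>();

    /** Applies one Q-learning update for the transition (state, action, reward, nextState). */
    public void update(String state, String action, double reward,
                       String nextState, String[] actions,
                       double alpha, double gamma) {
        double oldQ = qValues.getOrDefault(key(state, action), 0.0);
        double bestNext = maxValue(nextState, actions);
        double newQ = oldQ + alpha * (reward + gamma * bestNext - oldQ);
        qValues.put(key(state, action), newQ);
    }

    /** max over all actions of Q(state, a'), with unseen pairs defaulting to 0. */
    private double maxValue(String state, String[] actions) {
        double best = Double.NEGATIVE_INFINITY;
        for (String a : actions) {
            best = Math.max(best, qValues.getOrDefault(key(state, a), 0.0));
        }
        return best;
    }

    private String key(String state, String action) {
        return state + "|" + action;
    }
}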