J'aimerais (edgar13 aussi) savoir comment faire une IA en TI-Basic
(j'ai déjà essayé mais je n'ai pas réussi...)

import math
def sigmoid(x):
    """Logistic activation: map any real x into the open interval (0, 1).

    Numerically stable: math.exp(-x) overflows for x below roughly -709,
    so negative inputs use the algebraically equivalent form
    exp(x) / (1 + exp(x)), where exp(x) can only underflow to 0.0.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    e = math.exp(x)
    return e / (1.0 + e)
def train(weights_ih, weights_ho, inputs, targets, learning_rate):
    """One step of online gradient descent for a one-hidden-layer MLP.

    Runs a forward pass, backpropagates the output error, and updates
    both weight matrices IN PLACE. Returns None.

    Args:
        weights_ih: input->hidden weights, shape [len(inputs)][n_hidden].
        weights_ho: hidden->output weights, shape [n_hidden][n_output].
        inputs: one training example (list of floats).
        targets: expected outputs for that example (list of floats).
        learning_rate: gradient-descent step size.
    """
    n_hidden = len(weights_ih[0])
    n_output = len(weights_ho[0])

    # --- Forward pass ---
    hidden = [0.0] * n_hidden
    for i in range(n_hidden):
        hidden[i] = sigmoid(
            sum(inputs[j] * weights_ih[j][i] for j in range(len(inputs)))
        )
    output = [0.0] * n_output
    for i in range(n_output):
        output[i] = sigmoid(
            sum(hidden[j] * weights_ho[j][i] for j in range(n_hidden))
        )

    # --- Backward pass ---
    output_errors = [targets[i] - output[i] for i in range(n_output)]
    # Hidden-layer error: output errors propagated back through weights_ho.
    hidden_errors = [
        sum(output_errors[j] * weights_ho[i][j] for j in range(n_output))
        for i in range(n_hidden)
    ]

    # --- Weight updates ---
    # BUG FIX: the original multiplied by inputs[i] * (1 - inputs[i]);
    # the sigmoid-derivative factor belongs to the *hidden* activation
    # (hidden[j] * (1 - hidden[j])), not to the raw input.
    for i in range(len(weights_ih)):
        for j in range(n_hidden):
            weights_ih[i][j] += (
                learning_rate
                * hidden_errors[j]
                * hidden[j] * (1 - hidden[j])
                * inputs[i]
            )
    # BUG FIX: likewise, the derivative factor here is
    # output[j] * (1 - output[j]), not hidden[i] * (1 - hidden[i]).
    for i in range(len(weights_ho)):
        for j in range(n_output):
            weights_ho[i][j] += (
                learning_rate
                * output_errors[j]
                * output[j] * (1 - output[j])
                * hidden[i]
            )
# --- Network dimensions (e.g. 28x28 flattened image -> 10 classes) ---
input_size = 784
hidden_size = 128
output_size = 10

# NOTE(review): all-zero weights make every hidden unit identical and all
# gradients symmetric — real training needs small random initialization.
# Kept at zero here to preserve the original setup.
weights_ih = [[0.0] * hidden_size for _ in range(input_size)]
weights_ho = [[0.0] * output_size for _ in range(hidden_size)]
# Placeholder training example (all zeros); replace with real data.
inputs = [0.0] * input_size
targets = [0.0] * output_size

# --- Training parameters ---
max_iterations = 100
initial_learning_rate = 0.1

# --- Training loop ---
for iteration in range(max_iterations):
    # 1/t learning-rate decay.
    learning_rate = initial_learning_rate / (iteration + 1)
    train(weights_ih, weights_ho, inputs, targets, learning_rate)
    # Other training steps (batching, shuffling, validation) would go here.

# --- Test phase ---
# Placeholder test example; replace with real data.
test_inputs = [0.0] * input_size
expected_outputs = [0.0] * output_size

# Forward pass through the learned weights.
# (The original repeated this computation three times, twice with a
# missing closing parenthesis — a SyntaxError; collapsed to one pass.)
hidden = [
    sigmoid(sum(test_inputs[j] * weights_ih[j][i] for j in range(len(test_inputs))))
    for i in range(len(weights_ih[0]))
]
output = [
    sigmoid(sum(hidden[j] * weights_ho[j][i] for j in range(len(hidden))))
    for i in range(len(weights_ho[0]))
]
print(output)
Users browsing this forum: ClaudeBot [spider] and 3 guests