#include "core.h"

// Builds the network topology and randomizes all parameters.
//
// layers: array of per-layer descriptors (only .size is read here).
// count:  total number of layers, including input and output layers.
//
// Weights and biases are drawn uniformly from [-1, 1] via rand().
// NOTE(review): rand() is never seeded here — presumably the caller
// calls srand(); confirm, otherwise every run starts identically.
NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) {
    numLayers = count;
    for (int i = 0; i < count; i++) {
        layerSizes.push_back(layers[i].size);
    }

    // Initialize weights with random values in [-1, 1].
    // Layout: weights[i][j][k] connects node k of layer i
    // to node j of layer i+1.
    for (int i = 0; i < count - 1; i++) {
        std::vector<std::vector<double>> layerWeights;
        for (int j = 0; j < layerSizes[i + 1]; j++) {
            std::vector<double> nodeWeights;
            for (int k = 0; k < layerSizes[i]; k++) {
                nodeWeights.push_back(((double)rand() / RAND_MAX) * 2 - 1);
            }
            layerWeights.push_back(nodeWeights);
        }
        weights.push_back(layerWeights);

        // One bias per node of the NEXT layer, same random range.
        std::vector<double> layerBiases;
        for (int j = 0; j < layerSizes[i + 1]; j++) {
            layerBiases.push_back(((double)rand() / RAND_MAX) * 2 - 1);
        }
        biases.push_back(layerBiases);
    }
}

// Forward pass: propagates `input` through every layer, applying
// sigmoid() to each node's weighted sum, and returns the output
// layer's activations.
//
// Side effect: rebuilds `outputs` with the activations of every
// layer (outputs[0] is the raw input) — train() relies on this.
std::vector<double> NeuralNetwork::feedForward(std::vector<double> input) {
    outputs.clear();
    outputs.push_back(input);
    std::vector<double> current = input;
    for (int i = 0; i < numLayers - 1; i++) {
        std::vector<double> next;
        for (int j = 0; j < layerSizes[i + 1]; j++) {
            double sum = biases[i][j];  // start from the node's bias
            for (int k = 0; k < layerSizes[i]; k++) {
                sum += current[k] * weights[i][j][k];
            }
            next.push_back(sigmoid(sum));
        }
        current = next;
        outputs.push_back(current);
    }
    return current;
}

// One step of online (single-sample) backpropagation.
//
// input:  sample fed through the network.
// target: desired output activations; must match the output layer size.
// lr:     learning rate applied to both weight and bias updates.
void NeuralNetwork::train(std::vector<double> input, std::vector<double> target, double lr) {
    // 1. Forward pass (also populates per-layer `outputs`).
    feedForward(input);

    // 2. Output-layer deltas: (target - out) * sigmoidDerivative(out).
    //    NOTE(review): sigmoidDerivative is given the ACTIVATION, which
    //    implies it computes y * (1 - y) — confirm against core.h.
    std::vector<std::vector<double>> errors(numLayers);
    errors[numLayers - 1].resize(layerSizes[numLayers - 1]);
    for (int i = 0; i < layerSizes[numLayers - 1]; i++) {
        double output = outputs[numLayers - 1][i];
        errors[numLayers - 1][i] = (target[i] - output) * sigmoidDerivative(output);
    }

    // 3. Backpropagate deltas through the hidden layers
    //    (layer 0 is the input layer and gets no deltas).
    for (int i = numLayers - 2; i > 0; i--) {
        errors[i].resize(layerSizes[i]);
        for (int j = 0; j < layerSizes[i]; j++) {
            double error = 0.0;
            for (int k = 0; k < layerSizes[i + 1]; k++) {
                error += errors[i + 1][k] * weights[i][k][j];
            }
            errors[i][j] = error * sigmoidDerivative(outputs[i][j]);
        }
    }

    // 4. Update weights and biases. The delta already carries the
    //    (target - output) sign, so the update is ADDED (equivalent to
    //    gradient descent on squared error).
    for (int i = 0; i < numLayers - 1; i++) {
        for (int j = 0; j < layerSizes[i + 1]; j++) {
            for (int k = 0; k < layerSizes[i]; k++) {
                weights[i][j][k] += lr * errors[i + 1][j] * outputs[i][k];
            }
            biases[i][j] += lr * errors[i + 1][j];
        }
    }
}