From d20cf1b17a463fbbd1d597ba76577715ab8e6186 Mon Sep 17 00:00:00 2001
From: KoDer
Date: Wed, 29 Apr 2026 10:56:38 +0300
Subject: [PATCH] =?UTF-8?q?=D0=9E=D0=B1=D0=BD=D0=BE=D0=B2=D0=B8=D1=82?=
 =?UTF-8?q?=D1=8C=20Xenith/core.cpp?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Xenith/core.cpp | 77 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 58 insertions(+), 19 deletions(-)

diff --git a/Xenith/core.cpp b/Xenith/core.cpp
index ed1837e..a8a4c79 100644
--- a/Xenith/core.cpp
+++ b/Xenith/core.cpp
@@ -2,6 +2,19 @@
 #include <vector>
 #include <cmath>
+#define USE_PARALLEL
+#define MAX_THREADS 0
+
+#ifdef USE_PARALLEL
+#include <omp.h>
+#define OMP_PARALLEL _Pragma("omp parallel for")
+#define OMP_SET_THREADS() { if (MAX_THREADS > 0) omp_set_num_threads(MAX_THREADS); }
+#else
+#define OMP_PARALLEL
+#define OMP_SET_THREADS()
+#endif
+
+
 
 NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) : numLayers(count) {
     for (int i = 0; i < count; i++) sizes.push_back(layers[i].size);
     for (int i = 0; i < count - 1; i++) {
@@ -19,45 +32,71 @@ NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) : numLayers(c
 }
 
 std::vector<double> NeuralNetwork::feedForward(const std::vector<double>& input) {
+    OMP_SET_THREADS();
+
     outputs.clear();
     outputs.push_back(input);
+
     std::vector<double> curr = input;
+
     for (int i = 0; i < numLayers - 1; i++) {
-        std::vector<double> next;
-        for (int j = 0; j < sizes[i+1]; j++) {
-            double sum = biases[i][j];
-            for (int k = 0; k < (int)curr.size(); k++) sum += curr[k] * weights[i][j][k];
-            next.push_back(1.0 / (1.0 + exp(-sum)));
-        }
+        std::vector<double> next(sizes[i + 1]);
+
+        OMP_PARALLEL
+        for (int j = 0; j < sizes[i + 1]; j++) {
+            double sum = biases[i][j];
+            for (int k = 0; k < (int)curr.size(); k++) {
+                sum += curr[k] * weights[i][j][k];
+            }
+            next[j] = 1.0 / (1.0 + exp(-sum));
+        }
+
         curr = next;
         outputs.push_back(curr);
     }
     return curr;
 }
 
+
 double NeuralNetwork::train(const std::vector<double>& input, const std::vector<double>& target, double lr) {
+    OMP_SET_THREADS();
+
     std::vector<double> pred = feedForward(input);
+
     std::vector<std::vector<double>> errors(numLayers);
-    errors[numLayers-1].resize(sizes[numLayers-1]);
+    errors[numLayers - 1].resize(sizes[numLayers - 1]);
+
     double totalErr = 0;
-    for (int i = 0; i < sizes[numLayers-1]; i++) {
+
+    for (int i = 0; i < sizes[numLayers - 1]; i++) {
         double e = target[i] - pred[i];
-        errors[numLayers-1][i] = e * pred[i] * (1.0 - pred[i]);
+        errors[numLayers - 1][i] = e * pred[i] * (1.0 - pred[i]);
         totalErr += e * e;
     }
+
     for (int i = numLayers - 2; i > 0; i--) {
         errors[i].resize(sizes[i]);
-        for (int j = 0; j < sizes[i]; j++) {
-            double e = 0;
-            for (int k = 0; k < sizes[i+1]; k++) e += errors[i+1][k] * weights[i][k][j];
-            errors[i][j] = e * outputs[i][j] * (1.0 - outputs[i][j]);
-        }
+
+        OMP_PARALLEL
+        for (int j = 0; j < sizes[i]; j++) {
+            double e = 0;
+            for (int k = 0; k < sizes[i + 1]; k++) {
+                e += errors[i + 1][k] * weights[i][k][j];
+            }
+            errors[i][j] = e * outputs[i][j] * (1.0 - outputs[i][j]);
+        }
     }
+
     for (int i = 0; i < numLayers - 1; i++) {
-        for (int j = 0; j < sizes[i+1]; j++) {
-            for (int k = 0; k < sizes[i]; k++) weights[i][j][k] += lr * errors[i+1][j] * outputs[i][k];
-            biases[i][j] += lr * errors[i+1][j];
-        }
+
+        OMP_PARALLEL
+        for (int j = 0; j < sizes[i + 1]; j++) {
+            for (int k = 0; k < sizes[i]; k++) {
+                weights[i][j][k] += lr * errors[i + 1][j] * outputs[i][k];
+            }
+            biases[i][j] += lr * errors[i + 1][j];
+        }
     }
+
     return totalErr;
-}
\ No newline at end of file
+}