Обновить Xenith/core.cpp
This commit is contained in:
+50
-11
@@ -2,6 +2,19 @@
|
|||||||
#include <cmath>
#include <cstdlib>

// Build-time switch: comment out to produce a purely sequential build.
#define USE_PARALLEL

// Upper bound on OpenMP worker threads; 0 (or negative) means
// "let the OpenMP runtime choose".
#define MAX_THREADS 0

#ifdef USE_PARALLEL
#include <omp.h>
// Expands to `#pragma omp parallel for` applied to the following loop.
#define OMP_PARALLEL _Pragma("omp parallel for")
// do/while(0) makes the macro a single statement, so `OMP_SET_THREADS();`
// is safe even inside an un-braced if/else branch.
#define OMP_SET_THREADS() do { if (MAX_THREADS > 0) omp_set_num_threads(MAX_THREADS); } while (0)
#else
// Sequential build: both macros compile away to nothing.
#define OMP_PARALLEL
#define OMP_SET_THREADS() do { } while (0)
#endif
|
||||||
|
|
||||||
|
|
||||||
NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) : numLayers(count) {
|
NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) : numLayers(count) {
|
||||||
for (int i = 0; i < count; i++) sizes.push_back(layers[i].size);
|
for (int i = 0; i < count; i++) sizes.push_back(layers[i].size);
|
||||||
for (int i = 0; i < count - 1; i++) {
|
for (int i = 0; i < count - 1; i++) {
|
||||||
@@ -19,45 +32,71 @@ NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) : numLayers(c
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::vector<double> NeuralNetwork::feedForward(const std::vector<double>& input) {
|
std::vector<double> NeuralNetwork::feedForward(const std::vector<double>& input) {
|
||||||
|
OMP_SET_THREADS();
|
||||||
|
|
||||||
outputs.clear();
|
outputs.clear();
|
||||||
outputs.push_back(input);
|
outputs.push_back(input);
|
||||||
|
|
||||||
std::vector<double> curr = input;
|
std::vector<double> curr = input;
|
||||||
|
|
||||||
for (int i = 0; i < numLayers - 1; i++) {
|
for (int i = 0; i < numLayers - 1; i++) {
|
||||||
std::vector<double> next;
|
std::vector<double> next(sizes[i + 1]);
|
||||||
for (int j = 0; j < sizes[i+1]; j++) {
|
|
||||||
|
OMP_PARALLEL
|
||||||
|
for (int j = 0; j < sizes[i + 1]; j++) {
|
||||||
double sum = biases[i][j];
|
double sum = biases[i][j];
|
||||||
for (int k = 0; k < (int)curr.size(); k++) sum += curr[k] * weights[i][j][k];
|
for (int k = 0; k < (int)curr.size(); k++) {
|
||||||
next.push_back(1.0 / (1.0 + exp(-sum)));
|
sum += curr[k] * weights[i][j][k];
|
||||||
}
|
}
|
||||||
|
next[j] = 1.0 / (1.0 + exp(-sum));
|
||||||
|
}
|
||||||
|
|
||||||
curr = next;
|
curr = next;
|
||||||
outputs.push_back(curr);
|
outputs.push_back(curr);
|
||||||
}
|
}
|
||||||
return curr;
|
return curr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
double NeuralNetwork::train(const std::vector<double>& input, const std::vector<double>& target, double lr) {
|
double NeuralNetwork::train(const std::vector<double>& input, const std::vector<double>& target, double lr) {
|
||||||
|
OMP_SET_THREADS();
|
||||||
|
|
||||||
std::vector<double> pred = feedForward(input);
|
std::vector<double> pred = feedForward(input);
|
||||||
|
|
||||||
std::vector<std::vector<double>> errors(numLayers);
|
std::vector<std::vector<double>> errors(numLayers);
|
||||||
errors[numLayers-1].resize(sizes[numLayers-1]);
|
errors[numLayers - 1].resize(sizes[numLayers - 1]);
|
||||||
|
|
||||||
double totalErr = 0;
|
double totalErr = 0;
|
||||||
for (int i = 0; i < sizes[numLayers-1]; i++) {
|
|
||||||
|
for (int i = 0; i < sizes[numLayers - 1]; i++) {
|
||||||
double e = target[i] - pred[i];
|
double e = target[i] - pred[i];
|
||||||
errors[numLayers-1][i] = e * pred[i] * (1.0 - pred[i]);
|
errors[numLayers - 1][i] = e * pred[i] * (1.0 - pred[i]);
|
||||||
totalErr += e * e;
|
totalErr += e * e;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = numLayers - 2; i > 0; i--) {
|
for (int i = numLayers - 2; i > 0; i--) {
|
||||||
errors[i].resize(sizes[i]);
|
errors[i].resize(sizes[i]);
|
||||||
|
|
||||||
|
OMP_PARALLEL
|
||||||
for (int j = 0; j < sizes[i]; j++) {
|
for (int j = 0; j < sizes[i]; j++) {
|
||||||
double e = 0;
|
double e = 0;
|
||||||
for (int k = 0; k < sizes[i+1]; k++) e += errors[i+1][k] * weights[i][k][j];
|
for (int k = 0; k < sizes[i + 1]; k++) {
|
||||||
|
e += errors[i + 1][k] * weights[i][k][j];
|
||||||
|
}
|
||||||
errors[i][j] = e * outputs[i][j] * (1.0 - outputs[i][j]);
|
errors[i][j] = e * outputs[i][j] * (1.0 - outputs[i][j]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = 0; i < numLayers - 1; i++) {
|
for (int i = 0; i < numLayers - 1; i++) {
|
||||||
for (int j = 0; j < sizes[i+1]; j++) {
|
|
||||||
for (int k = 0; k < sizes[i]; k++) weights[i][j][k] += lr * errors[i+1][j] * outputs[i][k];
|
OMP_PARALLEL
|
||||||
biases[i][j] += lr * errors[i+1][j];
|
for (int j = 0; j < sizes[i + 1]; j++) {
|
||||||
|
for (int k = 0; k < sizes[i]; k++) {
|
||||||
|
weights[i][j][k] += lr * errors[i + 1][j] * outputs[i][k];
|
||||||
|
}
|
||||||
|
biases[i][j] += lr * errors[i + 1][j];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return totalErr;
|
return totalErr;
|
||||||
}
|
}
|
||||||
Reference in New Issue
Block a user