ls
This commit is contained in:
@@ -0,0 +1,82 @@
|
|||||||
|
#include "core.h"
|
||||||
|
|
||||||
|
// Builds the network topology and randomly initializes all weights and biases.
//
// layers: array of layer descriptors (only .size is used here; the activation
//         field is currently ignored — the network is hard-wired to sigmoid).
// count:  number of entries in `layers`.
//
// Storage layout: weights[layer][to_node][from_node], biases[layer][node].
// Values are drawn uniformly from [-1, 1] via rand(); the caller is expected
// to seed the generator (main() calls srand()).
NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) {
    numLayers = count;
    layerSizes.reserve(count);
    for (int i = 0; i < count; i++) {
        layerSizes.push_back(layers[i].size);
    }

    // Uniform random value in [-1, 1].
    auto randomUnit = []() {
        return ((double)rand() / RAND_MAX) * 2 - 1;
    };

    // One weight matrix and one bias vector per pair of adjacent layers.
    weights.reserve(count - 1);
    biases.reserve(count - 1);
    for (int i = 0; i < count - 1; i++) {
        std::vector<std::vector<double>> layerWeights;
        layerWeights.reserve(layerSizes[i + 1]);
        for (int j = 0; j < layerSizes[i + 1]; j++) {
            std::vector<double> nodeWeights;
            nodeWeights.reserve(layerSizes[i]);
            for (int k = 0; k < layerSizes[i]; k++) {
                nodeWeights.push_back(randomUnit());
            }
            layerWeights.push_back(nodeWeights);
        }
        weights.push_back(layerWeights);

        std::vector<double> layerBiases;
        layerBiases.reserve(layerSizes[i + 1]);
        for (int j = 0; j < layerSizes[i + 1]; j++) {
            layerBiases.push_back(randomUnit());
        }
        biases.push_back(layerBiases);
    }
}
|
||||||
|
|
||||||
|
std::vector<double> NeuralNetwork::feedForward(std::vector<double> input) {
|
||||||
|
outputs.clear();
|
||||||
|
outputs.push_back(input);
|
||||||
|
|
||||||
|
std::vector<double> current = input;
|
||||||
|
for (int i = 0; i < numLayers - 1; i++) {
|
||||||
|
std::vector<double> next;
|
||||||
|
for (int j = 0; j < layerSizes[i+1]; j++) {
|
||||||
|
double sum = biases[i][j];
|
||||||
|
for (int k = 0; k < layerSizes[i]; k++) {
|
||||||
|
sum += current[k] * weights[i][j][k];
|
||||||
|
}
|
||||||
|
next.push_back(sigmoid(sum));
|
||||||
|
}
|
||||||
|
current = next;
|
||||||
|
outputs.push_back(current);
|
||||||
|
}
|
||||||
|
return current;
|
||||||
|
}
|
||||||
|
|
||||||
|
void NeuralNetwork::train(std::vector<double> input, std::vector<double> target, double lr) {
|
||||||
|
// 1. Прямой проход
|
||||||
|
feedForward(input);
|
||||||
|
|
||||||
|
// 2. Вычисление ошибок для выходного слоя
|
||||||
|
std::vector<std::vector<double>> errors(numLayers);
|
||||||
|
errors[numLayers - 1].resize(layerSizes[numLayers - 1]);
|
||||||
|
for (int i = 0; i < layerSizes[numLayers - 1]; i++) {
|
||||||
|
double output = outputs[numLayers - 1][i];
|
||||||
|
errors[numLayers - 1][i] = (target[i] - output) * sigmoidDerivative(output);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Обратное распространение ошибки на скрытые слои
|
||||||
|
for (int i = numLayers - 2; i > 0; i--) {
|
||||||
|
errors[i].resize(layerSizes[i]);
|
||||||
|
for (int j = 0; j < layerSizes[i]; j++) {
|
||||||
|
double error = 0.0;
|
||||||
|
for (int k = 0; k < layerSizes[i+1]; k++) {
|
||||||
|
error += errors[i+1][k] * weights[i][k][j];
|
||||||
|
}
|
||||||
|
errors[i][j] = error * sigmoidDerivative(outputs[i][j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Обновление весов и смещений
|
||||||
|
for (int i = 0; i < numLayers - 1; i++) {
|
||||||
|
for (int j = 0; j < layerSizes[i+1]; j++) {
|
||||||
|
for (int k = 0; k < layerSizes[i]; k++) {
|
||||||
|
weights[i][j][k] += lr * errors[i+1][j] * outputs[i][k];
|
||||||
|
}
|
||||||
|
biases[i][j] += lr * errors[i+1][j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
+27
-8
@@ -1,9 +1,28 @@
|
|||||||
|
#ifndef CORE_H
#define CORE_H

#include "typedef.h"

#include <vector>
#include <cmath>
#include <iostream>
#include <cstdlib>

// Minimal fully-connected feed-forward neural network trained with online
// backpropagation. All layers use the sigmoid activation (the per-layer
// activation field in LayerStructure_t is currently ignored).
class NeuralNetwork {
private:
    int numLayers;
    std::vector<int> layerSizes;
    std::vector<std::vector<std::vector<double>>> weights; // weights[layer][to_node][from_node]
    std::vector<std::vector<double>> biases;               // biases[layer][node]
    std::vector<std::vector<double>> outputs;              // layer activations kept for backprop

    // Logistic activation. Uses std::exp — <cmath> is only guaranteed to
    // declare the math functions in namespace std, so the unqualified call
    // is non-portable.
    double sigmoid(double x) { return 1.0 / (1.0 + std::exp(-x)); }
    // Derivative expressed in terms of the already-computed sigmoid output x.
    double sigmoidDerivative(double x) { return x * (1.0 - x); }

public:
    // Builds the topology described by `layers` (count entries) and randomly
    // initializes weights and biases in [-1, 1]; seed rand() beforehand.
    NeuralNetwork(LayerStructure_t layers[], int count);

    // Forward pass; returns the output-layer activations.
    std::vector<double> feedForward(std::vector<double> input);
    // One backpropagation step on a single (input, target) pair.
    void train(std::vector<double> input, std::vector<double> target, double learningRate);
};

#endif
|
||||||
@@ -0,0 +1,15 @@
|
|||||||
|
#ifndef TYPEDEF_H
#define TYPEDEF_H

#include <vector>

// Activation functions a layer may request (only sigmoid is implemented).
enum FunctionActivate_t {
    SIGMOID
};

// Describes a single network layer: its node count and activation function.
struct LayerStructure_t {
    int size;
    FunctionActivate_t activate;
};

#endif
|
||||||
@@ -1,13 +1,39 @@
|
|||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
#include <vector>
|
||||||
int calculate(int a, int b) {
|
#include <ctime>
|
||||||
return a + o;
|
#include "Xenith/core.h"
|
||||||
}
|
#include "Xenith/typedef.h"
|
||||||
|
|
||||||
int main() {
|
int main() {
|
||||||
// print hello world and test void calculate function
|
srand(time(NULL));
|
||||||
std::cout << "Hello World!" << std::endl;
|
|
||||||
int result = calculate(1, 2);
|
LayerStructure_t layers[] = {
|
||||||
std::cout << result << std::endl;
|
{2, SIGMOID}, // Вход: 2 числа
|
||||||
|
{3, SIGMOID}, // Скрытый слой
|
||||||
|
{1, SIGMOID} // Выход: 1 число
|
||||||
|
};
|
||||||
|
|
||||||
|
NeuralNetwork nn(layers, 3);
|
||||||
|
|
||||||
|
// Данные для обучения
|
||||||
|
std::vector<std::vector<double>> inputs = {{1, 1}, {1, 0}, {0, 0}, {0, 1}};
|
||||||
|
std::vector<std::vector<double>> targets = {{0}, {1}, {1}, {0}};
|
||||||
|
|
||||||
|
// Цикл обучения
|
||||||
|
std::cout << "Training..." << std::endl;
|
||||||
|
for (int epoch = 0; epoch < 20000; epoch++) {
|
||||||
|
for (int i = 0; i < inputs.size(); i++) {
|
||||||
|
nn.train(inputs[i], targets[i], 0.5);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Проверка результатов
|
||||||
|
std::cout << "Results:" << std::endl;
|
||||||
|
for (int i = 0; i < inputs.size(); i++) {
|
||||||
|
std::vector<double> res = nn.feedForward(inputs[i]);
|
||||||
|
std::cout << inputs[i][0] << " " << inputs[i][1] << " -> "
|
||||||
|
<< (res[0] > 0.5 ? 1 : 0) << " (raw: " << res[0] << ")" << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
Reference in New Issue
Block a user