From 229c5d85c4f40783e69241b1beae0d67e49a09d7 Mon Sep 17 00:00:00 2001
From: KoDer
Date: Tue, 28 Apr 2026 23:23:57 +0700
Subject: [PATCH] Add a simple feedforward neural network with backpropagation

---
 Xenith/core.cpp  | 82 ++++++++++++++++++++++++++++++++++++++++++++++++
 Xenith/core.h    | 35 ++++++++++++++++-----
 Xenith/typedef.h | 15 +++++++++
 main.cpp         | 42 ++++++++++++++++++++-----
 4 files changed, 158 insertions(+), 16 deletions(-)
 create mode 100644 Xenith/typedef.h

diff --git a/Xenith/core.cpp b/Xenith/core.cpp
index e69de29..29203c0 100644
--- a/Xenith/core.cpp
+++ b/Xenith/core.cpp
@@ -0,0 +1,82 @@
+#include "core.h"
+
+NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) {
+    numLayers = count;
+    for (int i = 0; i < count; i++) {
+        layerSizes.push_back(layers[i].size);
+    }
+
+    // Initialize the weights with random numbers in [-1, 1]
+    for (int i = 0; i < count - 1; i++) {
+        std::vector<std::vector<double>> layerWeights;
+        for (int j = 0; j < layerSizes[i+1]; j++) {
+            std::vector<double> nodeWeights;
+            for (int k = 0; k < layerSizes[i]; k++) {
+                nodeWeights.push_back(((double)rand() / RAND_MAX) * 2 - 1);
+            }
+            layerWeights.push_back(nodeWeights);
+        }
+        weights.push_back(layerWeights);
+
+        std::vector<double> layerBiases;
+        for (int j = 0; j < layerSizes[i+1]; j++) {
+            layerBiases.push_back(((double)rand() / RAND_MAX) * 2 - 1);
+        }
+        biases.push_back(layerBiases);
+    }
+}
+
+std::vector<double> NeuralNetwork::feedForward(std::vector<double> input) {
+    outputs.clear();
+    outputs.push_back(input);
+
+    std::vector<double> current = input;
+    for (int i = 0; i < numLayers - 1; i++) {
+        std::vector<double> next;
+        for (int j = 0; j < layerSizes[i+1]; j++) {
+            double sum = biases[i][j];
+            for (int k = 0; k < layerSizes[i]; k++) {
+                sum += current[k] * weights[i][j][k];
+            }
+            next.push_back(sigmoid(sum));
+        }
+        current = next;
+        outputs.push_back(current);
+    }
+    return current;
+}
+
+void NeuralNetwork::train(std::vector<double> input, std::vector<double> target, double lr) {
+    // 1. Forward pass
+    feedForward(input);
+
+    // 2. Compute the errors for the output layer
+    std::vector<std::vector<double>> errors(numLayers);
+    errors[numLayers - 1].resize(layerSizes[numLayers - 1]);
+    for (int i = 0; i < layerSizes[numLayers - 1]; i++) {
+        double output = outputs[numLayers - 1][i];
+        errors[numLayers - 1][i] = (target[i] - output) * sigmoidDerivative(output);
+    }
+
+    // 3. Backpropagate the error to the hidden layers
+    for (int i = numLayers - 2; i > 0; i--) {
+        errors[i].resize(layerSizes[i]);
+        for (int j = 0; j < layerSizes[i]; j++) {
+            double error = 0.0;
+            for (int k = 0; k < layerSizes[i+1]; k++) {
+                error += errors[i+1][k] * weights[i][k][j];
+            }
+            errors[i][j] = error * sigmoidDerivative(outputs[i][j]);
+        }
+    }
+
+    // 4. Update the weights and biases
+    for (int i = 0; i < numLayers - 1; i++) {
+        for (int j = 0; j < layerSizes[i+1]; j++) {
+            for (int k = 0; k < layerSizes[i]; k++) {
+                weights[i][j][k] += lr * errors[i+1][j] * outputs[i][k];
+            }
+            biases[i][j] += lr * errors[i+1][j];
+        }
+    }
+}
\ No newline at end of file
diff --git a/Xenith/core.h b/Xenith/core.h
index bedf096..038f768 100644
--- a/Xenith/core.h
+++ b/Xenith/core.h
@@ -1,9 +1,28 @@
+#ifndef CORE_H
+#define CORE_H
 
-class NeuroEngine: {
-    private:
-    void* m_engine;
-    public:
-    NeuroEngine();
-    ~NeuroEngine();
-    void* getEngine();
-}
+#include "typedef.h"
+#include <vector>
+#include <cmath>
+#include <cstdlib>
+#include <ctime>
+
+class NeuralNetwork {
+private:
+    int numLayers;
+    std::vector<int> layerSizes;
+    std::vector<std::vector<std::vector<double>>> weights; // weights[layer][to_node][from_node]
+    std::vector<std::vector<double>> biases;               // biases[layer][node]
+    std::vector<std::vector<double>> outputs;              // layer outputs stored for backprop
+
+    double sigmoid(double x) { return 1.0 / (1.0 + exp(-x)); }
+    double sigmoidDerivative(double x) { return x * (1.0 - x); }
+
+public:
+    NeuralNetwork(LayerStructure_t layers[], int count);
+
+    std::vector<double> feedForward(std::vector<double> input);
+    void train(std::vector<double> input, std::vector<double> target, double learningRate);
+};
+
+#endif
\ No newline at end of file
diff --git a/Xenith/typedef.h b/Xenith/typedef.h
new file mode 100644
index 0000000..31fdc29
--- /dev/null
+++ b/Xenith/typedef.h
@@ -0,0 +1,15 @@
+#ifndef TYPEDEF_H
+#define TYPEDEF_H
+
+#include <vector>
+
+typedef enum {
+    SIGMOID
+} FunctionActivate_t;
+
+typedef struct {
+    int size;
+    FunctionActivate_t activate;
+} LayerStructure_t;
+
+#endif
\ No newline at end of file
diff --git a/main.cpp b/main.cpp
index 7a123e4..4083290 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1,13 +1,39 @@
 #include <iostream>
-
-int calculate(int a, int b) {
-    return a + o;
-}
+#include <vector>
+#include <ctime>
+#include "Xenith/core.h"
+#include "Xenith/typedef.h"
 
 int main() {
-    // print hello world and test void calculate function
-    std::cout << "Hello World!" << std::endl;
-    int result = calculate(1, 2);
-    std::cout << result << std::endl;
+    srand(time(NULL));
+
+    LayerStructure_t layers[] = {
+        {2, SIGMOID},  // Input: 2 numbers
+        {3, SIGMOID},  // Hidden layer
+        {1, SIGMOID}   // Output: 1 number
+    };
+
+    NeuralNetwork nn(layers, 3);
+
+    // Training data
+    std::vector<std::vector<double>> inputs = {{1, 1}, {1, 0}, {0, 0}, {0, 1}};
+    std::vector<std::vector<double>> targets = {{0}, {1}, {1}, {0}};
+
+    // Training loop
+    std::cout << "Training..." << std::endl;
+    for (int epoch = 0; epoch < 20000; epoch++) {
+        for (int i = 0; i < inputs.size(); i++) {
+            nn.train(inputs[i], targets[i], 0.5);
+        }
+    }
+
+    // Check the results
+    std::cout << "Results:" << std::endl;
+    for (int i = 0; i < inputs.size(); i++) {
+        std::vector<double> res = nn.feedForward(inputs[i]);
+        std::cout << inputs[i][0] << " " << inputs[i][1] << " -> "
+                  << (res[0] > 0.5 ? 1 : 0) << " (raw: " << res[0] << ")" << std::endl;
+    }
+
     return 0;
 }
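
A note on the derivative convention in train(): sigmoidDerivative() is applied to values that have already been passed through sigmoid(), relying on the identity sigmoid'(x) = y * (1 - y) with y = sigmoid(x); that is why step 2 feeds it the stored layer outputs rather than pre-activation sums. A minimal standalone check of that identity (illustrative file name and test value, not part of the patch):

// check_sigmoid.cpp -- compares y*(1-y) against a numerical derivative of sigmoid
#include <cmath>
#include <iostream>

int main() {
    double x = 0.7;                          // arbitrary test point
    double y = 1.0 / (1.0 + std::exp(-x));   // sigmoid(x), same formula as core.h
    double analytic = y * (1.0 - y);         // what sigmoidDerivative(y) returns
    double eps = 1e-6;                       // central finite difference step
    double numeric = (1.0 / (1.0 + std::exp(-(x + eps)))
                    - 1.0 / (1.0 + std::exp(-(x - eps)))) / (2.0 * eps);
    std::cout << "analytic: " << analytic << ", numeric: " << numeric << std::endl;
    return 0;  // both print ~0.221713
}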
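
The update in step 4 is the standard delta rule: each weight moves by the learning rate times the downstream node's error signal times the upstream node's activation, and each bias by the learning rate times the error signal alone. A one-weight sketch of a single step (all values made up for illustration):

// delta_rule_step.cpp -- mirrors one iteration of step 4 in NeuralNetwork::train()
#include <iostream>

int main() {
    double lr = 0.5;          // learning rate, as passed in main.cpp
    double activation = 0.8;  // outputs[i][k]: the upstream node's output
    double delta = 0.05;      // errors[i+1][j]: already includes the sigmoid derivative
    double w = -0.3;          // weights[i][j][k] before the step
    double b = 0.1;           // biases[i][j] before the step

    w += lr * delta * activation;  // -0.3 + 0.5 * 0.05 * 0.8 = -0.28
    b += lr * delta;               //  0.1 + 0.5 * 0.05      =  0.125
    std::cout << "w: " << w << ", b: " << b << std::endl;
    return 0;
}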