Start developing Vulkan
This commit is contained in:
+25
-9
@@ -1,6 +1,9 @@
|
||||
#include "core.h"
|
||||
#include "core.hpp"
|
||||
#include <cmath>
|
||||
#include <cstdlib>
|
||||
#include <omp.h>
|
||||
|
||||
#define MAX_CORES 16
|
||||
|
||||
NeuralNetwork::NeuralNetwork(LayerStructure_t layers[], int count) : numLayers(count) {
|
||||
for (int i = 0; i < count; i++) sizes.push_back(layers[i].size);
|
||||
@@ -39,29 +42,42 @@ std::vector<double> NeuralNetwork::feedForward(const std::vector<double>& input)
|
||||
|
||||
|
||||
double NeuralNetwork::train(const std::vector<double>& input, const std::vector<double>& target, double lr) {
|
||||
std::vector<double> pred = feedForward(input);
|
||||
omp_set_num_threads(MAX_CORES);
|
||||
|
||||
std::vector<double> pred = feedForward(input);
|
||||
std::vector<std::vector<double>> errors(numLayers);
|
||||
errors[numLayers - 1].resize(sizes[numLayers - 1]);
|
||||
|
||||
|
||||
double totalErr = 0;
|
||||
for (int i = 0; i < sizes[numLayers-1]; i++) {
|
||||
|
||||
for (int i = 0; i < sizes[numLayers - 1]; i++) {
|
||||
double e = target[i] - pred[i];
|
||||
errors[numLayers-1][i] = e * pred[i] * (1.0 - pred[i]);
|
||||
errors[numLayers - 1][i] = e * pred[i] * (1.0 - pred[i]);
|
||||
totalErr += e * e;
|
||||
}
|
||||
|
||||
for (int i = numLayers - 2; i > 0; i--) {
|
||||
errors[i].resize(sizes[i]);
|
||||
|
||||
#pragma omp parallel for
|
||||
for (int j = 0; j < sizes[i]; j++) {
|
||||
double e = 0;
|
||||
for (int k = 0; k < sizes[i+1]; k++) e += errors[i+1][k] * weights[i][k][j];
|
||||
for (int k = 0; k < sizes[i + 1]; k++) {
|
||||
e += errors[i + 1][k] * weights[i][k][j];
|
||||
}
|
||||
errors[i][j] = e * outputs[i][j] * (1.0 - outputs[i][j]);
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < numLayers - 1; i++) {
|
||||
for (int j = 0; j < sizes[i+1]; j++) {
|
||||
for (int k = 0; k < sizes[i]; k++) weights[i][j][k] += lr * errors[i+1][j] * outputs[i][k];
|
||||
biases[i][j] += lr * errors[i+1][j];
|
||||
#pragma omp parallel for
|
||||
for (int j = 0; j < sizes[i + 1]; j++) {
|
||||
double errorTerm = lr * errors[i + 1][j];
|
||||
|
||||
// Nested loop updating the weights
|
||||
for (int k = 0; k < sizes[i]; k++) {
|
||||
weights[i][j][k] += errorTerm * outputs[i][k];
|
||||
}
|
||||
biases[i][j] += errorTerm;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef CORE_H
|
||||
#define CORE_H
|
||||
|
||||
#include "typedef.h"
|
||||
#include "typedef.hpp"
|
||||
#include <vector>
|
||||
#include <cmath>
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
#version 450
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#include "token.h"
|
||||
#include "token.hpp"
|
||||
#include <algorithm>
|
||||
#include <random>
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef TYPEDEF_H
|
||||
#define TYPEDEF_H
|
||||
|
||||
const int MAX_CONTEXT = 8; // How many tokens the network sees
|
||||
const int EMBED_DIM = 4; // Size of a single token's embedding vector
|
||||
const int MAX_CONTEXT = 32; // How many tokens the network sees
|
||||
const int EMBED_DIM = 8; // Size of a single token's embedding vector
|
||||
const int MAX_VOCAB = 90; // Vocabulary size
|
||||
|
||||
typedef enum { SIGMOID } FunctionActivate_t;
|
||||
Reference in New Issue
Block a user