Sync public subset from Flux

This commit is contained in:
Gitea CI
2025-10-07 11:09:55 +00:00
parent 8892d58e66
commit 35023cb7e1
30 changed files with 707 additions and 229 deletions

View File

@@ -0,0 +1,24 @@
#pragma once
#include "./core/omp_config.h"
#include "./utils/vector.h"
#include "./utils/matrix.h"
namespace neural_networks{
// Rectified Linear Unit activation layer.
// forward() stores the activated values in `outputs`; inputs are not cached
// (the cached-inputs member is commented out below).
template <typename T>
struct Activation_ReLU{
//utils::Matrix<T> inputs;
utils::Matrix<T> outputs;
// Element-wise ReLU: outputs = clip of `inputs` from below at T{0}.
// NOTE(review): numerics::matclip_low is a project helper not visible here —
// assumed to replace every element < 0 with 0; confirm against its definition.
void forward(const utils::Matrix<T>& inputs){
outputs = numerics::matclip_low(inputs, T{0});
}
};
} // end namespace neural_networks

View File

@@ -5,27 +5,30 @@
#include "./utils/vector.h"
#include "./utils/matrix.h"
#include "./numerics/max.h"
#include "./numerics/matmax.h"
#include "./numerics/matsubtract.h"
#include "./numerics/exponential.h"
#include "./numerics/matexp.h"
#include "./numerics/matdiv.h"
namespace neural_networks{
template <typename T>
struct activation_softmax{
struct Activation_Softmax{
utils::Matrix<T> exp_values;
utils::Matrix<T> probabilities;
utils::Matrix<T> outputs;
void forward(const utils::Matrix<T> inputs){
void forward(const utils::Matrix<T>& inputs){
// Get unnormalized probabilities
exp_values = numerics::matexp(numerics::matsubtract(inputs, numerics::matmax(inputs, "rows"), "col"));
exp_values = numerics::exponential(numerics::matsubtract(inputs, numerics::max(inputs, "rows"), "col"));
// Normalize them for each sample
probabilities = numerics::matdiv(exp_values, numerics::matsum(exp_values, "col"), "col");
outputs = probabilities;
}

View File

@@ -1,28 +0,0 @@
#pragma once
#include "./core/omp_config.h"
#include "./utils/vector.h"
#include "./utils/matrix.h"
#include "./utils/random.h"
namespace neural_networks{
template <typename T>
struct activation_ReLU{
utils::Matrix<T> outputs;
void forward(utils::Matrix<T> inputs){
outputs = numerics::max(inputs, T{0});
//outputs.print();
}
};
} // end namespace neural_networks

View File

@@ -11,7 +11,7 @@
namespace neural_networks{
template <typename TX, typename Ty>
void create_spital_data(const uint64_t samples, const uint64_t classes, utils::Matrix<TX>& X, utils::Vector<Ty>& y) {
void create_spital_data(const uint64_t samples, const uint64_t classes, utils::Matrix<TX>& X, utils::Matrix<Ty>& y) {
const uint64_t rows = samples*classes;
TX r, t;
@@ -21,8 +21,8 @@ namespace neural_networks{
if ((rows != X.rows()) || (X.cols() != 2)){
X.resize(samples*classes, 2);
}
if (rows != y.size()){
y.resize(rows);
if (rows != y.rows()){
y.resize(rows, 1);
}
for (uint64_t i = 0; i < classes; ++i){
@@ -33,7 +33,7 @@ namespace neural_networks{
X(row_idx, 0) = r*std::cos(t*2.5) + utils::random(TX{-0.15}, TX{0.15});
X(row_idx, 1) = r*std::sin(t*2.5) + utils::random(TX{-0.15}, TX{0.15});
y[row_idx] = static_cast<Ty>(i);
y(row_idx, 0) = static_cast<Ty>(i);
}
}
}

View File

@@ -10,31 +10,28 @@
namespace neural_networks{
template <typename T>
struct dense_layer{
struct Dense_Layer{
//utils::Matrix<T> _inputs;
utils::Matrix<T> weights;
utils::Vector<T> biases;
utils::Matrix<T> outputs;
// Default Constructor
dense_layer() = default;
Dense_Layer() = default;
// Constructor
dense_layer(const uint64_t n_inputs, const uint64_t n_neurons){
Dense_Layer(const uint64_t n_inputs, const uint64_t n_neurons){
weights.random(n_inputs, n_neurons, -1, 1);
biases.resize(n_neurons, T{0});
//weights.print();
//outputs.resize()
}
void forward(utils::Matrix<T> inputs){
outputs = numerics::matadd(numerics::matmul_auto(inputs, (weights)), biases, "row");
void forward(utils::Matrix<T>& inputs){
outputs = numerics::matadd(numerics::matmul_auto(inputs, weights), biases, "row");
}
};

View File

@@ -1,34 +0,0 @@
#pragma once
#include "./core/omp_config.h"
#include "./utils/vector.h"
#include "./utils/matrix.h"
namespace neural_networks{
template <typename Td, typename Ti>
struct Loss{
utils::Matrix<Td> sample_losses;
Td data_losses;
virtual utils::Vector<Td> forward(const utils::Matrix<Td>& output, const utils::Matrix<Ti>& y) = 0;
Td calculate(const utils::Matrix<Td>& output, const utils::Matrix<Ti>& y){
// Calculate sample losses
sample_losses = forward(output, y);
// Calculate mean loss
data_losses = numerics::mean(sample_losses);
return data_losses;
}
};
} // end namespace neural_networks

View File

@@ -5,30 +5,28 @@
#include "./utils/vector.h"
#include "./utils/matrix.h"
#include "./numerics/vecmean.h"
namespace neural_networks{
template <typename Td, typename Ti>
struct Loss{
utils::Matrix<Td> sample_losses;
Td data_losses;
utils::Vector<Td> sample_losses;
Td data_loss;
virtual utils::Vector<Td> forward(const utils::Matrix<Td>& output, const utils::Matrix<Ti>& y) = 0;
Td calculate(const utils::Matrix<Td>& output, const utils::Matrix<Ti>& y){
// Calculate sample losses
sample_losses = forward(output, y);
// Calculate mean loss
data_losses = numerics::mean(sample_losses);
return data_losses;
data_loss = numerics::vecmean(sample_losses);
return data_loss;
}
};
} // end namespace neural_networks

View File

@@ -0,0 +1,55 @@
#pragma once
#include "./core/omp_config.h"
#include "./utils/vector.h"
#include "./utils/matrix.h"
#include "./utils/matcast.h"
#include "./numerics/matclip.h"
#include "./numerics/veclog.h"
#include "./Loss.h"
namespace neural_networks{
// Categorical cross-entropy loss: -log(predicted probability of the true
// class). Targets may be sparse class indices (one column) or one-hot rows.
// (The identifier keeps the existing spelling "Crossentrophy"; renaming it
// would break callers.)
template <typename Td, typename Ti>
struct Loss_CategoricalCrossentrophy : Loss<Td, Ti> {
// y_pred: (samples x classes) predicted probabilities.
// y_true: (samples x 1) class indices, or (samples x classes) one-hot rows.
// Returns the per-sample losses -log(confidence assigned to the true class).
// NOTE(review): std::log is used but <cmath> is not included here — this
// relies on a transitive include; confirm.
utils::Vector<Td> forward(const utils::Matrix<Td>& y_pred, const utils::Matrix<Ti>& y_true) override{
utils::Vector<Td> correct_confidences(y_true.rows(), Td{0});
utils::Matrix<Td> cast_y_true = utils::matcast<Td, Ti>(y_true);
// Number of samples in a batch
const uint64_t samples = y_true.rows();
// Clip data to prevent dividing by 0 (log(0) is -inf)
// Clip both sides to not drag the mean towards any value
utils::Matrix<Td> y_pred_clipped = numerics::matclip(y_pred, Td{1e-7}, Td{1.0} - Td{1e-7});
// Probabilities for target values
// only if categorical (sparse index) labels
if (y_true.cols() == 1){
for (uint64_t i = 0; i < y_true.rows(); ++i){
const uint64_t idx = static_cast<uint64_t>(y_true(i, 0));
correct_confidences[i] = y_pred_clipped(i, idx);
}
}else{ // Mask values - only for one-hot encoded labels
// NOTE(review): matdot_row is presumably a per-row dot product selecting the
// one-hot class probability; its header is not among the includes — verify.
correct_confidences = numerics::matdot_row(y_pred_clipped, cast_y_true);
}
// Losses
utils::Vector<Td> negative_log_likelihoods(samples, Td{0});
for (uint64_t i = 0; i < samples; ++i){
negative_log_likelihoods[i] = -std::log(static_cast<Td>(correct_confidences[i]));
}
return negative_log_likelihoods;
}
};
} // end namespace neural_networks

View File

@@ -3,10 +3,11 @@
#include "datasets/spiral.h"
#include "layers/dense_layer.h"
#include "layers/Dense_Layer.h"
#include "activation_functions/ReLU.h"
#include "activation_functions/Softmax.h"
#include "activation_functions/Activation_ReLU.h"
#include "activation_functions/Activation_Softmax.h"
#include "loss/loss.h"
#include "loss/Loss.h" // Base
#include "loss/Loss_CategoricalCrossentrophy.h"