Sync public subset from Flux

This commit is contained in:
Gitea CI
2025-10-20 12:24:21 +00:00
parent 9a69d64d79
commit a334b74935
8 changed files with 230 additions and 35 deletions

View File

@@ -11,30 +11,47 @@
#include "./numerics/matdiv.h"
namespace neural_networks{
template <typename T>
struct Activation_Softmax{
    // Row-wise softmax probabilities produced by the last forward() call.
    utils::Matrix<T> outputs;
    // Gradient with respect to the inputs, filled by backward().
    utils::Matrix<T> dinputs;

    // Forward pass: numerically stable softmax, one row per sample.
    // (Removed the diff-residue member/local duplicates of exp_values and
    // probabilities; the intermediates are plain locals, and the normalized
    // result is assigned straight into `outputs` to avoid an extra copy.)
    void forward(const utils::Matrix<T>& inputs){
        // Subtract each row's max before exponentiating so exp() cannot overflow.
        utils::Matrix<T> exp_values = numerics::matexp(numerics::matsubtract(inputs, numerics::matmax(inputs, "rows"), "col"));
        // Normalize each row so it sums to 1 — these are the probabilities.
        outputs = numerics::matdiv(exp_values, numerics::matsum(exp_values, "col"), "col");
    }

    // Backward pass: Jacobian-vector product of softmax.
    // dinputs[i,j] = s[i,j] * (dvalues[i,j] - sum_k s[i,k]*dvalues[i,k])
    void backward(const utils::Matrix<T>& dvalues){
        const uint64_t rows = dvalues.rows();
        const uint64_t cols = dvalues.cols();
        // Reuse the buffer when the shape already matches.
        if ((dinputs.rows() != rows) || (dinputs.cols() != cols)){
            dinputs.resize(rows, cols);
        }
        for (uint64_t i = 0; i < rows; ++i){
            // dot = s_i . dvalues_i for this sample's row.
            T dot = T{0};
            for (uint64_t j = 0; j < cols; ++j){
                dot += outputs(i,j) * dvalues(i,j);
            }
            for (uint64_t j = 0; j < cols; ++j){
                dinputs(i,j) = outputs(i,j) * (dvalues(i,j) - dot);
            }
        }
    }
};
} // end namespace neural_networks

View File

@@ -0,0 +1,68 @@
#pragma once
#include "./core/omp_config.h"
#include "./utils/vector.h"
#include "./utils/matrix.h"
#include "./numerics/matmax.h"
#include "./numerics/matsubtract.h"
#include "./numerics/matexp.h"
#include "./numerics/matdiv.h"
#include "./modules/neural_networks/activation_functions/Activation_Softmax.h"
#include "./modules/neural_networks/loss/Loss_CategoricalCrossentrophy.h"
namespace neural_networks{
template <typename Td, typename Ti>
struct Activation_Softmax_Loss_CategoricalCrossentropy{
    // Softmax activation combined with categorical cross-entropy loss.
    // Combining them lets backward() use the much simpler gradient
    // (softmax_output - one_hot(y_true)) / samples.
    neural_networks::Activation_Softmax<Td> activation;
    neural_networks::Loss_CategoricalCrossentrophy<Td, Ti> loss;
    utils::Matrix<Td> outputs;  // softmax probabilities from forward()
    utils::Matrix<Td> dinputs;  // gradient w.r.t. the pre-softmax inputs

    // Runs softmax on `inputs`, stores the probabilities in `outputs`, and
    // returns the mean cross-entropy loss against `y_true`.
    // Fixed: return type is Td — Loss::calculate returns a scalar, and the
    // old utils::Vector<Td> declaration could never hold `data_loss`.
    // Fixed: the loss must be computed on the softmax probabilities
    // (`outputs`), not on the raw pre-activation `inputs`.
    Td forward(const utils::Matrix<Td>& inputs, const utils::Matrix<Ti>& y_true){
        // Output layer's activation function
        activation.forward(inputs);
        // Set the output
        outputs = activation.outputs;
        // Calculate and return loss value on the probabilities
        Td data_loss = loss.calculate(outputs, y_true);
        return data_loss;
    }

    // Combined gradient: dL/dz = (softmax(z) - one_hot(y_true)) / samples.
    // `dvalues` is expected to be the softmax output (probabilities).
    // Fixed: the previous body was a copy of Activation_Softmax::backward and
    // never used `samples` or `y_true` at all.
    void backward(const utils::Matrix<Td>& dvalues, const utils::Matrix<Ti>& y_true){
        // Number of samples
        const uint64_t samples = y_true.rows();
        const uint64_t rows = dvalues.rows();
        const uint64_t cols = dvalues.cols();
        if ((dinputs.rows() != rows) || (dinputs.cols() != cols)){
            dinputs.resize(rows, cols);
        }
        // One column of y_true means discrete class indices; otherwise one-hot.
        const bool one_hot = (y_true.cols() != 1);
        for (uint64_t i = 0; i < rows; ++i){
            // Discrete label for this sample (argmax of the row if one-hot).
            uint64_t label = 0;
            if (one_hot){
                for (uint64_t j = 1; j < cols; ++j){
                    if (y_true(i,j) > y_true(i,label)){
                        label = j;
                    }
                }
            }else{
                label = static_cast<uint64_t>(y_true(i,0));
            }
            for (uint64_t j = 0; j < cols; ++j){
                // (probability - indicator), averaged over the batch
                dinputs(i,j) = (dvalues(i,j) - ((j == label) ? Td{1} : Td{0})) / static_cast<Td>(samples);
            }
        }
    }
};
} // end namespace neural_networks

View File

@@ -13,9 +13,11 @@ namespace neural_networks{
struct Loss{
utils::Vector<Td> sample_losses;
utils::Matrix<Td> dinputs;
Td data_loss;
virtual utils::Vector<Td> forward(const utils::Matrix<Td>& output, const utils::Matrix<Ti>& y) = 0;
virtual void backward(const utils::Matrix<Td>& dvalues, const utils::Matrix<Ti>& y) = 0;
Td calculate(const utils::Matrix<Td>& output, const utils::Matrix<Ti>& y){

View File

@@ -17,6 +17,9 @@ namespace neural_networks{
template <typename Td, typename Ti>
struct Loss_CategoricalCrossentrophy : Loss<Td, Ti> {
utils::Matrix<Td> dinputs;
utils::Vector<Td> forward(const utils::Matrix<Td>& y_pred, const utils::Matrix<Ti>& y_true) override{
utils::Vector<Td> correct_confidences(y_true.rows(), Td{0});
@@ -48,6 +51,32 @@ namespace neural_networks{
return negative_log_likelihoods;
}
// Gradient of categorical cross-entropy: dL/dy_pred = -y_true / y_pred,
// averaged over the batch.
void backward(const utils::Matrix<Td>& dvalues, const utils::Matrix<Ti>& y_true) override{
    // Batch size, cast once for the final normalization.
    const Td samples = static_cast<Td> (y_true.rows());
    // Number of classes, taken from the width of the predictions.
    const Ti labels = dvalues.cols();
    // Expand sparse labels (a single column of class indices) to one-hot;
    // already-one-hot targets are used as-is.
    utils::Matrix<Ti> y_onehot;
    if (y_true.cols() == 1){
        y_onehot = utils::eye(labels, y_true.get_col(0));
    }else{
        y_onehot = y_true;
    }
    // Negate the targets, then divide element-wise: -y_true / y_pred.
    numerics::inplace_matscalar(y_onehot,Ti{-1});
    dinputs = numerics::matdiv(utils::matcast<Td, Ti>(y_onehot), dvalues);
    // Normalize by the number of samples.
    numerics::inplace_matdiv(dinputs, samples);
}
};

View File

@@ -9,6 +9,7 @@
#include "activation_functions/Activation_ReLU.h"
#include "activation_functions/Activation_Softmax.h"
#include "activation_functions/Activation_Softmax_Loss_CategoricalCrossentropy.h"
#include "loss/Loss.h" // Base
#include "loss/Loss_CategoricalCrossentrophy.h"

View File

@@ -8,7 +8,6 @@
namespace numerics{
// ---------------- Serial baseline ----------------
template <typename T>
utils::Matrix<T> matdiv(const utils::Matrix<T>& A, const utils::Vector<T>& b, std::string method){
@@ -33,6 +32,60 @@ namespace numerics{
}
template <typename T>
// Element-wise in-place division A ./= B.
// @param A  dividend matrix, overwritten with the result
// @param B  divisor matrix; must have the same shape as A
// @throws std::runtime_error on shape mismatch
void inplace_matdiv(utils::Matrix<T>& A, const utils::Matrix<T>& B){
    const uint64_t rows = A.rows();
    const uint64_t cols = A.cols();
    if ((rows != B.rows()) || (cols != B.cols())){
        // Fixed: removed the stray trailing apostrophe from the message.
        throw std::runtime_error("inplace_matdiv: rows and cols are not the same");
    }
    for (uint64_t i = 0; i < rows; ++i){
        for (uint64_t j = 0; j < cols; ++j){
            A(i,j) /= B(i,j);
        }
    }
}
template <typename T>
// Element-wise division: returns a new matrix C = A ./ B.
// @throws std::runtime_error if A and B differ in shape
utils::Matrix<T> matdiv(const utils::Matrix<T>& A, const utils::Matrix<T>& B){
    const uint64_t rows = A.rows();
    const uint64_t cols = A.cols();
    if ((rows != B.rows()) || (cols != B.cols())){
        // Fixed: the message was copy-pasted from the row/col overload and
        // talked about "choose div by: 'row' or 'col'", which is meaningless
        // for a matrix/matrix shape mismatch.
        throw std::runtime_error("matdiv: rows and cols are not the same");
    }
    utils::Matrix<T> C = A;
    inplace_matdiv(C, B);
    return C;
}
template <typename T>
// Divides every element of A by the scalar b, in place.
// No zero-divisor check is performed; behavior for b == 0 follows T's
// division semantics.
void inplace_matdiv(utils::Matrix<T>& A, const T b){
    const uint64_t row_count = A.rows();
    const uint64_t col_count = A.cols();
    for (uint64_t r = 0; r < row_count; ++r){
        for (uint64_t c = 0; c < col_count; ++c){
            A(r,c) /= b;
        }
    }
}
} // namespace numerics
#endif // _matdiv_n_

View File

@@ -2,3 +2,4 @@
#pragma once
#include "./utils/generators/linspace.h"
#include "./utils/generators/eye.h"

View File

@@ -0,0 +1,24 @@
#pragma once
#include "utils/vector.h"
#include "utils/matrix.h"
namespace utils{
template <typename T>
// Builds a one-hot matrix: row i has a 1 in column b[i] and 0 elsewhere.
// @param a  number of columns (class count); assumed to hold a non-negative
//           integral value even when T is floating point
// @param b  per-row column indices; each b[i] must satisfy 0 <= b[i] < a —
//           TODO(review): confirm callers guarantee this, no bounds check here
// @return  b.size() x a matrix of T{0}/T{1}
utils::Matrix<T> eye(const T a, const utils::Vector<T>& b){
    const uint64_t N = b.size();
    // Explicit casts: dimensions and indices are uint64_t, but T may be any
    // arithmetic type — the old code narrowed implicitly.
    utils::Matrix<T> C(N, static_cast<uint64_t>(a), T{0});
    for (uint64_t i = 0; i < N; ++i){
        C(i, static_cast<uint64_t>(b[i])) = T{1};
    }
    return C;
}
} // end namespace utils