diff --git a/assets/logo/logo_openbuild_1600.png b/assets/logo/logo_openbuild_1600.png
new file mode 100644
index 0000000..f809819
Binary files /dev/null and b/assets/logo/logo_openbuild_1600.png differ
diff --git a/include/modules/neural_networks/activation_functions/Activation_ReLU.h b/include/modules/neural_networks/activation_functions/Activation_ReLU.h
new file mode 100644
index 0000000..add7906
--- /dev/null
+++ b/include/modules/neural_networks/activation_functions/Activation_ReLU.h
@@ -0,0 +1,24 @@
+#pragma once
+
+#include "./core/omp_config.h"
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+
+
+namespace neural_networks{
+
+    template <typename T>
+    struct Activation_ReLU{
+
+        //utils::Matrix<T> inputs;
+        utils::Matrix<T> outputs;
+
+        void forward(const utils::Matrix<T>& inputs){
+            outputs = numerics::matclip_low(inputs, T{0});
+        }
+    };
+
+
+
+} // end namespace neural_networks
\ No newline at end of file
diff --git a/include/modules/neural_networks/activation_functions/Softmax.h b/include/modules/neural_networks/activation_functions/Activation_Softmax.h
similarity index 59%
rename from include/modules/neural_networks/activation_functions/Softmax.h
rename to include/modules/neural_networks/activation_functions/Activation_Softmax.h
index a6431f8..2be0034 100644
--- a/include/modules/neural_networks/activation_functions/Softmax.h
+++ b/include/modules/neural_networks/activation_functions/Activation_Softmax.h
@@ -5,27 +5,30 @@
 #include "./utils/vector.h"
 #include "./utils/matrix.h"
 
-#include "./numerics/max.h"
+#include "./numerics/matmax.h"
 #include "./numerics/matsubtract.h"
-#include "./numerics/exponential.h"
+#include "./numerics/matexp.h"
 #include "./numerics/matdiv.h"
 
 
 namespace neural_networks{
 
     template <typename T>
-    struct activation_softmax{
+    struct Activation_Softmax{
 
         utils::Matrix<T> exp_values;
         utils::Matrix<T> probabilities;
         utils::Matrix<T> outputs;
 
-        void forward(const utils::Matrix<T> inputs){
+        void forward(const utils::Matrix<T>& inputs){
+            // Get unnormalized probabilities
+            exp_values = numerics::matexp(numerics::matsubtract(inputs, numerics::matmax(inputs, "rows"), "col"));
 
-            exp_values = numerics::exponential(numerics::matsubtract(inputs, numerics::max(inputs, "rows"), "col"));
+            // Normalize them for each sample
             probabilities = numerics::matdiv(exp_values, numerics::matsum(exp_values, "col"), "col");
+
             outputs = probabilities;
         }
diff --git a/include/modules/neural_networks/activation_functions/ReLU.h b/include/modules/neural_networks/activation_functions/ReLU.h
deleted file mode 100644
index 9e38e10..0000000
--- a/include/modules/neural_networks/activation_functions/ReLU.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#pragma once
-
-#include "./core/omp_config.h"
-
-#include "./utils/vector.h"
-#include "./utils/matrix.h"
-#include "./utils/random.h"
-
-
-namespace neural_networks{
-
-    template <typename T>
-    struct activation_ReLU{
-
-        utils::Matrix<T> outputs;
-
-        void forward(utils::Matrix<T> inputs){
-            outputs = numerics::max(inputs, T{0});
-            //outputs.print();
-        }
-
-
-
-    };
-
-
-
-} // end namespace neural_networks
\ No newline at end of file
diff --git a/include/modules/neural_networks/datasets/spiral.h b/include/modules/neural_networks/datasets/spiral.h
index 4b95a8b..b5e5836 100644
--- a/include/modules/neural_networks/datasets/spiral.h
+++ b/include/modules/neural_networks/datasets/spiral.h
@@ -11,7 +11,7 @@ namespace neural_networks{
 
     template <typename TX, typename TY>
-    void create_spital_data(const uint64_t samples, const uint64_t classes, utils::Matrix<TX>& X, utils::Vector<TY>& y) {
+    void create_spital_data(const uint64_t samples, const uint64_t classes, utils::Matrix<TX>& X, utils::Matrix<TY>& y) {
 
         const uint64_t rows = samples*classes;
         TX r, t;
@@ -21,8 +21,8 @@
         if ((rows != X.rows()) || (X.cols() != 2)){
             X.resize(samples*classes, 2);
         }
-        if (rows != y.size()){
-            y.resize(rows);
+        if (rows != y.rows()){
+            y.resize(rows, 1);
         }
 
         for (uint64_t i = 0; i < classes; ++i){
@@ -33,7 +33,7 @@
                 X(row_idx, 0) = r*std::cos(t*2.5) + utils::random(TX{-0.15}, TX{0.15});
                 X(row_idx, 1) = r*std::sin(t*2.5) + utils::random(TX{-0.15}, TX{0.15});
 
-                y[row_idx] = static_cast<TY>(i);
+                y(row_idx, 0) = static_cast<TY>(i);
             }
         }
     }
diff --git a/include/modules/neural_networks/layers/dense_layer.h b/include/modules/neural_networks/layers/Dense_Layer.h
similarity index 69%
rename from include/modules/neural_networks/layers/dense_layer.h
rename to include/modules/neural_networks/layers/Dense_Layer.h
index 51a8ba7..4acc229 100644
--- a/include/modules/neural_networks/layers/dense_layer.h
+++ b/include/modules/neural_networks/layers/Dense_Layer.h
@@ -10,31 +10,28 @@ namespace neural_networks{
 
     template <typename T>
-    struct dense_layer{
+    struct Dense_Layer{
+
+        //utils::Matrix<T> _inputs;
 
         utils::Matrix<T> weights;
        utils::Vector<T> biases;
         utils::Matrix<T> outputs;
 
+
        // Default Constructor
-        dense_layer() = default;
+        Dense_Layer() = default;
 
         // Constructor
-        dense_layer(const uint64_t n_inputs, const uint64_t n_neurons){
+        Dense_Layer(const uint64_t n_inputs, const uint64_t n_neurons){
             weights.random(n_inputs, n_neurons, -1, 1);
             biases.resize(n_neurons, T{0});
-            //weights.print();
-            //outputs.resize()
         }
 
-
-        void forward(utils::Matrix<T> inputs){
-            outputs = numerics::matadd(numerics::matmul_auto(inputs, (weights)), biases, "row");
+        void forward(utils::Matrix<T>& inputs){
+            outputs = numerics::matadd(numerics::matmul_auto(inputs, weights), biases, "row");
         }
-
-
-
     };
diff --git a/include/modules/neural_networks/loss/Loss _CategoricalCrossentrophy.h b/include/modules/neural_networks/loss/Loss _CategoricalCrossentrophy.h
deleted file mode 100644
index fa4c6c2..0000000
--- a/include/modules/neural_networks/loss/Loss _CategoricalCrossentrophy.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#pragma once
-
-#include "./core/omp_config.h"
-
-#include "./utils/vector.h"
-#include "./utils/matrix.h"
-
-
-namespace neural_networks{
-
-    template <typename Td, typename Ty>
-    struct Loss{
-
-        utils::Matrix<Td> sample_losses;
-        Td data_losses;
-
-        virtual utils::Vector<Td> forward(const utils::Matrix<Td>& output, const utils::Matrix<Ty>& y) = 0;
-
-        Td calculate(const utils::Matrix<Td>& output, const utils::Matrix<Ty>& y){
-            // Calculate sample losses
-            sample_losses = forward(output, y);
-
-            // Calculate mean loss
-            data_losses = numerics::mean(sample_losses);
-            return data_losses;
-
-        }
-
-
-    };
-
-
-
-} // end namespace neural_networks
\ No newline at end of file
diff --git a/include/modules/neural_networks/loss/loss.h b/include/modules/neural_networks/loss/Loss.h
similarity index 75%
rename from include/modules/neural_networks/loss/loss.h
rename to include/modules/neural_networks/loss/Loss.h
index fa4c6c2..5b3b634 100644
--- a/include/modules/neural_networks/loss/loss.h
+++ b/include/modules/neural_networks/loss/Loss.h
@@ -5,30 +5,28 @@
 #include "./utils/vector.h"
 #include "./utils/matrix.h"
 
+#include "./numerics/vecmean.h"
+
 
 namespace neural_networks{
 
     template <typename Td, typename Ty>
     struct Loss{
 
-        utils::Matrix<Td> sample_losses;
-        Td data_losses;
+        utils::Vector<Td> sample_losses;
+        Td data_loss;
 
         virtual utils::Vector<Td> forward(const utils::Matrix<Td>& output, const utils::Matrix<Ty>& y) = 0;
 
         Td calculate(const utils::Matrix<Td>& output, const utils::Matrix<Ty>& y){
+            // Calculate sample losses
             sample_losses = forward(output, y);
 
             // Calculate mean loss
-            data_losses = numerics::mean(sample_losses);
-            return data_losses;
+            data_loss = numerics::vecmean(sample_losses);
+            return data_loss;
 
         }
-
-
     };
-
-
 } // end namespace neural_networks
\ No newline at end of file
diff --git a/include/modules/neural_networks/loss/Loss_CategoricalCrossentrophy.h b/include/modules/neural_networks/loss/Loss_CategoricalCrossentrophy.h
new file mode 100644
index 0000000..33f07ea
--- /dev/null
+++ b/include/modules/neural_networks/loss/Loss_CategoricalCrossentrophy.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#include "./core/omp_config.h"
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+#include "./utils/matcast.h"
+
+#include "./numerics/matclip.h"
+#include "./numerics/veclog.h"
+
+#include "./Loss.h"
+
+
+namespace neural_networks{
+
+    template <typename Td, typename Ty>
+    struct Loss_CategoricalCrossentrophy : Loss<Td, Ty> {
+
+        utils::Vector<Td> forward(const utils::Matrix<Td>& y_pred, const utils::Matrix<Ty>& y_true) override{
+
+            utils::Vector<Td> correct_confidences(y_true.rows(), Td{0});
+            utils::Matrix<Td> cast_y_true = utils::matcast<Td>(y_true);
+
+            // Number of samples in a batch
+            const uint64_t samples = y_true.rows();
+
+            // Clip data to prevent dividing by 0
+            // Clip both sides to not drag mean towards any value
+            utils::Matrix<Td> y_pred_clipped = numerics::matclip(y_pred, Td{1e-7}, Td{1.0} - Td{1e-7});
+
+            // Probabilities for target values -
+            // only if categorical labels
+            if (y_true.cols() == 1){
+                for (uint64_t i = 0; i < y_true.rows(); ++i){
+                    const uint64_t idx = static_cast<uint64_t>(y_true(i, 0));
+                    correct_confidences[i] = y_pred_clipped(i, idx);
+                }
+            }else{ // Mask values - only for one-hot encoded labels
+                correct_confidences = numerics::matdot_row(y_pred_clipped, cast_y_true);
+
+            }
+            // Losses
+            utils::Vector<Td> negative_log_likelihoods(samples, Td{0});
+            for (uint64_t i = 0; i < samples; ++i){
+                negative_log_likelihoods[i] = -std::log(static_cast<Td>(correct_confidences[i]));
+            }
+
+            return negative_log_likelihoods;
+        }
+    };
+
+
+
+} // end namespace neural_networks
\ No newline at end of file
diff --git a/include/modules/neural_networks/neural_networks.h b/include/modules/neural_networks/neural_networks.h
index c0d32ab..3db2b2b 100644
--- a/include/modules/neural_networks/neural_networks.h
+++ b/include/modules/neural_networks/neural_networks.h
@@ -3,10 +3,11 @@
 
 #include "datasets/spiral.h"
 
-#include "layers/dense_layer.h"
+#include "layers/Dense_Layer.h"
 
-#include "activation_functions/ReLU.h"
-#include "activation_functions/Softmax.h"
+#include "activation_functions/Activation_ReLU.h"
+#include "activation_functions/Activation_Softmax.h"
 
-#include "loss/loss.h"
\ No newline at end of file
+#include "loss/Loss.h" // Base
+#include "loss/Loss_CategoricalCrossentrophy.h"
diff --git a/include/numerics/exp.h b/include/numerics/exp.h
new file mode 100644
index 0000000..238e988
--- /dev/null
+++ b/include/numerics/exp.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <cmath>
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+
+
+namespace numerics{
+
+    template <typename T>
+    T exp(const T a){
+        return std::exp(a);
+    }
+
+
+} // namespace numerics
+
diff --git a/include/numerics/exponential.h b/include/numerics/exponential.h
deleted file mode 100644
index cfe59a7..0000000
--- a/include/numerics/exponential.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#pragma once
-
-#include <cmath>
-
-#include "./utils/vector.h"
-#include "./utils/matrix.h"
-
-
-namespace numerics{
-
-    template <typename T>
-    T exponential(const T a){
-        return std::exp(a);
-    }
-
-    template <typename T>
-    utils::Vector<T> exponential(const utils::Vector<T>& a){
-        utils::Vector<T> b = a;
-        for (uint64_t i = 0; i < a.size(); ++i){
-            b[i] = numerics::exponential(a[i]);
-        }
-        return b;
-    }
-
-    template <typename T>
-    utils::Matrix<T> exponential(const utils::Matrix<T>& A){
-        utils::Matrix<T> B = A;
-        for (uint64_t i = 0; i < A.rows(); ++i){
-            for (uint64_t j = 0; j < A.cols(); ++j){
-                B(i,j) = numerics::exponential(A(i,j));
-            }
-        }
-        return B;
-    }
-
-
-
-} // namespace numerics
-
diff --git a/include/numerics/log.h b/include/numerics/log.h
new file mode 100644
index 0000000..d178580
--- /dev/null
+++ b/include/numerics/log.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <cmath>
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+
+
+namespace numerics{
+
+
+    template <typename T>
+    void inplace_log(T& a){
+        a = std::log(a);
+    }
+
+    template <typename T>
+    T log(const T a){
+        T b = a;
+        inplace_log(b);
+        return b;
+    }
+
+
+} // namespace numerics
+
diff --git a/include/numerics/matclip.h b/include/numerics/matclip.h
new file mode 100644
index 0000000..3ddc46c
--- /dev/null
+++ b/include/numerics/matclip.h
@@ -0,0 +1,76 @@
+#pragma once
+
+#include "./utils/matrix.h"
+
+
+namespace numerics{
+
+    template <typename T>
+    void inplace_matclip_high(utils::Matrix<T>& A, T high){
+        uint64_t rows = A.rows();
+        uint64_t cols = A.cols();
+
+        for (uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                if (A(i,j) > high){
+                    A(i,j) = high;
+                }
+            }
+        }
+    }
+    template <typename T>
+    void inplace_matclip_low(utils::Matrix<T>& A, T low){
+        uint64_t rows = A.rows();
+        uint64_t cols = A.cols();
+
+        for (uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                if (A(i,j) < low){
+                    A(i,j) = low;
+                }
+            }
+        }
+    }
+    template <typename T>
+    void inplace_matclip(utils::Matrix<T>& A, T low, T high){
+        uint64_t rows = A.rows();
+        uint64_t cols = A.cols();
+
+        for (uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                if (A(i,j) > high){
+                    A(i,j) = high;
+                }else if (A(i,j) < low){
+                    A(i,j) = low;
+                }
+            }
+        }
+    }
+    template <typename Td>
+    utils::Matrix<Td> matclip_high(const utils::Matrix<Td>& A, Td high){
+
+        utils::Matrix<Td> B = A;
+        inplace_matclip_high(B, high);
+
+        return B;
+    }
+    template <typename Td>
+    utils::Matrix<Td> matclip_low(const utils::Matrix<Td>& A, Td low){
+
+        utils::Matrix<Td> B = A;
+        inplace_matclip_low(B, low);
+
+        return B;
+    }
+
+    template <typename Td>
+    utils::Matrix<Td> matclip(const utils::Matrix<Td>& A, Td low, Td high){
+
+        utils::Matrix<Td> B = A;
+        inplace_matclip(B, low, high);
+
+        return B;
+    }
+
+} // namespace numerics
+
diff --git a/include/numerics/matdot.h b/include/numerics/matdot.h
new file mode 100644
index 0000000..5de22b6
--- /dev/null
+++ b/include/numerics/matdot.h
@@ -0,0 +1,63 @@
+#pragma once
+
+#include "./core/omp_config.h"
+
+#include "./utils/matrix.h"
+#include "./utils/vector.h"
+
+
+namespace numerics{
+
+
+    template <typename T>
+    utils::Vector<T> matdot_row(const utils::Matrix<T>& A, const utils::Matrix<T>& B){
+
+        if (A.rows() != B.rows() || A.cols() != B.cols()){
+            throw std::runtime_error("matdot_row: dimension mismatch");
+        }
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+
+        utils::Vector<T> c(rows, T{0});
+
+        for (uint64_t i = 0; i < rows; ++i){
+            T sum = T{0};
+            for (uint64_t j = 0; j < cols; ++j){
+                sum += A(i,j) * B(i,j);
+            }
+            c[i] = sum;
+        }
+        return c;
+    }
+
+    template <typename T>
+    utils::Vector<T> matdot_col(const utils::Matrix<T>& A, const utils::Matrix<T>& B){
+
+        if (A.rows() != B.rows() || A.cols() != B.cols()){
+            throw std::runtime_error("matdot_col: dimension mismatch");
+        }
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+
+        utils::Vector<T> c(cols, T{0});
+
+        for (uint64_t j = 0; j < cols; ++j){
+            T sum = T{0};
+            for (uint64_t i = 0; i < rows; ++i){
+                sum += A(i,j) * B(i,j);
+            }
+            c[j] = sum;
+        }
+        return c;
+    }
+
+
+
+
+
+} // namespace numerics
+
diff --git a/include/numerics/matexp.h b/include/numerics/matexp.h
new file mode 100644
index 0000000..dd7f6be
--- /dev/null
+++ b/include/numerics/matexp.h
@@ -0,0 +1,22 @@
+#pragma once
+
+#include "./utils/matrix.h"
+
+namespace numerics{
+
+
+    template <typename T>
+    utils::Matrix<T> matexp(const utils::Matrix<T>& A){
+        utils::Matrix<T> B = A;
+        for (uint64_t i = 0; i < A.rows(); ++i){
+            for (uint64_t j = 0; j < A.cols(); ++j){
+                B(i,j) = numerics::exp(A(i,j));
+            }
+        }
+        return B;
+    }
+
+
+
+} // namespace numerics
+
diff --git a/include/numerics/matlog.h b/include/numerics/matlog.h
new file mode 100644
index 0000000..00c3469
--- /dev/null
+++ b/include/numerics/matlog.h
@@ -0,0 +1,34 @@
+#pragma once
+
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+
+
+namespace numerics{
+
+
+    template <typename T>
+    void inplace_matlog(utils::Matrix<T>& A){
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+        for (uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                numerics::inplace_log(A(i,j));
+            }
+        }
+    }
+
+    template <typename T>
+    utils::Matrix<T> log(const utils::Matrix<T>& A){
+
+        utils::Matrix<T> B = A;
+        inplace_matlog(B);
+        return B;
+    }
+
+
+} // namespace numerics
+
diff --git a/include/numerics/matmax.h b/include/numerics/matmax.h
new file mode 100644
index 0000000..74b2102
--- /dev/null
+++ b/include/numerics/matmax.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#include "./utils/matrix.h"
+
+namespace numerics{
+
+    template <typename T>
+    T matmax(const utils::Matrix<T>& A){
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+        T max_value(T{0});
+
+        for (uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                max_value = numerics::max(max_value, A(i,j));
+            }
+        }
+        return max_value;
+    }
+
+    template <typename T>
+    utils::Vector<T> matmax(const utils::Matrix<T>& A, std::string method){
+
+        utils::Vector<T> b;
+
+        if (method == "cols"){
+            b.resize(A.cols(), T{0});
+            for (uint64_t i = 0; i < A.cols(); ++i){
+                for (uint64_t j = 0; j < A.rows(); ++j){
+                    b[i] = numerics::max(A(j, i), b[i]);
+                }
+            }
+        }else if (method == "rows"){
+            b.resize(A.rows(), T{0});
+            for (uint64_t i = 0; i < A.rows(); ++i){
+                for (uint64_t j = 0; j < A.cols(); ++j){
+                    b[i] = numerics::max(A(i, j), b[i]);
+                }
+            }
+        }else{
+            throw std::runtime_error("matmax: choose 'rows' or 'cols'");
+        }
+        return b;
+
+    }
+
+
+
+} // namespace numerics
+
diff --git a/include/numerics/matmean.h b/include/numerics/matmean.h
new file mode 100644
index 0000000..ccceccc
--- /dev/null
+++ b/include/numerics/matmean.h
@@ -0,0 +1,88 @@
+#ifndef _matmean_n_
+#define _matmean_n_
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+#include "./core/omp_config.h"
+
+namespace numerics{
+
+    template <typename T>
+    T matmean(utils::Matrix<T>& A) {
+
+        T mean(T{0});
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+
+        for (uint64_t i = 0; i < cols; ++i) {
+            for (uint64_t j = 0; j < rows; ++j) {
+                mean += A(j, i);
+            }
+        }
+        mean /= (static_cast<T>(rows) * static_cast<T>(cols));
+        return mean;
+    }
+
+
+    template <typename T>
+    void inplace_matmean_row(utils::Matrix<T>& A, utils::Vector<T>& b) {
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+        if (b.size() != cols){
+            b.resize(cols, T{0});
+        }
+
+        for(uint64_t j = 0; j < cols; ++j){
+            for (uint64_t i = 0; i < rows; ++i){
+                b[j] += A(i, j);
+            }
+            b[j] /= static_cast<T>(rows);
+        }
+    }
+
+    template <typename T>
+    void inplace_matmean_cols(utils::Matrix<T>& A, utils::Vector<T>& b) {
+
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+        if (b.size() != rows){
+            b.resize(rows, T{0});
+        }
+
+        for(uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                b[i] += A(i, j);
+            }
+            b[i] /= static_cast<T>(cols);
+        }
+    }
+
+
+    template <typename T>
+    utils::Vector<T> matmean_row(utils::Matrix<T>& A) {
+
+        utils::Vector<T> b(A.cols(), T{0});
+
+        inplace_matmean_row(A, b);
+
+        return b;
+    }
+
+    template <typename T>
+    utils::Vector<T> matmean_col(utils::Matrix<T>& A) {
+
+        utils::Vector<T> b(A.rows(), T{0});
+
+        inplace_matmean_cols(A, b);
+
+        return b;
+    }
+
+} // namespace numerics
+
+#endif // _matmean_n_
\ No newline at end of file
diff --git a/include/numerics/matsum.h b/include/numerics/matsum.h
index 9bf0ebd..51774ff 100644
--- a/include/numerics/matsum.h
+++ b/include/numerics/matsum.h
@@ -8,7 +8,7 @@ namespace numerics{
 
     template <typename T>
-    utils::Vector<T> matsum(utils::Matrix<T>& A, std::string method) {
+    utils::Vector<T> matsum(const utils::Matrix<T>& A, std::string method) {
 
         utils::Vector<T> b;
diff --git a/include/numerics/max.h b/include/numerics/max.h
index 4e0db24..1740b2b 100644
--- a/include/numerics/max.h
+++ b/include/numerics/max.h
@@ -1,10 +1,5 @@
 #pragma once
 
-
-#include "./utils/vector.h"
-#include "./utils/matrix.h"
-
-
 namespace numerics{
 
     template <typename T>
@@ -17,60 +12,5 @@
         }
     }
 
-    template <typename T>
-    void inplace_max(utils::Matrix<T>& A, const T b){
-
-        const uint64_t rows = A.rows();
-        const uint64_t cols = A.cols();
-
-        for (uint64_t i = 0; i < rows; ++i){
-            for (uint64_t j = 0; j < cols; ++j){
-
-                if (b > A(i,j)){
-                    //std::cout << A(i,j) << std::endl;
-                    A(i,j) = b;
-                    //std::cout << A(i,j) << std::endl;
-                }
-            }
-        }
-    }
-
-    template <typename T>
-    utils::Matrix<T> max(const utils::Matrix<T>& A, const T b){
-
-        utils::Matrix<T> B = A;
-        inplace_max(B, b);
-        return B;
-    }
-
-    template <typename T>
-    utils::Vector<T> max(const utils::Matrix<T>& A, std::string method){
-
-        utils::Vector<T> b;
-
-        if (method == "cols"){
-            b.resize(A.cols(), T{0});
-            for (uint64_t i = 0; i < A.cols(); ++i){
-                for (uint64_t j = 0; j < A.rows(); ++j){
-                    b[i] = max(A(j, i), b[i]);
-                }
-            }
-        }else if (method == "rows"){
-            b.resize(A.rows(), T{0});
-            for (uint64_t i = 0; i < A.rows(); ++i){
-                for (uint64_t j = 0; j < A.cols(); ++j){
-                    //std::cout << i << ":" << j << std::endl;
-                    b[i] = max(A(i, j), b[i]);
-                }
-            }
-        }else{
-            throw std::runtime_error("max: choose 'rows or 'cols'");
-        }
-        return b;
-
-    }
-
-
-
 } // namespace numerics
diff --git a/include/numerics/mean.h b/include/numerics/mean.h
deleted file mode 100644
index b8166a4..0000000
--- a/include/numerics/mean.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _mean_n_
-#define _mean_n_
-
-#include "./utils/vector.h"
-#include "./utils/matrix.h"
-#include "./core/omp_config.h"
-
-namespace numerics{
-
-    template <typename T>
-    T mean(utils::Vector<T>& A) {
-
-        T mean(T{0});
-
-        const uint64_t rows = A.rows();
-        const uint64_t cols = A.cols();
-
-
-        for (uint64_t i = 0; i < cols; ++i) {
-            for (uint64_t j = 0; j < rows; ++j) {
-                mean += A(j, i);
-            }
-        }
-        mean /= (static_cast<T>(rows) * static_cast<T>(cols));
-        return mean;
-    }
-
-
-} // namespace numerics
-
-#endif // _mean_n_
\ No newline at end of file
diff --git a/include/numerics/numerics.h b/include/numerics/numerics.h
index 1c0d2e3..d048dab 100644
--- a/include/numerics/numerics.h
+++ b/include/numerics/numerics.h
@@ -1,21 +1,31 @@
 // "./numerics/numerics.h"
 #pragma once
 
-
+#include "./numerics/max.h"
+#include "./numerics/exp.h"
+#include "./numerics/log.h"
 
 #include "./numerics/initializers/eye.h"
 
 #include "./numerics/matequal.h"
 #include "./numerics/transpose.h"
 #include "./numerics/inverse.h"
 #include "./numerics/matmul.h"
+#include "./numerics/matmax.h"
 #include "./numerics/matdiv.h"
 #include "./numerics/matvec.h"
 #include "./numerics/matadd.h"
 #include "./numerics/matsubtract.h"
 #include "./numerics/matsum.h"
+#include "./numerics/matclip.h"
+#include "./numerics/matexp.h"
+#include "./numerics/matlog.h"
+#include "./numerics/matdot.h"
 
 #include "./numerics/min.h"
-#include "./numerics/max.h"
 #include "./numerics/abs.h"
-#include "./numerics/mean.h"
-#include "./numerics/exponential.h"
+#include "./numerics/vecclip.h"
+#include "./numerics/vecexp.h"
+#include "./numerics/vecmax.h"
+#include "./numerics/veclog.h"
+
+
 
 #include "./numerics/interpolation1d.h" // base
diff --git a/include/numerics/vecclip.h b/include/numerics/vecclip.h
new file mode 100644
index 0000000..e259c66
--- /dev/null
+++ b/include/numerics/vecclip.h
@@ -0,0 +1,69 @@
+#pragma once
+
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+
+
+namespace numerics{
+
+    template <typename T>
+    void inplace_vecclip_high(utils::Vector<T>& a, T high){
+        uint64_t N = a.size();
+
+        for (uint64_t i = 0; i < N; ++i){
+            if (a[i] > high){
+                a[i] = high;
+            }
+        }
+    }
+    template <typename T>
+    void inplace_vecclip_low(utils::Vector<T>& a, T low){
+        uint64_t N = a.size();
+
+        for (uint64_t i = 0; i < N; ++i){
+            if (a[i] < low){
+                a[i] = low;
+            }
+        }
+    }
+    template <typename T>
+    void inplace_vecclip(utils::Vector<T>& a, T low, T high){
+        uint64_t N = a.size();
+
+        for (uint64_t i = 0; i < N; ++i){
+            if (a[i] > high){
+                a[i] = high;
+            }else if (a[i] < low){
+                a[i] = low;
+            }
+        }
+    }
+    template <typename Td>
+    utils::Vector<Td> vecclip_high(const utils::Vector<Td>& a, Td high){
+
+        utils::Vector<Td> b = a;
+        inplace_vecclip_high(b, high);
+
+        return b;
+    }
+    template <typename Td>
+    utils::Vector<Td> vecclip_low(const utils::Vector<Td>& a, Td low){
+
+        utils::Vector<Td> b = a;
+        inplace_vecclip_low(b, low);
+
+        return b;
+    }
+    template <typename Td>
+    utils::Vector<Td> vecclip(const utils::Vector<Td>& a, Td low, Td high){
+
+        utils::Vector<Td> b = a;
+        inplace_vecclip(b, low, high);
+
+        return b;
+    }
+
+
+} // namespace numerics
+
diff --git a/include/numerics/vecexp.h b/include/numerics/vecexp.h
new file mode 100644
index 0000000..73a9b56
--- /dev/null
+++ b/include/numerics/vecexp.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "./utils/vector.h"
+
+namespace numerics{
+
+
+    template <typename T>
+    utils::Vector<T> vecexp(const utils::Vector<T>& a){
+        utils::Vector<T> b = a;
+        for (uint64_t i = 0; i < a.size(); ++i){
+            b[i] = numerics::exp(a[i]);
+        }
+        return b;
+    }
+
+
+} // namespace numerics
+
diff --git a/include/numerics/veclog.h b/include/numerics/veclog.h
new file mode 100644
index 0000000..53560f5
--- /dev/null
+++ b/include/numerics/veclog.h
@@ -0,0 +1,30 @@
+#pragma once
+
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+
+
+namespace numerics{
+
+
+    template <typename T>
+    void inplace_veclog(utils::Vector<T>& a){
+
+        const uint64_t N = a.size();
+
+        for (uint64_t i = 0; i < N; ++i){
+            numerics::inplace_log(a[i]);
+        }
+    }
+
+    template <typename T>
+    utils::Vector<T> veclog(const utils::Vector<T>& a){
+
+        utils::Vector<T> b = a;
+        inplace_veclog(b);
+        return b;
+    }
+
+} // namespace numerics
+
diff --git a/include/numerics/vecmax.h b/include/numerics/vecmax.h
new file mode 100644
index 0000000..e22f42d
--- /dev/null
+++ b/include/numerics/vecmax.h
@@ -0,0 +1,22 @@
+#pragma once
+
+
+#include "./utils/vector.h"
+
+namespace numerics{
+
+
+
+    template <typename T>
+    T vecmax(const utils::Vector<T>& a){
+        T max_value(T{0});
+        uint64_t N = a.size();
+
+        for (uint64_t i = 0; i < N; ++i){
+            max_value = numerics::max(max_value, a[i]);
+        }
+        return max_value;
+    }
+
+} // namespace numerics
+
diff --git a/include/numerics/vecmean.h b/include/numerics/vecmean.h
new file mode 100644
index 0000000..268b5aa
--- /dev/null
+++ b/include/numerics/vecmean.h
@@ -0,0 +1,28 @@
+#ifndef _vecmean_n_
+#define _vecmean_n_
+
+#include "./utils/vector.h"
+#include "./utils/matrix.h"
+#include "./core/omp_config.h"
+
+namespace numerics{
+
+    template <typename T>
+    T vecmean(utils::Vector<T>& a) {
+
+        T mean(T{0});
+
+        const uint64_t N = a.size();
+
+
+        for (uint64_t i = 0; i < N; ++i) {
+            mean += a[i];
+        }
+        mean /= static_cast<T>(N);
+        return mean;
+    }
+
+
+} // namespace numerics
+
+#endif // _vecmean_n_
\ No newline at end of file
diff --git a/include/utils/matcast.h b/include/utils/matcast.h
new file mode 100644
index 0000000..7fc5dd9
--- /dev/null
+++ b/include/utils/matcast.h
@@ -0,0 +1,35 @@
+#pragma once
+
+#include "./core/omp_config.h"
+#include "./utils/matrix.h"
+
+
+namespace utils{
+
+    template <typename From, typename To>
+    void inplace_matcast(const utils::Matrix<From>& A, utils::Matrix<To>& B) {
+        if ((A.rows() != B.rows()) || (A.cols() != B.cols())){
+            throw std::runtime_error("inplace_matcast: dimension mismatch");
+        }
+        const uint64_t rows = A.rows();
+        const uint64_t cols = A.cols();
+
+        for (uint64_t i = 0; i < rows; ++i){
+            for (uint64_t j = 0; j < cols; ++j){
+                B(i,j) = static_cast<To>(A(i,j));
+            }
+        }
+    }
+
+
+    template <typename To, typename From>
+    utils::Matrix<To> matcast(const utils::Matrix<From>& A) {
+        utils::Matrix<To> B(A.rows(), A.cols(), To{0});
+
+        inplace_matcast(A,B);
+
+        return B;
+    }
+
+} // end namespace utils
+
diff --git a/include/utils/utils.h b/include/utils/utils.h
index 5b7b8bf..5b8b39a 100644
--- a/include/utils/utils.h
+++ b/include/utils/utils.h
@@ -5,3 +5,4 @@
 #include "./utils/matrix.h"
 #include "./utils/generators.h"
 #include "./utils/random.h"
+#include "./utils/matcast.h"
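A minimal end-to-end usage sketch follows; it is illustrative only and not part of the patch. It assumes the headers are reachable from an include root at include/, double-valued features and labels, the two-parameter Loss template used above, and the utils::Matrix/utils::Vector interfaces shown in this diff. The file name and variable names are hypothetical.

// example_forward.cpp -- hypothetical usage sketch, not included in this patch.
#include "modules/neural_networks/neural_networks.h"

#include <iostream>

int main(){
    // Spiral dataset: 100 samples per class, 3 classes; labels stored as a column matrix.
    utils::Matrix<double> X;
    utils::Matrix<double> y;
    neural_networks::create_spital_data<double, double>(100, 3, X, y);

    // Two dense layers with ReLU in between and softmax on the output.
    neural_networks::Dense_Layer<double> dense1(2, 3);
    neural_networks::Activation_ReLU<double> activation1;
    neural_networks::Dense_Layer<double> dense2(3, 3);
    neural_networks::Activation_Softmax<double> activation2;
    neural_networks::Loss_CategoricalCrossentrophy<double, double> loss_function;

    // Forward pass through both layers and their activations.
    dense1.forward(X);
    activation1.forward(dense1.outputs);
    dense2.forward(activation1.outputs);
    activation2.forward(dense2.outputs);

    // Mean categorical cross-entropy over the batch.
    const double loss = loss_function.calculate(activation2.outputs, y);
    std::cout << "loss: " << loss << std::endl;
    return 0;
}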