Sync public subset from Flux (private)

Gitea CI
2025-10-06 20:14:13 +00:00
parent 272e77c536
commit b2d00af0e1
390 changed files with 152131 additions and 0 deletions

0
include/core/.gitkeep Normal file

@@ -0,0 +1,36 @@
#pragma once
namespace core {

enum class GridKind   { Uniform, NonUniform };
enum class FDKind     { Central, Forward, Backward };
enum class BCKind     { Dirichlet, Neumann /*, Robin*/ };
enum class SolverKind { LU, Inverse /*, CG*/ };

// One boundary condition: finite-difference (FD) scheme at the boundary, BC type, and value.
template <typename T>
struct BC {
    FDKind fd{FDKind::Forward};
    BCKind kind{BCKind::Dirichlet};
    T value{T(0)};
};

// Global default config holder
template <typename T>
struct Configs {
    GridKind grid{GridKind::Uniform};
    FDKind fd{FDKind::Central};
    BC<T> left{FDKind::Forward, BCKind::Dirichlet, T(0)};
    BC<T> right{FDKind::Backward, BCKind::Dirichlet, T(0)};
    SolverKind solver{SolverKind::LU};

    static Configs& defaults() {
        static Configs g{}; // process-wide defaults
        return g;
    }
};

} // namespace core
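
A minimal usage sketch (not part of this commit) showing how the process-wide defaults above might be read and overridden. The include path "core/configs.h" is an assumption; the actual file name for this header is not visible in this diff.

// Sketch only: assumes the core header above is reachable as "core/configs.h".
#include "core/configs.h"
#include <cstdio>

int main() {
    // Grab the process-wide defaults (Meyers singleton) and adjust them once.
    auto& cfg = core::Configs<double>::defaults();
    cfg.grid  = core::GridKind::NonUniform;
    cfg.right = core::BC<double>{core::FDKind::Backward, core::BCKind::Neumann, 1.0};

    // Every later reader of Configs<double>::defaults() now sees these values.
    std::printf("solver is LU: %d\n", cfg.solver == core::SolverKind::LU);
    return 0;
}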

53
include/core/omp_config.h Normal file

@@ -0,0 +1,53 @@
#pragma once
#include <vector>
#include <omp.h>

namespace omp_config {

// Configure OpenMP behavior at runtime.
inline void omp_configure(int max_active_levels,
                          bool dynamic_threads,
                          const std::vector<int>& threads_per_level = {},
                          bool bind_close = true)
{
    // 1) Allow nested parallel regions (levels of teams)
    //    Example: outer #pragma omp parallel ... { inner #pragma omp parallel ... }
    omp_set_max_active_levels(max_active_levels); // 1 = only top-level; 2+ enables nesting

    // 2) Let the runtime shrink/grow thread counts if it thinks it should
    //    (helps avoid oversubscription when you accidentally ask for too many threads)
    omp_set_dynamic(dynamic_threads ? 1 : 0);

    // 3) Thread binding (keep threads near their cores) is controlled via env vars,
    //    so here we just *recommend* a good default (see below). You *can* setenv()
    //    in code, but it's cleaner to do it outside the program.
    (void)bind_close; // documented below in env var section

    // 4) Top-level default thread count (inner levels are usually set per region)
    if (!threads_per_level.empty()) {
        omp_set_num_threads(threads_per_level[0]); // e.g. 16 for the outermost team
        // Inner levels:
        //  - Use num_threads(threads_per_level[L]) on the inner #pragma omp parallel
        //  - or set OMP_NUM_THREADS="outer,inner,inner2" as an environment variable
    }
}

// ---------- Helper: may we create another team? ----------
inline bool omp_parallel_allowed() {
#ifdef _OPENMP
    // If we're not in parallel, we can spawn.
    if (!omp_in_parallel()) return true;
    // Already inside parallel: allow only if nesting is enabled and not at limit.
    int level = omp_get_active_level();      // 0 outside parallel, 1 inside, ...
    int maxlv = omp_get_max_active_levels(); // user/runtime cap
    return level < maxlv;
#else
    return false; // no OpenMP → no extra teams
#endif
}

} // namespace omp_config
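
A short usage sketch for omp_config (not part of this commit). It assumes the header is includable as "core/omp_config.h" with include/ on the include path, and it uses the standard OpenMP binding variables (OMP_PROC_BIND, OMP_PLACES) that the bind_close comment alludes to; the thread counts are arbitrary example values.

// Build with OpenMP enabled, e.g.: g++ -fopenmp -O2 example.cpp
// Suggested environment for "close" binding (matches the bind_close hint above):
//   export OMP_PROC_BIND=close
//   export OMP_PLACES=cores
#include "core/omp_config.h" // assumed include path for the header above
#include <omp.h>
#include <cstdio>

int main() {
    // Two active levels: an outer team of 4 threads, inner teams of 2 where allowed.
    omp_config::omp_configure(/*max_active_levels=*/2,
                              /*dynamic_threads=*/false,
                              /*threads_per_level=*/{4, 2});

    #pragma omp parallel num_threads(4)
    {
        if (omp_config::omp_parallel_allowed()) {
            // Nesting is enabled and we are below the level cap: spawn an inner team.
            #pragma omp parallel num_threads(2)
            {
                std::printf("outer %d / inner %d\n",
                            omp_get_ancestor_thread_num(1), // outer thread id
                            omp_get_thread_num());          // inner thread id
            }
        }
    }
    return 0;
}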