template <MInt nDim, class SysEqn>
// ...

template <MInt nDim, class SysEqn>
// ...

// setProperties(): read the properties for all slices and create the corresponding slice objects.
template <MInt nDim, class SysEqn>
// ...
  m_enabled = Context::getSolverProperty<MBool>("sliceEnabled", solverId(), AT_);
  // ...
  m_log << "Set properties for DG Slice class...";
  // ...
  MString gridFileNameDefault = "grid.slice";
  gridFileNameDefault =
      Context::getSolverProperty<MString>("sliceGridFileName", solverId(), AT_, &gridFileNameDefault);
  // ...
  axisDefault = Context::getSolverProperty<MString>("sliceAxis", solverId(), AT_, &axisDefault);
  // ...
  MFloat interceptDefault = 0.0;
  interceptDefault = Context::getSolverProperty<MFloat>("sliceIntercept", solverId(), AT_, &interceptDefault);
  // ...
  MInt writeIntervalDefault = -1;
  if(m_solver.m_restartInterval > 0) {
    writeIntervalDefault =
        Context::getSolverProperty<MInt>("sliceWriteInterval", solverId(), AT_, &m_solver.m_restartInterval);
  } else {
    writeIntervalDefault =
        Context::getSolverProperty<MInt>("sliceWriteInterval", solverId(), AT_, &writeIntervalDefault);
  }
  // ...
  MBool sliceFound = false;
  // ...
      m_log << "not all necessary slice properties are set!" << std::endl;
  // ...
    MString gridFileName = Context::getSolverProperty<MString>("sliceGridFileName_" + to_string(noFiles), solverId(),
                                                               AT_, &gridFileNameDefault);
    const MString axis =
        Context::getSolverProperty<MString>("sliceAxis_" + to_string(noFiles), solverId(), AT_, &axisDefault);
    // ...
    const MFloat intercept =
        Context::getSolverProperty<MFloat>("sliceIntercept_" + to_string(noFiles), solverId(), AT_, &interceptDefault);
    // ...
    MInt writeInterval = Context::getSolverProperty<MInt>("sliceWriteInterval_" + to_string(noFiles), solverId(), AT_,
                                                          &writeIntervalDefault);
    // ...
    if(writeInterval < 1) {
      TERMM(1, "WriteInterval for Slice no. " + to_string(noFiles) + " is smaller than 1.");
    }
    // ...
    if(axis != "x" && axis != "y" && axis != "z") {
      TERMM(1, "Illegal axis for Slice no. " + to_string(noFiles) + ". Only x, y or z is a valid axis");
    }
    // ...
    gridFileName += '_' + to_string(noFiles);
    // ...
    m_sliceSeries.emplace_back(axis, intercept, writeInterval, gridFileName, noFiles);
  // ...
  m_sliceVarIds.clear();
  m_noSliceVars.clear();
  m_sliceVarNames.clear();
  m_solver.getSolverSamplingProperties(m_sliceVarIds, m_noSliceVars, m_sliceVarNames, "Slice");
  // ...
  m_log << "done!" << std::endl;
  // ...
  IF_CONSTEXPR(nDim == 2) {
    // ...
    TERMM(1, "Can not use DG Slice in 2D calculation.");
  }
// init(): initialize all slice objects.
template <MInt nDim, class SysEqn>
// ...
  for(MUint i = 0; i < m_sliceSeries.size(); i++) {
    init(m_sliceSeries[i]);
  }
// init(DgSliceSeries&): determine all coordinates, allocate the buffers, and create a
// valid 2D grid that represents a slice of the solver grid.
template <MInt nDim, class SysEqn>
// ...
  MIntScratchSpace cellIds(m_solver.grid().noInternalCells(), AT_, "cellIds");
  MIntScratchSpace hilbertInfo(m_solver.grid().noInternalCells() * 3, AT_, "hilbertIds");
  // ...
  m_solver.grid().raw().createGridSlice(sliceSeries.m_axis, sliceSeries.m_intercept, gridFilePath, solverId(),
                                        /* ... */);
  // ...
  m_log << "Initializing DG Slice " << sliceSeries.m_fileNo << " ...";
  // ...
  if(m_solver.grid().raw().treeb().noSolvers() > 1) {
    // ...
      cellIds[i] = m_solver.grid().tree().grid2solver(cellIds[i]);
    // ...
  }
  // ...
  const MInt noMaxElements = m_solver.m_elements.size();
  // ...
  const MInt noMaxNodes = pow(
      *std::max_element(&m_solver.m_elements.polyDeg(0), &m_solver.m_elements.polyDeg(0) + m_solver.m_elements.size())
      /* ... */);
  // ...
  MFloatScratchSpace coordinates(noMaxElements * noMaxNodes * m_dim, AT_, "coordinates");
  // ...
  std::array<MFloat, m_dim> nodeCoord;
  // ...
  MInt noElementsSlice = 0;
  for(MInt h = 0, cellSum = 0, noNodes = 0, prevNoNodes = 0, sumElements = 0; h < sliceSeries.m_noHilbertIds; h++) {
    for(MInt c = cellSum, e = 0; (c - cellSum) < hilbertInfo[h * 3 + 1]; e++) {
      // ...
      if(cellIds[c] < m_solver.m_elements.cellId(e)) {
        // ...
        noNodes += ipow(m_solver.m_initPolyDeg + 1, m_dim - 1);
        // ...
      }
      // ...
      if(cellIds[c] == m_solver.m_elements.cellId(e)) {
        // ...
        const MInt noNodes1D = m_solver.m_elements.polyDeg(e) + 1;
        // ...
        const MFloatTensor nodeCoordinates(&m_solver.m_elements.nodeCoordinates(e), noNodes1D, noNodes1D, noNodes1D,
                                           /* ... */);
        // ...
        for(MInt i = 0; i < noNodes1D; i++) {
          for(MInt j = 0; j < noNodes1D; j++) {
            if(sliceSeries.m_axis == "x") {
              // ...
              nodeCoord[1] = nodeCoordinates(0, i, j, 1);
              nodeCoord[2] = nodeCoordinates(0, i, j, 2);
            } else if(sliceSeries.m_axis == "y") {
              nodeCoord[0] = nodeCoordinates(i, 0, j, 0);
              // ...
              nodeCoord[2] = nodeCoordinates(i, 0, j, 2);
            } else if(sliceSeries.m_axis == "z") {
              nodeCoord[0] = nodeCoordinates(i, j, 0, 0);
              nodeCoord[1] = nodeCoordinates(i, j, 0, 1);
              // ...
            }
            // ...
            std::copy_n(&nodeCoord[0], m_dim, &coordinates[noCoords]);
            // ...
          }
        }
        // ...
        elementIds[noElementsSlice] = e;
        // ...
        elementOffset[noElementsSlice] = noNodes - prevNoNodes;
        // ...
        const MInt noNodesXD = ipow(m_solver.m_elements.polyDeg(e) + 1, m_dim - 1);
        noNodes += noNodesXD;
        // ...
        elementNodes[noElementsSlice] = noNodesXD;
        // ...
        sliceSeries.m_polyDegs[c] = m_solver.m_elements.polyDeg(e);
        // ...
      }
      // ...
    }
    // ...
    cellSum += hilbertInfo[h * 3 + 1];
    prevNoNodes = noNodes;
    sumElements = noElementsSlice;
    // ...
  }
  // ...
  std::copy_n(&elementIds[0], noElementsSlice, &sliceSeries.m_elementIds[0]);
  std::copy_n(&elementNodes[0], noElementsSlice, &sliceSeries.m_elementNodes[0]);
  std::copy_n(&elementOffset[0], noElementsSlice, &sliceSeries.m_elementOffset[0]);
  std::copy_n(&coordinates[0], noCoords, &sliceSeries.m_coordinates[0]);
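// The axis-dependent indexing above picks, out of the (noNodes1D)^3 tensor-product
// nodes of an element, the (noNodes1D)^2 nodes whose index along the slice axis is
// fixed to 0. A self-contained sketch of that extraction with plain arrays instead of
// MFloatTensor (layout [i][j][k][dim], axis 0/1/2 for x/y/z) could look like this:
#include <array>
#include <vector>

std::vector<std::array<double, 3>> sliceFaceNodes(const std::vector<double>& nodes, const int noNodes1D,
                                                  const int axis) {
  // nodes.size() == noNodes1D * noNodes1D * noNodes1D * 3
  auto at = [&](int i, int j, int k, int dim) { return nodes[((i * noNodes1D + j) * noNodes1D + k) * 3 + dim]; };
  std::vector<std::array<double, 3>> face;
  face.reserve(noNodes1D * noNodes1D);
  for(int i = 0; i < noNodes1D; i++) {
    for(int j = 0; j < noNodes1D; j++) {
      std::array<double, 3> c{};
      for(int dim = 0; dim < 3; dim++) {
        // Fix the index that corresponds to the slice axis to 0; i and j run over the face.
        c[dim] = (axis == 0) ? at(0, i, j, dim) : (axis == 1) ? at(i, 0, j, dim) : at(i, j, 0, dim);
      }
      face.push_back(c);
    }
  }
  return face;
}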
  using namespace maia;
  // ...
  MPI_Comm_rank(mpiComm(), &rank);
  // ...
                AT_, "rank", "sliceScratch[0]");
  // ...
  const MInt noSliceDomains =
      std::count_if(sliceScratch.begin(), sliceScratch.end(), [](const MInt a) { return a != -1; });
  // ...
  for(MInt i : sliceScratch) {
    // ...
      sliceDomains[position] = i;
    // ...
  }
  // ...
  MPI_Group globalGroup, localGroup;
  // ...
  MPI_Group_incl(globalGroup, noSliceDomains, &sliceDomains[0], &localGroup, AT_);
  // ...
  MInt sliceDomain = -1;
  MPI_Comm_rank(sliceSeries.m_mpiComm, &sliceDomain);
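// The fragments above build an MPI communicator that contains only the domains which
// actually hold cells of the slice. A minimal, self-contained sketch of this standard
// pattern with plain MPI calls (MAIA's wrappers take additional name/AT_ arguments for
// bookkeeping) is given below; createSliceComm() is a hypothetical helper.
#include <mpi.h>
#include <vector>

MPI_Comm createSliceComm(const MPI_Comm comm, const bool hasSliceCells) {
  int rank = -1, size = -1;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  // Every rank announces whether it participates; non-participating ranks send -1.
  std::vector<int> flags(size, -1);
  const int sendFlag = hasSliceCells ? rank : -1;
  MPI_Allgather(&sendFlag, 1, MPI_INT, flags.data(), 1, MPI_INT, comm);

  // Collect the participating ranks (identical list on every rank).
  std::vector<int> sliceRanks;
  for(const int r : flags) {
    if(r != -1) { sliceRanks.push_back(r); }
  }

  // Create a group and a communicator restricted to the participating ranks.
  MPI_Group globalGroup = MPI_GROUP_NULL;
  MPI_Group localGroup = MPI_GROUP_NULL;
  MPI_Comm sliceComm = MPI_COMM_NULL;
  MPI_Comm_group(comm, &globalGroup);
  MPI_Group_incl(globalGroup, static_cast<int>(sliceRanks.size()), sliceRanks.data(), &localGroup);
  MPI_Comm_create(comm, localGroup, &sliceComm);  // MPI_COMM_NULL on non-participating ranks
  MPI_Group_free(&localGroup);
  MPI_Group_free(&globalGroup);
  return sliceComm;
}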
  // ...
  MIntScratchSpace noLocalHilbertIds(noSliceDomains, AT_, "noLocalHilbertIds");
  // ...
                "MPI_IN_PLACE", "noLocalHilbertIds[0]");
  // ...
  offsetsRecvData[0] = 0;
  for(MInt i = 1; i < noSliceDomains; i++) {
    offsetsRecvData[i] = offsetsRecvData[i - 1] + noLocalHilbertIds[i - 1] * 3;
  }
  // ...
  MInt noTotalRecvData = offsetsRecvData[noSliceDomains - 1] + noLocalHilbertIds[noSliceDomains - 1] * 3;
  // ...
  MIntScratchSpace sendHilbertData(noLocalHilbertIds[sliceDomain] * 3, AT_, "sendHilbertData");
  // ...
    sendHilbertData[i * 3] = hilbertInfo[i * 3];
    sendHilbertData[i * 3 + 1] = domainId();
  // ...
  for(MInt i = 0; i < noSliceDomains; i++) {
    noRecvData[i] = noLocalHilbertIds[i] * 3;
  }
  // ...
  MPI_Allgatherv(&sendHilbertData[0], sendHilbertData.size(), MPI_INT, &recvHilbertData[0], &noRecvData[0],
                 &offsetsRecvData[0], MPI_INT, sliceSeries.m_mpiComm, AT_, "sendHilbertData[0]",
                 "recvHilbertData[0]");
  // ...
  for(MInt i = 0; i < noTotalRecvData / 3; i++) {
    // ...
    sliceGlobalHilbertInfo[i][0] = recvHilbertData[i * 3];
    sliceGlobalHilbertInfo[i][1] = recvHilbertData[i * 3 + 1];
    sliceGlobalHilbertInfo[i][2] = recvHilbertData[i * 3 + 2];
  }
  // ...
  std::stable_sort(sliceGlobalHilbertInfo.begin(), sliceGlobalHilbertInfo.end(),
                   [](const std::array<MInt, 3>& a, const std::array<MInt, 3>& b) { return a[1] < b[1]; });
  // ...
  std::stable_sort(sliceGlobalHilbertInfo.begin(), sliceGlobalHilbertInfo.end(),
                   [](const std::array<MInt, 3>& a, const std::array<MInt, 3>& b) { return a[0] < b[0]; });
  // ...
  MInt maxNoHilbertIds = *std::max_element(noLocalHilbertIds.begin(), noLocalHilbertIds.end());
  // ...
  for(MInt i = 0, j = 0; i < noLocalHilbertIds[sliceDomain]; i++) {
    // ...
    while(sliceGlobalHilbertInfo[j][0] < hilbertInfo[i * 3]) {
      offset += sliceGlobalHilbertInfo[j][2];
      // ...
    }
    // ...
    while(sliceGlobalHilbertInfo[j][1] < domainId()) {
      offset += sliceGlobalHilbertInfo[j][2];
      // ...
    }
    // ...
  }
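// The two stable sorts above order the gathered triples by domain id first and then by
// Hilbert id, so the result is sorted by (hilbertId, domainId); the node offset of a
// domain is then the sum of the node counts of all entries that precede its own
// (hilbertId, domainId) pair. A small self-contained illustration with made-up values:
#include <algorithm>
#include <array>
#include <cstdio>
#include <vector>

int main() {
  // Entries are {hilbertId, domainId, noNodes}.
  std::vector<std::array<int, 3>> info = {{1, 0, 4}, {0, 1, 9}, {0, 0, 4}, {1, 1, 9}};
  std::stable_sort(info.begin(), info.end(),
                   [](const std::array<int, 3>& a, const std::array<int, 3>& b) { return a[1] < b[1]; });
  std::stable_sort(info.begin(), info.end(),
                   [](const std::array<int, 3>& a, const std::array<int, 3>& b) { return a[0] < b[0]; });
  // Offset of domain 1 within Hilbert id 0: sum over all entries that come before (0, 1).
  const int targetHilbert = 0, targetDomain = 1;
  int offset = 0;
  for(const auto& e : info) {
    if(e[0] < targetHilbert || (e[0] == targetHilbert && e[1] < targetDomain)) { offset += e[2]; }
  }
  std::printf("offset = %d\n", offset);  // prints "offset = 4"
  return 0;
}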
  // ...
  MBool optimizedSliceIo = true;
  optimizedSliceIo = Context::getBasicProperty<MBool>("optimizedSliceIo", AT_, &optimizedSliceIo);
  // ...
  std::vector<MInt> bufferElementIdsPerHilbertId;
  std::vector<MInt> bufferElementNodesPerHilbertIds;
  // ...
            back_inserter(bufferElementIdsPerHilbertId));
  // ...
            back_inserter(bufferElementNodesPerHilbertIds));
  // ...
  MInt noHilbertIdChunks = 0;
  // ...
    MInt startIdx = std::accumulate(&bufferElementIdsPerHilbertId[0], &bufferElementIdsPerHilbertId[h], 0);
    MInt endIdx = std::accumulate(&bufferElementIdsPerHilbertId[0], &bufferElementIdsPerHilbertId[h + 1], 0);
    offsetSum += bufferElementNodesPerHilbertIds[h - 1];
    for(MInt j = startIdx; j < endIdx; j++) {
      // ...
    }
    // ...
    noHilbertIdChunks = i;
  // ...
  MInt maxNoHilbertIdChunks = -1;
  // ...
                sliceSeries.m_mpiComm, AT_, "noHilbertIdChunks", "maxNoHilbertIdChunks");
  // ...
  m_log << "done!" << std::endl;
// save(isFinalTimeStep): call the save method for all slice objects.
template <MInt nDim, class SysEqn>
// ...
  for(MUint i = 0; i < m_sliceSeries.size(); i++) {
    save(m_sliceSeries[i], isFinalTimeStep);
  }
// save(sliceSeries, isFinalTimeStep): sample the slice variables and write one slice
// data file for the given slice series.
template <MInt nDim, class SysEqn>
// ...
    TERMM(1, "DG Slice class was not properly initialized.");
  // ...
  if(!(m_solver.m_timeStep % sliceSeries.m_writeInterval == 0 || isFinalTimeStep)) {
    // ...
  }
  // ...
  const MInt noVars = std::accumulate(m_noSliceVars.begin(), m_noSliceVars.end(), 0);
  const MUint noVarIds = m_noSliceVars.size();
  // ...
    for(MUint n = 0; n < noNodes; n++) {
      // ...
      for(MUint v = 0; v < noVarIds; v++) {
        m_solver.calcSamplingVarAtPoint(&coordinatesTensor(offset + n, 0), sliceSeries.m_elementIds[e],
                                        m_sliceVarIds[v], &stateTensor(offset + n, varOffset), true);
        varOffset += m_noSliceVars[v];
      }
      // ...
    }
  // ...
  const MString s = (m_solver.grid().raw().treeb().noSolvers() > 1) ? "_b" + std::to_string(solverId()) : "";
  std::ostringstream fileName;
  fileName << m_solver.outputDir() << "slice" << s << "_" << sliceSeries.m_fileNo << "_" << std::setw(8)
           << std::setfill('0') << m_solver.m_timeStep << ParallelIo::fileExt();
  // ...
  ParallelIo::size_type nodesOffset, globalNoNodes, offset, totalCount;
  // ...
  file.setAttribute(sliceSeries.m_gridFileName + ParallelIo::fileExt(), "gridFile");
  file.setAttribute(solverId(), "solverId");
  file.setAttribute("DG", "solverType");
  file.setAttribute(m_solver.m_timeStep, "timeStep");
  file.setAttribute(m_solver.m_time, "time");
  // ...
  file.setAttribute(sliceSeries.m_axis, "sliceAxis");
  file.setAttribute(sliceSeries.m_intercept, "sliceIntercept");
  // ...
  file.defineArray(PIO_INT, "polyDegs", totalCount);
  // ...
  MString dgIntegrationMethod = "DG_INTEGRATE_GAUSS";
  dgIntegrationMethod =
      Context::getSolverProperty<MString>("dgIntegrationMethod", solverId(), AT_, &dgIntegrationMethod);
  MString dgPolynomialType = "DG_POLY_LEGENDRE";
  dgPolynomialType = Context::getSolverProperty<MString>("dgPolynomialType", solverId(), AT_, &dgPolynomialType);
  // ...
  file.setAttribute(dgIntegrationMethod, "dgIntegrationMethod", "polyDegs");
  file.setAttribute(dgPolynomialType, "dgPolynomialType", "polyDegs");
  // ...
  for(MUint v = 0, totalNoVars = 0; v < noVarIds; v++) {
    for(MInt i = 0; i < m_noSliceVars[v]; i++) {
      const MString name = "variables" + std::to_string(totalNoVars);
      file.defineArray(PIO_FLOAT, name, globalNoNodes);
      file.setAttribute(m_sliceVarNames[v][i], "name", name);
      // ...
    }
  }
  // ...
    file.writeArray(&sliceSeries.m_polyDegs[0 + pOffset], "polyDegs");
  // ...
    file.setOffset(0, 0);
    file.writeArray(&sliceSeries.m_polyDegs[0], "polyDegs");
  // ...
  MFloatTensor stateTensorFinal(&states[0], std::max(tmp, 1), noVars);
  // ...
  for(MInt i = 0; i < noVars; i++) {
    // ...
    std::fill(buffer.begin(), buffer.end(), 0.0);
    // ...
    for(MInt e = elementOffset; e < noElementsSlice; e++) {
      // ...
        b[j] = stateTensorFinal(offsetElementNodes + j, i);
      // ...
    }
    // ...
    elementOffset = noElementsSlice;
    // ...
    const MString name = "variables" + std::to_string(i);
    // ...
    file.writeArray(&buffer[0], name);
    // ...
  }
  // ...
    file.setOffset(0, 0);
    // ...
      const MString name = "variables" + std::to_string(i);
      // ...
      file.writeArray(&tmpBuf, name);
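// The file name assembled in the function above consists of the output directory, the
// literal "slice", an optional "_b<solverId>" suffix when several solvers share the
// grid, the slice file number, and the time step zero-padded to eight digits; the
// extension comes from ParallelIo::fileExt() and depends on the I/O backend (shown as
// a placeholder here). A small sketch with illustrative values:
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

int main() {
  const std::string outputDir = "out/";  // illustrative
  const std::string s = "_b0";           // multi-solver suffix, empty for a single solver
  const int fileNo = 1;
  const int timeStep = 12000;
  const std::string ext = ".<ext>";      // placeholder for ParallelIo::fileExt()
  std::ostringstream fileName;
  fileName << outputDir << "slice" << s << "_" << fileNo << "_" << std::setw(8) << std::setfill('0') << timeStep
           << ext;
  std::cout << fileName.str() << std::endl;  // out/slice_b0_1_00012000.<ext>
  return 0;
}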
Referenced symbols:
- static MBool propertyExists(const MString& name, MInt solver = m_noSolvers): checks whether a property exists in general.
- std::vector<MInt> m_elementOffset
- std::vector<MInt> m_elementNodes
- std::vector<MFloat> m_coordinates
- DgSliceSeries(const MString& axis, const MFloat intercept, const MInt writeInterval, const MString& gridFileName, const MInt fileNo)
- std::vector<MInt> m_elementNodesHilbertOffset
- std::vector<MInt> m_elementNodesPerHilbertIds
- std::vector<MInt> m_polyDegs
- std::vector<MInt> m_cellIdsPerHilbertId
- std::vector<MInt> m_cellIdsOffset
- std::vector<MInt> m_elementIds
- std::vector<MInt> m_elementIdsPerHilbertId
- init(DgSliceSeries&): determine all coordinates and alloc buffer size, create a valid 2D grid which represents a slice from ...
- std::vector<MInt> m_noSliceVars: number of variables for each slice variable.
- void save(const MBool isFinalTimeStep): call save method for all slice objects.
- void updateGridFile(DgSliceSeries& sliceSeries, const MString& prefix = "")
- std::vector<MInt> m_sliceVarIds: list of slice variables.
- void init(): initialize all slice objects.
- std::vector<DgSliceSeries> m_sliceSeries
- void setProperties(): read properties for all slices and create corresponding slice objects.
- DgSlices(DgCartesianSolver<nDim, SysEqn>& solver)
- static constexpr const MInt m_dim
- DgCartesianSolver<nDim, SysEqn>& m_solver
- std::vector<std::vector<MString>> m_sliceVarNames: list of variable names for each slice variable.
- ScratchSpace: this class is a ScratchSpace.
- MInt ipow(MInt base, MInt exp): integer exponent function for non-negative exponents.
- MInt globalNoDomains(): return the global number of domains.
- MString: alias for std::basic_string<char>.
- int MPI_Allgatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm, const MString& name, const MString& sndvarname, const MString& rcvvarname): same as MPI_Allgatherv.
- int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm* newcomm, const MString& name, const MString& varname): same as MPI_Comm_create, but updates the number of MPI communicators.
- int MPI_Group_incl(MPI_Group group, int n, const int ranks[], MPI_Group* newgroup, const MString& name): same as MPI_Group_incl.
- int MPI_Comm_group(MPI_Comm comm, MPI_Group* group, const MString& name, const MString& varname): same as MPI_Comm_group.
- int MPI_Allreduce(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, const MString& name, const MString& sndvarname, const MString& rcvvarname): same as MPI_Allreduce.
- int MPI_Allgather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, const MString& name, const MString& sndvarname, const MString& rcvvarname): same as MPI_Allgather.
- int MPI_Group_free(MPI_Group* group, const MString& name): same as MPI_Group_free.
- maia: namespace for auxiliary functions/classes.
- PARALLELIO_DEFAULT_BACKEND ParallelIo
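// The ipow() helper referenced above computes an integer power for non-negative
// exponents; in the listing it appears, e.g., as ipow(polyDeg + 1, m_dim - 1) for the
// number of nodes on a slice face. A minimal sketch (an illustration, not MAIA's
// implementation):
int ipow(int base, int exp) {
  int result = 1;
  while(exp > 0) {  // exponentiation by squaring
    if(exp % 2 == 1) { result *= base; }
    base *= base;
    exp /= 2;
  }
  return result;
}
// Example: ipow(4, 2) == 16, i.e. a degree-3 element has 4 nodes per direction and 16 nodes on a slice face.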