#if not defined(MAIA_DISABLE_LB)
#if not defined(MAIA_DISABLE_DG)
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
      mTerm(1, AT_, "Unknown postprocessing operation");
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
template <MInt nDim, class ppType>
  m_log << " + Initializing postprocessing" << endl << endl;
  m_log << " + Postprocessing is active" << endl;
  for(MInt i = 0; i < 3; i++) {
  if(noSourceTerms == 0) {
    TERMM(1, "No APE source term average was specified.");
  for(MInt i = 0; i < noSourceTerms; i++) {
        Context::getSolverProperty<MString>("apeSourceTermsAverages", m_postprocessingId, AT_, i);
      TERMM(1, "The given APE source term average '" + sourceTermName + "' was not found.");
      TERMM(1, "The given APE source term average '" + sourceTermName + "' is already present. Check your property file.");
    std::vector<MInt> sourceTermMeanVars;
    m_activeMeanVars.insert(sourceTermMeanVars.begin(), sourceTermMeanVars.end());
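    // Note (assumption): m_activeMeanVars collects the union of the mean variables required by all
    // requested source-term averages, so a variable needed by several source terms is only stored
    // and averaged once.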
  m_log << " - requested operations: " << endl;
      mTerm(1, AT_, "Unknown postprocessing operation");
template <MInt nDim, class ppType>
    TERMM(1, "Please specify the property 'averageInterval' (with a value > 0) ...");
    TERMM(1, "Please specify the property 'averageStartTimestep' (with a value >= 0) ...");
    TERMM(1, "Please specify the property 'averageStopTimestep' (> averageStartTimestep) ...");
template <MInt nDim, class ppType>
  if(sourceTerm == ST::Q_mI_linear) {
  if(sourceTerm == ST::Q_c || sourceTerm == ST::Q_e) {
template <MInt nDim, class ppType>
#if not defined(MAIA_DISABLE_LB)
  constexpr MBool isothermal = (std::is_same<ppType, PostProcessingLb<nDim>>::value);
  constexpr MBool isothermal = false;
  const MInt noSummedVars =
    std::stringstream ss;
    ss << "PostData: noVariables in postprocessing not correct: postData().noVariables(): " << postData().noVariables()
       << " m_varNames.size(): " << noSummedVars << std::endl;
  const MInt offset = 3 * (nDim - 1) + 1;
    count += nDim * nDim;
  if(count > postData().noVariables()) {
    mTerm(1, AT_, "Variables size not matching!");
    m_log << "m_useKahan is activated" << endl;
    TERMM(1, "FIXME Kahan summation is untested and probably broken");
template <MInt nDim, class ppType>
    TERMM(1, "Error: line probing activated but number of probe line directions is "
        Context::getSolverProperty<MInt>("pp_correlationDirection", m_postprocessingId, AT_, correlationId);
          << correlationId << endl;
        Context::getSolverProperty<MInt>("pp_correlationVariableIndex", m_postprocessingId, AT_, correlationId);
      correlationDir(correlationId, 0) = 1;
      correlationDir(correlationId, 0) = 0;
    IF_CONSTEXPR(nDim == 3) {
        correlationDir(correlationId, 1) = 1;
        correlationDir(correlationId, 1) = 2;
          << " with correlationDirection "
  map<MFloat, MInt, coord_comp_1d_> coordinates;
  std::vector<MFloat> coord(nDim, std::numeric_limits<MFloat>::max());
  MFloat halfCellLength = NAN;
  for(MInt cellId = 0; cellId < postData().grid().noInternalCells(); cellId++) {
    if(gridProxy->tree().noChildren(cellId) > 0) continue;
    for(MInt dim = 0; dim < nDim; dim++) {
    halfCellLength = gridProxy->halfCellLength(cellId);
    for(MInt i = 0; i < nDim - 1; i++) {
         >= halfCellLength) {
       < (halfCellLength + MFloatEps)) {
      for(MInt dirId = 0; dirId < 2 * nDim; dirId++) {
        if(gridProxy->tree().hasNeighbor(cellId, dirId) != 1) {
        MInt nId = gridProxy->tree().neighbor(cellId, dirId);
           != coordinates.end()) {
    if(ok || (ok2 && !nghbrFound)) {
      if(postData().a_isBndryGhostCell(cellId)) continue;
  auto it = coordinates.begin();
  for(MInt i = 0; it != coordinates.end(); it++, i++) {
                postData().mpiComm(), AT_, "m_noCorrelationIds", "m_globalNoCorrelationIds");
  MInt maxGlobalNoCorrelationIds = 0;
         "m_globalCorrelationExchangeVar", F0, AT_);
         "m_globalCorrelationExchangeVarMean", F0, AT_);
         "m_globalCorrelationExchangeVarRMS", F0, AT_);
             "m_noCorrelationIds[correlationId]", "postData().noDomain()");
    for(MInt dom = 0; dom < noDomain; dom++) {
      displs[dom] = offset;
      offset += recvbuf[dom];
                &displs[postData().domainId()], MPI_INT, 0, postData().mpiComm(), AT_, "m_correlationIds",
                "m_globalCorrelationIds");
                &displs[postData().domainId()], MPI_DOUBLE, 0, postData().mpiComm(), AT_, "m_correlationPositions",
                "m_globalCorrelationPositions");
              postData().mpiComm(), AT_, "m_probeLineAverageCoordinates");
              postData().mpiComm(), AT_, "m_globalCorrelationPositions");
           "m_correlationExchangeVar[correlationId]", AT_);
           "m_correlationExchangeVarMean[correlationId]", AT_);
           "m_correlationExchangeVarRMS[correlationId]", AT_);
    MFloat smallerDist = std::numeric_limits<MFloat>::max();
    MFloat biggerDist = std::numeric_limits<MFloat>::max();
      MFloat dist = abs(corrPosition - dataPosition);
      if(corrPosition <= dataPosition && dist < smallerDist) {
      if(corrPosition > dataPosition && dist < biggerDist) {
    if(smallerDist <= biggerDist) {
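      // The loop above brackets each correlation position with the closest data position below it
      // (smallerDist) and above it (biggerDist); the closer of the two candidates is selected here.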
template <MInt nDim, class ppType>
    TERMM(1, "Please specify the property 'averageStartTimestep' (with a value > 0) ...");
    TERMM(1, "m_movingAverageInterval has to be >= 1");
    TERMM(1, "m_movingAverageDataPoints has to be at least 2");
         "m_movAvgVariables", F0, AT_);
  } else if(nDim == 3) {
template <MInt nDim, class ppType>
    for(MInt i = 0; i < nDim; i++, t++)
  m_log << " = local probe points: " << endl;
    sprintf(buf, "%d", np);
    filename.append(buf);
    for(MInt i = 0; i < nDim; i++)
    m_log << " # number " << np << endl;
    m_log << " # coordinates: ";
    for(MInt d = 0; d < nDim; d++)
    m_log << " # real coordinates: ";
    for(MInt d = 0; d < nDim; d++)
template <MInt nDim, class ppType>
  if(!pp->solver().grid().isActive()) return;
    TERMM(1, "Error: line probing activated but number of probe line directions is " + std::to_string(m_noProbeLines)
        Context::getSolverProperty<MInt>("pp_probeLineDirection", m_postprocessingId, AT_, probeLineId);
          << probeLineId << endl;
      lineDir(probeLineId, 0) = 1;
      lineDir(probeLineId, 0) = 0;
    IF_CONSTEXPR(nDim == 3) {
        lineDir(probeLineId, 1) = 1;
        lineDir(probeLineId, 1) = 2;
        Context::getSolverProperty<MFloat>("pp_probeLineCoordinates", m_postprocessingId, AT_, 2 * probeLineId + 1);
    for(MInt i = 0; i < nDim - 1; i++) {
  map<MFloat, MInt, coord_comp_1d_> coordinates;
  const MFloat* coord = nullptr;
  MFloat halfCellLength = NAN;
    if(m_gridProxy->tree().noChildren(cellId) > 0) continue;
    coord = &pp->solver().a_coordinate(cellId, 0);
    halfCellLength = m_gridProxy->halfCellLength(cellId);
    for(MInt i = 0; i < nDim - 1; i++) {
      if(abs(m_probeLineCoordinates[probeLineId][i] - coord[lineDir(probeLineId, i)]) >= halfCellLength) ok = false;
       < (halfCellLength + MFloatEps)) {
      for(MInt dirId = 0; dirId < 2 * nDim; dirId++) {
        if(m_gridProxy->tree().hasNeighbor(cellId, dirId) != 1) {
           != coordinates.end()) {
    if(ok || (ok2 && !nghbrFound)) {
      if(pp->solver().a_isBndryGhostCell(cellId)) continue;
  auto it = coordinates.begin();
  for(MInt i = 0; it != coordinates.end();
  ParallelIo::size_type localCount, offset, totalCount;
  ParallelIo::calcOffset(localCount, &offset, &totalCount, pp->solver().mpiComm());
                "m_noProbeLineIds", "m_globalNoProbeLineIds");
template <MInt nDim, class ppType>
  if(!pp->solver().grid().isActive()) return;
        Context::getSolverProperty<MInt>("pp_probeLinePeriodic", m_postprocessingId, AT_, probeLineId);
  vector<vector<MFloat>> probeLineAverageCoordinates_temp{};
    vector<MFloat> temp{};
    vector<MInt> sign{};
      temp.emplace_back(pos);
    probeLineAverageCoordinates_temp.emplace_back(temp);
    MInt noDomain = pp->solver().noDomains();
               "m_noProbeLineIds[probeLineId]", "ppblock()->noDomain()");
    if(pp->solver().domainId() == 0) {
      for(MInt dom = 0; dom < noDomain; dom++) {
        displs[dom] = offset;
        offset += recvbuf[dom];
                &displs[pp->solver().domainId()], MPI_DOUBLE, 0, pp->solver().mpiComm(), AT_,
                "probeLineAverageCoordinates_temp", "m_probeLineAverageCoordinates");
              pp->solver().mpiComm(), AT_, "m_probeLineAverageCoordinates");
    TERMM_IF_COND(nDim == 2, "FIXME: the size of periodicCoordiantes was nDim-1, for 2D the below code was writing "
                             "out of array bounds. Check if the code works as intended.");
    vector<MFloat> periodicCoordiantes(nDim);
    map<MFloat, MInt, coord_comp_1d_> coordinates;
    MBool ok, ok2, nghbrFound;
    for(MInt cellId = 0; cellId < pp->solver().grid().noInternalCells(); cellId++) {
      if(m_gridProxy->tree().noChildren(cellId) > 0) continue;
      coord = &pp->solver().a_coordinate(cellId, 0);
      halfCellLength = m_gridProxy->halfCellLength(cellId);
      for(MInt i = 0; i < nDim - 1; i++) {
        if(abs(periodicCoordiantes[i] - coord[lineDir(probeLineId, i)]) >= halfCellLength) ok = false;
        if(abs(periodicCoordiantes[i] - coord[lineDir(probeLineId, i)]) < (halfCellLength + MFloatEps)) {
          for(MInt dirId = 0; dirId < 2 * nDim; dirId++) {
             != coordinates.end()) {
      if(ok || (ok2 && !nghbrFound)) {
        if(pp->solver().a_isBndryGhostCell(cellId)) continue;
           "m_probeLineAverageIds[probeLineId][p]", AT_);
           "m_probeLineAveragePositions[probeLineId][p]", AT_);
    auto it = coordinates.begin();
    for(MInt i = 0; it != coordinates.end(); it++, i++) {
                  MPI_INT, MPI_SUM, pp->solver().mpiComm(), AT_, "m_noProbeLineAverageIds",
                  "m_globalNoProbeLineAverageIds");
template <MInt nDim, class ppType>
  TERMM_IF_COND(m_probeLineInterval <= 0, "Error: property 'pp_probeLineInterval' needs to be > 0");
  m_probeLineStartTimestep = 0;
  m_probeLineStartTimestep =
      Context::getSolverProperty<MInt>("pp_probeLineStartTimestep", m_postprocessingId, AT_, &m_probeLineStartTimestep);
  m_probeLineStopTimestep = std::numeric_limits<MInt>::max();
  m_probeLineStopTimestep =
      Context::getSolverProperty<MInt>("pp_probeLineStopTimestep", m_postprocessingId, AT_, &m_probeLineStopTimestep);
  TERMM_IF_COND(m_probeLineStopTimestep <= 0 || m_probeLineStopTimestep <= m_probeLineStartTimestep,
                "Error: pp_probeLineStopTimestep needs to be > pp_probeLineStartTimestep.");
  m_log << "Line probing: interval = " << m_probeLineInterval << "; startTimeStep = " << m_probeLineStartTimestep
        << "; stopTimeStep = " << m_probeLineStopTimestep << std::endl;
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initProbeSlice() {
  m_sliceAiaFileFormat = false;
  m_sliceAiaFileFormat =
      Context::getSolverProperty<MBool>("pp_sliceAiaFileFormat", m_postprocessingId, AT_, &m_sliceAiaFileFormat);
  m_optimizedSliceIo = true;
  m_optimizedSliceIo = Context::getBasicProperty<MBool>("optimizedSliceIo", AT_, &m_optimizedSliceIo);
  const MBool activeRank = pp->solver().grid().isActive();
  if(!activeRank && !m_sliceAiaFileFormat) return; // Note: createGridSlice needs to be called from all ranks!
  IF_CONSTEXPR(nDim == 2) {
    m_log << "slice probing for 2D not useful" << endl;
  // Implementation of CartesianGrid::createGridSlice()
  if(m_sliceAiaFileFormat) {
    m_noProbeSlices = Context::propertyLength("sliceAxis", m_postprocessingId);
    TERMM_IF_COND(m_noProbeSlices < 1, "Error: slice probing enabled but property sliceAxis not defined.");
    mAlloc(m_sliceAxis, m_noProbeSlices, "m_sliceAxis", AT_);
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
      m_sliceAxis[sliceId] = Context::getSolverProperty<MString>("sliceAxis", m_postprocessingId, AT_, sliceId);
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
      m_sliceIntercept[sliceId] =
          Context::getSolverProperty<MFloat>("sliceIntercept", m_postprocessingId, AT_, sliceId);
    m_sliceVarIds.clear();
    m_noSliceVars.clear();
    m_sliceVarNames.clear();
    pp->solver().getSolverSamplingProperties(m_sliceVarIds, m_noSliceVars, m_sliceVarNames, "Slice");
    // Allocate additional storage in the solver for the sampling variables if required
    pp->solver().initSolverSamplingVariables(m_sliceVarIds, m_noSliceVars);
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
      stringstream gridName;
      gridName << "sliceGrid_" << m_sliceAxis[sliceId] << "_" << m_sliceIntercept[sliceId];
      const MString filePath = pp->solver().outputDir() + gridName.str();
      pp->solver().grid().raw().createGridSlice(m_sliceAxis[sliceId], m_sliceIntercept[sliceId], filePath, -1,
                                                nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
    // Return for inactive ranks after participating in creating the grid-slice of the whole grid (all ranks)
    mAlloc(m_globalNoProbeSliceIds, m_noProbeSlices, "m_globalNoProbeSliceIds", AT_); // global #ids in each slice
    // Variables for every slice
    // Number of HilbertIds on this domain determines the number of function calls for data writing
    // Store hilbertInfo, which is hilbertId, noCellsPerHilbertId (for offset in local data array) and the offset for
    // Maximum number of HilbertIds is used to call the function for writing data on every domain equally often
    if(m_optimizedSliceIo) {
    MIntScratchSpace tmpSliceCellIds(pp->solver().grid().noInternalCells(), AT_, "tmpSliceCellIds");
    MIntScratchSpace tmpSliceHilbertInfo(pp->solver().grid().noInternalCells() * 3, AT_, "tmpSliceHilbertInfo");
    const MInt contHInfoSize = (m_optimizedSliceIo) ? pp->solver().grid().noInternalCells() * 3 : 0;
    MIntScratchSpace tmpSliceContHilbertInfo(contHInfoSize, AT_, "tmpSliceContHilbertInfo");
    for(MInt i = 0; i < m_noProbeSlices; i++) {
      m_probeSliceIds[i] = nullptr;
      m_noProbeSliceHilbertInfo[i] = nullptr;
      m_noProbeSliceNoHilbertIds[i] = 0;
      if(m_optimizedSliceIo) {
        m_noProbeSliceContHilbertInfo[i] = nullptr;
        m_noProbeSliceNoContHilbertIds[i] = 0;
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
      stringstream gridName;
      gridName << "sliceGrid_" << m_sliceAxis[sliceId] << "_" << m_sliceIntercept[sliceId];
      m_probeSliceGridNames[sliceId] = gridName.str();
      MInt tmpNoSliceCellIds = 0;
      MInt tmpNoHilbertIds = 0;
      MInt tmpNoContHilbertIds = 0;
      MInt* tmpNoContHilbertIdsP = nullptr;
      MInt* tmpSliceContHilbertInfoP = nullptr;
      if(m_optimizedSliceIo) {
        tmpNoContHilbertIdsP = &tmpNoContHilbertIds;
        tmpSliceContHilbertInfoP = &tmpSliceContHilbertInfo[0];
      const MString filePath = pp->solver().outputDir() + m_probeSliceGridNames[sliceId];
      pp->solver().grid().raw().createGridSlice(
          m_sliceAxis[sliceId], m_sliceIntercept[sliceId], filePath, pp->solver().solverId(), &tmpNoSliceCellIds,
          tmpSliceCellIds.getPointer(), &tmpNoHilbertIds, tmpSliceHilbertInfo.getPointer(), tmpNoContHilbertIdsP,
          tmpSliceContHilbertInfoP);
      gridName << ParallelIo::fileExt();
      m_probeSliceGridNames[sliceId] = gridName.str();
      m_noProbeSliceIds[sliceId] = tmpNoSliceCellIds;
      m_noProbeSliceNoHilbertIds[sliceId] = tmpNoHilbertIds;
      if(tmpNoSliceCellIds > 0) mAlloc(m_probeSliceIds[sliceId], tmpNoSliceCellIds, "m_probeSliceIds", AT_);
      for(MInt slicedCellId = 0; slicedCellId < tmpNoSliceCellIds; slicedCellId++) {
        // Convert grid cellId into block cellId
        m_probeSliceIds[sliceId][slicedCellId] = pp->solver().grid().tree().grid2solver(tmpSliceCellIds[slicedCellId]);
      if(tmpNoHilbertIds > 0) {
        copy_n(&tmpSliceHilbertInfo[0], tmpNoHilbertIds * 3, &m_noProbeSliceHilbertInfo[sliceId][0]);
      if(m_optimizedSliceIo) {
        m_noProbeSliceNoContHilbertIds[sliceId] = tmpNoContHilbertIds;
        if(tmpNoContHilbertIds > 0) {
          copy_n(&tmpSliceContHilbertInfo[0], tmpNoContHilbertIds * 3, &m_noProbeSliceContHilbertInfo[sliceId][0]);
    MPI_Allreduce(m_noProbeSliceIds, m_globalNoProbeSliceIds, m_noProbeSlices, MPI_INT, MPI_SUM, pp->solver().mpiComm(),
    MPI_Allreduce(m_noProbeSliceNoHilbertIds, m_noProbeSliceMaxNoHilbertIds, m_noProbeSlices, MPI_INT, MPI_MAX,
    if(m_optimizedSliceIo) {
      MPI_Allreduce(m_noProbeSliceNoContHilbertIds, m_noProbeSliceMaxNoContHilbertIds, m_noProbeSlices, MPI_INT,
    if(Context::propertyLength("pp_probeSliceDir", m_postprocessingId) % 2 != 0) {
      m_log << "odd number of probe slice directions provided by probeSliceDir, should be even!" << endl;
    m_noProbeSlices = Context::propertyLength("pp_probeSliceDir", m_postprocessingId) / 2;
    if(m_noProbeSlices == 0) {
      m_log << "not enough probe slice directions provided by probeSliceDir!" << endl;
    mAlloc(m_probeSliceCoordinate, m_noProbeSlices, "m_probeSliceCoordinate", AT_);
    ScratchSpace<MInt> dir(m_noProbeSlices, AT_, "dir");
    for(MInt dirId = 0; dirId < 2 * m_noProbeSlices; dirId++) {
      m_probeSliceDir[dirId] = Context::getSolverProperty<MInt>("pp_probeSliceDir", m_postprocessingId, AT_, dirId);
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
      if((m_probeSliceDir[2 * sliceId] < 0 || m_probeSliceDir[2 * sliceId] >= nDim)
         || (m_probeSliceDir[2 * sliceId + 1] < 0 || m_probeSliceDir[2 * sliceId + 1] >= nDim)
         || (m_probeSliceDir[2 * sliceId] == m_probeSliceDir[2 * sliceId + 1])) {
        m_log << "invalid value for property probeSliceDir for slice #" << sliceId << ": "
              << m_probeSliceDir[2 * sliceId] << " " << m_probeSliceDir[2 * sliceId + 1] << endl;
      if(m_probeSliceDir[2 * sliceId] > m_probeSliceDir[2 * sliceId + 1]) {
        MInt tmp = m_probeSliceDir[2 * sliceId];
        m_probeSliceDir[2 * sliceId] = m_probeSliceDir[2 * sliceId + 1];
        m_probeSliceDir[2 * sliceId + 1] = tmp;
      dir[sliceId] = 3 - (m_probeSliceDir[2 * sliceId] + m_probeSliceDir[2 * sliceId + 1]);
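      // In 3D the two in-plane directions and the slice normal are the axes {0, 1, 2}, which sum
      // to 3, so the normal direction follows as 3 minus the sum of the two given directions.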
    if(Context::propertyLength("pp_probeSliceCoordinate", m_postprocessingId) < m_noProbeSlices) {
      m_log << "too few #probeSliceCoordinate provided, found "
            << Context::propertyLength("pp_probeSliceCoordinate", m_postprocessingId) << " needed " << m_noProbeSlices
            << "; using default value 0.0 for missing values" << endl;
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
      if(sliceId < Context::propertyLength("pp_probeSliceCoordinate", m_postprocessingId)) {
        m_probeSliceCoordinate[sliceId] =
            Context::getSolverProperty<MFloat>("pp_probeSliceCoordinate", m_postprocessingId, AT_, sliceId);
        m_probeSliceCoordinate[sliceId] = 0.0;
      m_log << "probeSlice #" << sliceId << " in directions " << m_probeSliceDir[2 * sliceId] << ", "
            << m_probeSliceDir[2 * sliceId + 1] << " with probeSliceCoordinate " << m_probeSliceCoordinate[sliceId]
    mAlloc(m_noProbeSliceIds, m_noProbeSlices, "m_noProbeSliceIds", AT_); // local #cells in each slice
    mAlloc(m_probeSliceIds, m_noProbeSlices, "m_probeSliceIds", AT_);     // collector for cellIds for each slice
           AT_); // collector for cell positions in slice
    mAlloc(m_globalNoProbeSliceIds, m_noProbeSlices, "m_globalNoProbeSliceIds", AT_); // global #ids in each slice
    // suppress valgrind error
    for(MInt i = 0; i < m_noProbeSlices; i++) {
      m_probeSliceIds[i] = nullptr;
      m_probeSlicePositions[i] = nullptr;
    const MFloat* coord = nullptr;
    const MFloat* coord2 = nullptr;
    MFloat halfCellLength = NAN;
    MBool nghbrFound = false;
    for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) { // loop over all slices
      map<pair<MFloat, MFloat>, MInt, coord_comp_2d_> coordinates{};
      for(MInt cellId = 0; cellId < pp->solver().grid().noInternalCells(); cellId++) { // collect cellIds on slice
        if(m_gridProxy->tree().noChildren(cellId) > 0) continue; // skip non leaf cells
        coord = &pp->solver().a_coordinate(cellId, 0);
        halfCellLength = m_gridProxy->halfCellLength(cellId);
        if(abs(m_probeSliceCoordinate[sliceId] - coord[dir[sliceId]]) < halfCellLength) {
          coordinates[pair<MFloat, MFloat>(coord[m_probeSliceDir[2 * sliceId]],
                                           coord[m_probeSliceDir[2 * sliceId + 1]])] = cellId;
        // check if distance is a bit more than half of the cell length (slice between cells)
        if(abs(m_probeSliceCoordinate[sliceId] - coord[dir[sliceId]]) < (halfCellLength + MFloatEps)) {
          for(MInt i = 0; i < 2; i++) {
            dirId = 2 * dir[sliceId] + i; // directions orthogonal to slice
            if(m_gridProxy->tree().hasNeighbor(cellId, dirId)
               != 1) { // skip if cell has no equal level neighbor in direction dirId
            nId = m_gridProxy->tree().neighbor(cellId, dirId);
            coord2 = &pp->solver().a_coordinate(nId, 0);
            // check if neighbor cell is already in the probe slice
            if(coordinates.find(pair<MFloat, MFloat>(coord2[m_probeSliceDir[2 * sliceId]],
                                                     coord2[m_probeSliceDir[2 * sliceId + 1]]))
               != coordinates.end()) {
            coordinates[pair<MFloat, MFloat>(coord[m_probeSliceDir[2 * sliceId]],
                                             coord[m_probeSliceDir[2 * sliceId + 1]])] = cellId;
      m_noProbeSliceIds[sliceId] = coordinates.size();
      if(m_noProbeSliceIds[sliceId] > 0) {
        mAlloc(m_probeSliceIds[sliceId], m_noProbeSliceIds[sliceId], "m_probeSliceIds", AT_);
        mAlloc(m_probeSlicePositions[sliceId], 2 * m_noProbeSliceIds[sliceId], "m_probeSlicePositions", AT_);
        // Allocate dummy array to avoid errors when dereferencing during writing
      MPI_Gather(&(m_noProbeSliceIds[sliceId]), 1, MPI_INT, m_noGlobalProbeSliceIds[sliceId], 1, MPI_INT, 0,
      // m_log << "slice #" << sliceId << ": ";
      // for(MInt i=0; i<pp->solver().m_noDomains; i++){
      //   m_log << m_noGlobalProbeSliceIds[sliceId][i] << " ";
      auto it = coordinates.begin();
      for(MInt i = 0; it != coordinates.end(); it++, i++) {
        m_probeSliceIds[sliceId][i] = (*it).second;
        m_probeSlicePositions[sliceId][2 * i] = (*it).first.first;
        m_probeSlicePositions[sliceId][2 * i + 1] = (*it).first.second;
      ParallelIo::size_type localCount, offset, totalCount;
      localCount = m_noProbeSliceIds[sliceId];
      ParallelIo::calcOffset(localCount, &offset, &totalCount, pp->solver().mpiComm());
      m_probeSliceOffsets[sliceId] = offset;
    MPI_Allreduce(m_noProbeSliceIds, m_globalNoProbeSliceIds, m_noProbeSlices, MPI_INT, MPI_SUM, pp->solver().mpiComm(),
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initTimeStepPropertiesSlice() {
  m_probeSliceInterval = Context::getSolverProperty<MInt>("pp_probeSliceInterval", m_postprocessingId, AT_);
  TERMM_IF_COND(m_probeSliceInterval <= 0, "Error: property 'pp_probeSliceInterval' needs to be > 0");
  m_probeSliceStartTimestep = 0;
  m_probeSliceStartTimestep = Context::getSolverProperty<MInt>("pp_probeSliceStartTimestep", m_postprocessingId, AT_,
                                                               &m_probeSliceStartTimestep);
  m_probeSliceStopTimestep = std::numeric_limits<MInt>::max();
  m_probeSliceStopTimestep =
      Context::getSolverProperty<MInt>("pp_probeSliceStopTimestep", m_postprocessingId, AT_, &m_probeSliceStopTimestep);
  TERMM_IF_COND(m_probeSliceStopTimestep <= 0 || m_probeSliceStopTimestep <= m_probeSliceStartTimestep,
                "Error: pp_probeSliceStopTimestep needs to be > pp_probeSliceStartTimestep.");
  m_log << "Slice probing: interval = " << m_probeSliceInterval << "; startTimeStep = " << m_probeSliceStartTimestep
        << "; stopTimeStep = " << m_probeSliceStopTimestep << std::endl;
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initAverageSolutionsSlice() {
  m_noProbeSlices = Context::propertyLength("sliceAxis", m_postprocessingId);
  mAlloc(m_sliceAxis, m_noProbeSlices, "m_sliceAxis", AT_);
  for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
    m_sliceAxis[sliceId] = Context::getSolverProperty<MString>("sliceAxis", m_postprocessingId, AT_, sliceId);
  for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
    m_sliceIntercept[sliceId] = Context::getSolverProperty<MFloat>("sliceIntercept", m_postprocessingId, AT_, sliceId);
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initSpatialAveraging() {
  m_spatialDirection1 = -1;
  m_spatialDirection1 =
      Context::getSolverProperty<MInt>("pp_spatialDirection1", m_postprocessingId, AT_, &m_spatialDirection1);
  m_spatialDirection2 = -1;
  m_spatialDirection2 =
      Context::getSolverProperty<MInt>("pp_spatialDirection2", m_postprocessingId, AT_, &m_spatialDirection2);
  MInt noCells = pp->solver().grid().noInternalCells();
  MInt noVars = m_noVariables;
  MInt dir1 = m_spatialDirection1;
  MInt dir2 = m_spatialDirection2;
  // precalculate weights
  for(MInt lvl = 0; lvl < 20; lvl++) {
    m_spatialLvlWeight[lvl] = 1 / pow(2, nDim * lvl);
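    // The weight of a leaf cell on refinement level lvl is its volume relative to a level-0 cell,
    // i.e. 2^(-nDim * lvl); in 3D a level-2 cell therefore contributes with a factor of 1/64.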
  if(dir1 == -1 && dir2 == -1) { // single point
    m_spatialWeightSinglePoint = 0;
    for(MInt cellId = 0; cellId < noCells; cellId++) { // calculate local total weight
      if(m_gridProxy->tree().noChildren(cellId) > 0) continue; // check if leaf cell
      lvlCell = m_gridProxy->tree().level(cellId);
      m_spatialWeightSinglePoint += m_spatialLvlWeight[lvlCell];
  else if((dir1 == -1 && dir2 != -1) || (dir2 == -1 && dir1 != -1)) { // line
    dir1 = (dir2 == -1) ? dir1 : dir2;
    if(dir1 < 0 || dir1 > nDim - 1) {
      m_log << "invalid spatial direction" << endl;
    // single logfiles for testing
    stringstream logName;
    logName << "log_rank_" << pp->solver().grid().domainId();
    log.open((logName.str()).c_str());
    map<MFloat, MInt, coord_comp_1d_>& coordinates =
        m_spatialLineCoordinates; // TODO labels:PP,toremove remove reference
    for(MInt cellId = 0; cellId < noCells; cellId++) { // collect cell coordinates in spatial direction
      cellLvl = m_gridProxy->tree().level(cellId);
      if(m_gridProxy->tree().noChildren(cellId) == 0) {
        coordinates[pp->solver().a_coordinate(cellId, dir1)] = cellLvl;
    createCellToMap1D(coordinates, m_spatialLineCellToMap); // reduce coordinates to positions where to average and
                                                            // store a mapping of positions to average positions
    log << "local coordinates" << endl;
    for(auto it = coordinates.begin(); it != coordinates.end(); it++) {
      log << (*it).first << "\t" << (*it).second << endl;
    MInt noSolvers = pp->solver().m_noDomains; // rename
    m_spatialLineNoCells = coordinates.size();
    m_spatialCoordSum = 0;
    MPI_Reduce(&m_spatialLineNoCells, &m_spatialCoordSum, 1, MPI_INT, MPI_SUM, 0, pp->solver().mpiComm(), AT_,
    ScratchSpace<MFloat> coords(m_spatialLineNoCells, AT_, "coords");
    ScratchSpace<MInt> levels(m_spatialLineNoCells, AT_, "levels");
    // ScratchSpace<MInt> no_coords( noSolvers, AT_, "no_coords" );
    ScratchSpace<MInt> all_levels(m_spatialCoordSum, AT_, "all_levels");
    mAlloc(m_spatialLineAllCoord, m_spatialCoordSum, "m_spatialLineAllCoord", AT_); // TODO labels:PP root only
    for(auto it = coordinates.begin(); it != coordinates.end(); it++, pos++) {
      coords[pos] = (*it).first;
      levels[pos] = (*it).second;
    MPI_Gather(&m_spatialLineNoCells, 1, MPI_INT, m_spatialRecvcnts, 1, MPI_INT, 0, pp->solver().mpiComm(), AT_,
    for(MInt i = 0; i < noSolvers; i++) {
      log << m_spatialRecvcnts[i] << " ";
    m_spatialDispls[0] = 0;
    for(MInt solverId = 1; solverId < noSolvers; solverId++) {
      m_spatialDispls[solverId] = m_spatialDispls[solverId - 1] + m_spatialRecvcnts[solverId - 1];
      log << m_spatialDispls[solverId] << " ";
    MPI_Gatherv(coords.begin(), m_spatialLineNoCells, MPI_DOUBLE, m_spatialLineAllCoord, m_spatialRecvcnts,
                m_spatialDispls, MPI_DOUBLE, 0, pp->solver().mpiComm(), AT_, "coords.begin()", "m_spatialLineAllCoord");
    MPI_Gatherv(levels.begin(), m_spatialLineNoCells, MPI_INT, all_levels.begin(), m_spatialRecvcnts, m_spatialDispls,
                MPI_INT, 0, pp->solver().mpiComm(), AT_, "levels.begin()", "all_levels.begin()");
    if(pp->solver().domainId() == 0) {
      for(MInt cellId = 0; cellId < m_spatialCoordSum; cellId++) {
        m_spatialGlobalLineCoordinates[m_spatialLineAllCoord[cellId]] = all_levels[cellId];
      createCellToMap1D(m_spatialGlobalLineCoordinates, m_spatialGlobalLineCellToMap);
      log << "global coordinates" << m_spatialGlobalLineCoordinates.size() << endl;
      for(auto it = m_spatialGlobalLineCoordinates.begin(); it != m_spatialGlobalLineCoordinates.end(); it++) {
        log << (*it).first << "\t" << (*it).second << endl;
    // mAlloc( m_spatialLineAllVars, m_spatialCoordSum*(noVars+1), "m_spatialLineAllVars", AT_); // TODO labels:PP
    // noVars can change when file is loaded
    m_spatialLineAllVars = 0;
    if(pp->solver().domainId() == 0) {
      log << "recvcnts displs" << endl;
      for(MInt solverId = 0; solverId < noSolvers; solverId++) {
        m_spatialVarsDispls[solverId] = (noVars + 1) * m_spatialDispls[solverId];
        m_spatialVarsRecvcnts[solverId] = (noVars + 1) * m_spatialRecvcnts[solverId];
        log << m_spatialVarsRecvcnts[solverId] << " " << m_spatialVarsDispls[solverId] << endl;
    if(dir1 < 0 || dir1 > nDim - 1 || dir2 < 0 || dir2 > nDim - 1 || dir1 == dir2) {
      m_log << "invalid spatial directions " << dir1 << " " << dir2 << endl;
    // single logfiles for testing
    stringstream logName;
    logName << "log_rank_" << pp->solver().grid().domainId();
    log.open((logName.str()).c_str());
    map<pair<MFloat, MFloat>, MInt, coord_comp_2d_>& coordinates =
        m_spatialPlaneCoordinates; // TODO labels:PP remove reference
    for(MInt cellId = 0; cellId < noCells; cellId++) { // collect cell coordinates in spatial direction
      cellLvl = m_gridProxy->tree().level(cellId);
      if(m_gridProxy->tree().noChildren(cellId) == 0) {
        coordinates[pair<MFloat, MFloat>(pp->solver().a_coordinate(cellId, dir1),
                                         pp->solver().a_coordinate(cellId, dir2))] = cellLvl;
    createCellToMap2D(coordinates, m_spatialPlaneCellToMap); // reduce coordinates to positions where to average and
                                                             // store a mapping of positions to average positions
    log << "local coordinates" << endl;
    for(map<MFloat, MInt>::const_iterator it = coordinates.begin(); it != coordinates.end(); it++) {
      log << (*it).first << "\t" << (*it).second << endl;
    MInt noSolvers = pp->solver().m_noDomains; // rename
    m_spatialPlaneNoCells = coordinates.size();
    m_spatialCoordSum = 0;
    MPI_Reduce(&m_spatialPlaneNoCells, &m_spatialCoordSum, 1, MPI_INT, MPI_SUM, 0, pp->solver().mpiComm(), AT_,
    ScratchSpace<MFloat> coords(2 * m_spatialPlaneNoCells, AT_, "coords");
    ScratchSpace<MInt> levels(m_spatialPlaneNoCells, AT_, "levels");
    ScratchSpace<MInt> no_coords(noSolvers, AT_, "no_coords");
    ScratchSpace<MInt> coords_displs(noSolvers, AT_, "coords_displs");
    // ScratchSpace<MInt> no_ids( noSolvers, AT_, "no_ids" );
    ScratchSpace<MInt> all_levels(m_spatialCoordSum, AT_, "all_levels");
           AT_); // TODO labels:PP root only?
    for(auto it = coordinates.begin(); it != coordinates.end(); it++, pos++) {
      coords[2 * pos] = (*it).first.first;
      coords[2 * pos + 1] = (*it).first.second;
      levels[pos] = (*it).second;
    MPI_Gather(&m_spatialPlaneNoCells, 1, MPI_INT, m_spatialRecvcnts, 1, MPI_INT, 0, pp->solver().mpiComm(), AT_,
    for(MInt i = 0; i < noSolvers; i++) {
      no_coords[i] = 2 * m_spatialRecvcnts[i];
    log << "no_coords ";
    for(MInt i = 0; i < noSolvers; i++) {
      log << no_coords[i] << " ";
    m_spatialDispls[0] = 0;
    coords_displs[0] = 0;
    for(MInt solverId = 1; solverId < noSolvers; solverId++) {
      m_spatialDispls[solverId] = m_spatialDispls[solverId - 1] + no_coords[solverId - 1];
      coords_displs[solverId] = 2 * m_spatialDispls[solverId];
      log << m_spatialDispls[solverId] << " ";
    MPI_Gatherv(coords.begin(), 2 * m_spatialPlaneNoCells, MPI_DOUBLE, m_spatialPlaneAllCoord, no_coords.begin(),
                coords_displs.begin(), MPI_DOUBLE, 0, pp->solver().mpiComm(), AT_, "coords.begin()",
    MPI_Gatherv(levels.begin(), m_spatialPlaneNoCells, MPI_INT, all_levels.begin(), m_spatialRecvcnts, m_spatialDispls,
                MPI_INT, 0, pp->solver().mpiComm(), AT_, "levels.begin()", "all_levels.begin()");
    if(pp->solver().domainId() == 0) {
      for(MInt cellId = 0; cellId < m_spatialCoordSum; cellId++) {
        m_spatialGlobalPlaneCoordinates[pair<MFloat, MFloat>(
            m_spatialPlaneAllCoord[2 * cellId], m_spatialPlaneAllCoord[2 * cellId + 1])] = all_levels[cellId];
      createCellToMap2D(m_spatialGlobalPlaneCoordinates, m_spatialGlobalPlaneCellToMap);
      log << "global coordinates" << m_spatialGlobalLineCoordinates.size() << endl;
      for(map<MFloat, MInt>::const_iterator it = m_spatialGlobalLineCoordinates.begin();
          it != m_spatialGlobalLineCoordinates.end(); it++) {
        log << (*it).first << "\t" << (*it).second << endl;
    m_spatialPlaneAllVars = 0;
    if(pp->solver().domainId() == 0) {
      log << "recvcnts displs" << endl;
      for(MInt solverId = 0; solverId < noSolvers; solverId++) {
        m_spatialVarsDispls[solverId] = (noVars + 1) * m_spatialDispls[solverId];
        m_spatialVarsRecvcnts[solverId] = (noVars + 1) * m_spatialRecvcnts[solverId];
        log << m_spatialRecvcnts[solverId] << " " << m_spatialVarsDispls[solverId] << endl;
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initProbeArbitraryLine() {
  noPoints = Context::propertyLength("pp_arbLinePoints", m_postprocessingId);
  if(noPoints == 0 || noPoints % (2 * nDim) != 0) {
    m_log << "property pp_arbLinePoints has " << Context::propertyLength("pp_arbLinePoints", m_postprocessingId)
          << " entries, multiple of " << 2 * nDim << " needed";
  m_noArbLines = noPoints / (2 * nDim);
  for(MInt i = 0; i < noPoints; i++) {
    m_arbLinePoints[i] = Context::getSolverProperty<MFloat>("pp_arbLinePoints", m_postprocessingId, AT_, i);
  if(Context::propertyLength("pp_noPointsArbLine", m_postprocessingId) < m_noArbLines) {
    m_log << "property pp_noPointsArbLine has " << Context::propertyLength("pp_noPointsArbLine", m_postprocessingId)
          << " entries, " << m_noArbLines << " needed";
  for(MInt i = 0; i < m_noArbLines; i++) {
    m_noArbLineIds[i] = Context::getSolverProperty<MInt>("pp_noPointsArbLine", m_postprocessingId, AT_, i);
    if(m_noArbLineIds[i] < 2) {
      m_log << "property pp_noPointsArbLine[" << i << " ] needs to be at least 2" << endl;
      m_noArbLineIds[i] = 2;
  mAlloc(m_arbLineIds, m_noArbLines, "m_arbLineIds", AT_);
  // suppress valgrind error
  for(MInt i = 0; i < m_noArbLines; i++) {
    m_arbLineIds[i] = nullptr;
    m_arbLineCoordinates[i] = nullptr;
  for(MInt lineId = 0; lineId < m_noArbLines; lineId++) { // loop over all lines
    mAlloc(m_arbLineIds[lineId], m_noArbLineIds[lineId], "m_arbLineIds", AT_);
    mAlloc(m_arbLineCoordinates[lineId], m_noArbLineIds[lineId] * nDim, "m_arbLineCoordinates", AT_);
    // find the cellIds for all specified positions on the line
    for(MInt pId = 0; pId < m_noArbLineIds[lineId]; pId++) {
      for(MInt dimId = 0; dimId < nDim; dimId++) {
            m_arbLinePoints[2 * nDim * lineId + dimId]
            + (MFloat)pId / (m_noArbLineIds[lineId] - 1)
                  * (m_arbLinePoints[2 * nDim * lineId + dimId + nDim] - m_arbLinePoints[2 * nDim * lineId + dimId]);
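        // The pId-th point is placed at the fraction pId/(N-1) along the segment between the two
        // prescribed end points of the line, so both end points are sampled.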
      const MInt cellId = pp->solver().grid().raw().findContainingLeafCell(point);
      MInt tmpCellId = std::numeric_limits<MInt>::max();
      if(cellId > -1) tmpCellId = m_gridProxy->tree().globalId(cellId);
      MPI_Allreduce(MPI_IN_PLACE, &tmpCellId, 1, MPI_INT, MPI_MIN, pp->solver().mpiComm(), AT_, "MPI_IN_PLACE",
      if(tmpCellId == std::numeric_limits<MInt>::max())
        m_log << "no cell contains line " << lineId << ", point " << pId << endl;
        if(m_gridProxy->tree().globalId(cellId) > tmpCellId) {
          cout << " deleting double identified containing cell " << lineId << " " << pId << endl;
      m_arbLineIds[lineId][count] = cellId;
      for(MInt dimId = 0; dimId < nDim; dimId++) {
        m_arbLineCoordinates[lineId][nDim * count + dimId] = point[dimId];
    // #cells found on the line
    m_noArbLineIds[lineId] = count;
    ParallelIo::size_type localCount, offset, totalCount;
    localCount = m_noArbLineIds[lineId];
    ParallelIo::calcOffset(localCount, &offset, &totalCount, pp->solver().mpiComm());
    m_arbLineOffsets[lineId] = offset;
  MPI_Allreduce(m_noArbLineIds, m_globalNoArbLineIds, m_noArbLines, MPI_INT, MPI_SUM, pp->solver().mpiComm(), AT_,
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initProbeArbitrarySlice() {
  const MInt no_pp_arbSlicePoints = Context::propertyLength("pp_arbSlicePoints", m_postprocessingId);
  if(no_pp_arbSlicePoints == 0 || no_pp_arbSlicePoints % (3 * nDim) != 0) {
    m_log << "property pp_arbSlicePoints has " << no_pp_arbSlicePoints << " entries, multiple of " << 3 * nDim
    TERMM(1, "ERROR: Invalid number of pp_arbSlicePoints");
  m_noArbSlices = no_pp_arbSlicePoints / (3 * nDim);
  for(MInt i = 0; i < no_pp_arbSlicePoints; i++) {
    m_arbSlicePoints[i] = Context::getSolverProperty<MFloat>("pp_arbSlicePoints", m_postprocessingId, AT_, i);
  // TODO labels:PP,DOC check if valid points
  const MInt no_pp_noPointsArbSlice = Context::propertyLength("pp_noPointsArbSlice", m_postprocessingId);
  if(no_pp_noPointsArbSlice != 2 * m_noArbSlices) {
    m_log << "property pp_noPointsArbSlice has " << no_pp_noPointsArbSlice << " entries, " << m_noArbSlices
    TERMM(1, "ERROR: Invalid number of pp_noPointsArbSlice");
  for(MInt i = 0; i < 2 * m_noArbSlices; i++) {
    m_noArbSlicePoints[i] = Context::getSolverProperty<MInt>("pp_noPointsArbSlice", m_postprocessingId, AT_, i);
    if(m_noArbSlicePoints[i] < 2) {
      m_log << "property pp_noPointsArbSlice[" << i << " ] needs to be at least 2" << endl;
      m_noArbSlicePoints[i] = 2;
  MBool hasNewDistribution = false;
  MFloat stdVal = 0.0;
        << Context::propertyLength("pp_arbSlicePointsDistribution", m_postprocessingId) << endl;
  if(Context::propertyLength("pp_arbSlicePointsDistribution", m_postprocessingId) > 1) {
    mAlloc(m_arbSlicePointsDistribution, Context::propertyLength("pp_arbSlicePointsDistribution", m_postprocessingId),
    for(MInt i = 0; i < Context::propertyLength("pp_arbSlicePointsDistribution", m_postprocessingId); ++i) {
      m_arbSlicePointsDistribution[i] =
          Context::getSolverProperty<MFloat>("pp_arbSlicePointsDistribution", m_postprocessingId, AT_, &stdVal, i);
    hasNewDistribution = true;
  m_movePointsToGrid = 0;
  if(Context::propertyLength("pp_movePointsToGrid", m_postprocessingId) > 0) {
    m_movePointsToGrid = Context::getSolverProperty<MInt>("pp_movePointsToGrid", m_postprocessingId, AT_);
  m_log << "NYRAE DEBUG 4" << endl;
  m_spatialAveraging = false;
  if(Context::propertyLength("pp_spartialAveraging", m_postprocessingId) > 0) {
    m_spatialAveraging = Context::getSolverProperty<MBool>("pp_spartialAveraging", m_postprocessingId, AT_);
  collectMinLvlCells();
  if(0 < m_movePointsToGrid) {
    movePointsToGrid(m_arbSlicePoints, no_pp_arbSlicePoints, m_movePointsToGrid);
  MInt cellId, count, noIds;
  MInt sliceIdOffset = 0;
  for(MInt sliceId = 0; sliceId < m_noArbSlices; sliceId++) { // loop over all slices
    noIds = m_noArbSlicePoints[2 * sliceId] * m_noArbSlicePoints[2 * sliceId + 1];
    mAlloc(m_arbSliceIds[sliceId], noIds, "m_arbSliceIds", AT_);
    // find the cellIds for all specified positions on the slice
    for(MInt pId1 = 0; pId1 < m_noArbSlicePoints[2 * sliceId]; pId1++) {
      for(MInt pId2 = 0; pId2 < m_noArbSlicePoints[2 * sliceId + 1]; pId2++) {
        for(MInt dimId = 0; dimId < nDim; dimId++) {
          if(hasNewDistribution) {
            point[dimId] = m_arbSlicePoints[3 * nDim * sliceId + dimId]
                           + m_arbSlicePointsDistribution[pId1 + (sliceIdOffset)]
                                 * (m_arbSlicePoints[3 * nDim * sliceId + dimId + nDim]
                                    - m_arbSlicePoints[3 * nDim * sliceId + dimId])
                           + m_arbSlicePointsDistribution[pId2 + (sliceIdOffset + m_noArbSlicePoints[2 * sliceId])]
                                 * (m_arbSlicePoints[3 * nDim * sliceId + 2 * nDim + dimId]
                                    - m_arbSlicePoints[3 * nDim * sliceId + dimId]);
            point[dimId] = m_arbSlicePoints[3 * nDim * sliceId + dimId]
                           + (MFloat)pId1 / (m_noArbSlicePoints[2 * sliceId] - 1)
                                 * (m_arbSlicePoints[3 * nDim * sliceId + dimId + nDim]
                                    - m_arbSlicePoints[3 * nDim * sliceId + dimId])
                           + (MFloat)pId2 / (m_noArbSlicePoints[2 * sliceId + 1] - 1)
                                 * (m_arbSlicePoints[3 * nDim * sliceId + 2 * nDim + dimId]
                                    - m_arbSlicePoints[3 * nDim * sliceId + dimId]);
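            // Each sample is parameterized by the fractions s = pId1/(N1 - 1) and t = pId2/(N2 - 1)
            // (or the user-given distribution values) and placed at P0 + s*(P1 - P0) + t*(P2 - P0),
            // with P0, P1, P2 being the three corner points read from pp_arbSlicePoints.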
        findContainingCell(point, cellId);
          m_arbSliceIds[sliceId][count] = cellId;
          for(MInt dimId = 0; dimId < nDim; dimId++) {
            m_arbSliceCoordinates[sliceId][nDim * count + dimId] = point[dimId];
    // #cells found on the slice
    m_noArbSlicePoints[sliceId] = count;
    ParallelIo::size_type localCount, offset, totalCount;
    localCount = m_noArbSlicePoints[sliceId];
    ParallelIo::calcOffset(localCount, &offset, &totalCount, pp->solver().mpiComm());
    m_arbSliceOffsets[sliceId] = offset;
    sliceIdOffset += m_noArbSlicePoints[2 * sliceId] + m_noArbSlicePoints[(2 * sliceId) + 1];
  MPI_Allreduce(m_noArbSlicePoints, m_globalNoArbSlicePoints, m_noArbSlices, MPI_INT, MPI_SUM, pp->solver().mpiComm(),
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initReduceToLevelAvg() {
  m_ReStressesAverageFileName = "";
  m_ReStressesAverageFileName = Context::getSolverProperty<MString>("pp_ReStressesAverageFileName", m_postprocessingId,
                                                                    AT_, &m_ReStressesAverageFileName);
  if(m_ReStressesAverageFileName.empty()) {
    mTerm(1, AT_, "Please specify the property 'ReStressesAverageFileName' ...");
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::initPeriodicSliceAverage() {
  if(!postData().grid().isActive()) return;
  MInt noSlicePositions = 0;
  std::multimap<MFloat, MFloat> slicePositions;
  std::vector<MFloat> coord(2, F0);
  for(MInt cellId = 0; cellId < postData().c_noCells(); cellId++) {
    if(postData().c_noChildren(cellId) > 0) continue; // skip non leaf cells
    if(postData().a_isHalo(cellId)) continue;         // skip halo cells
    coord[0] = postData().c_coordinate(cellId, 0);
    coord[1] = postData().c_coordinate(cellId, 1);
    // see if coord is already present
    MInt count = slicePositions.count(coord[0]);
      MBool alreadyInserted = false;
      for(auto it = slicePositions.equal_range(coord[0]).first; it != slicePositions.equal_range(coord[0]).second;
        if(approx((*it).second, coord[1], 1e-16)) {
          alreadyInserted = true;
      if(!alreadyInserted) {
        slicePositions.insert(std::make_pair(coord[0], coord[1]));
      slicePositions.insert(std::make_pair(coord[0], coord[1]));
  // debug output slicePositions
  //----------------------------------------------------------------------------
  // fn << "slicePositions_" << postData().domainId() << ".txt";
  // MString fname = fn.str();
  // ofstream slicePositionsOutput;
  // slicePositionsOutput.precision(16);
  // slicePositionsOutput.open(fname);
  // for(auto it = slicePositions.begin(); it != slicePositions.end(); ++it){
  //   MString line = "";
  //   line.append(to_string((*it).first) + " " + to_string((*it).second) + " " + to_string(postData().domainId()));
  //   slicePositionsOutput << line << endl;
  // slicePositionsOutput.close();
  //----------------------------------------------------------------------------
  // exchange multimap
  MFloatScratchSpace posX(noSlicePositions, AT_, "posX");
  MFloatScratchSpace posY(noSlicePositions, AT_, "posY");
  for(auto it = slicePositions.begin(); it != slicePositions.end(); ++it) {
    posX[count_] = (*it).first;
    posY[count_] = (*it).second;
  MPI_Allreduce(&noSlicePositions, &m_globalnoSlicePositions, 1, MPI_INT, MPI_SUM, postData().mpiComm(), AT_,
  MFloatScratchSpace globalPosX(m_globalnoSlicePositions, AT_, "globalPosX");
  MFloatScratchSpace globalPosY(m_globalnoSlicePositions, AT_, "globalPosY");
  ScratchSpace<MInt> recvbuf(postData().noDomains(), "recvbuf", FUN_);
  MPI_Gather(&noSlicePositions, 1, MPI_INT, &recvbuf[0], 1, MPI_INT, 0, postData().mpiComm(), AT_, "noSlicePositions",
  ScratchSpace<MInt> displs(postData().noDomains(), "displspos", FUN_);
  if(postData().domainId() == 0) {
    for(MInt dom = 0; dom < postData().noDomains(); dom++) {
      displs[dom] = offset;
      offset += recvbuf[dom];
  MPI_Gatherv(&posX[0], noSlicePositions, MPI_DOUBLE, &globalPosX[0], &recvbuf[0], &displs[postData().domainId()],
              MPI_DOUBLE, 0, postData().mpiComm(), AT_, "posX", "globalPosX");
  MPI_Gatherv(&posY[0], noSlicePositions, MPI_DOUBLE, &globalPosY[0], &recvbuf[0], &displs[postData().domainId()],
              MPI_DOUBLE, 0, postData().mpiComm(), AT_, "posY", "globalPosY");
  MPI_Bcast(&globalPosX[0], m_globalnoSlicePositions, MPI_DOUBLE, 0, postData().mpiComm(), AT_, "globalPosX");
  MPI_Bcast(&globalPosY[0], m_globalnoSlicePositions, MPI_DOUBLE, 0, postData().mpiComm(), AT_, "globalPosY");
  // find double entries
  for(MInt i = 0; i < m_globalnoSlicePositions; i++) {
    coord[0] = globalPosX[i];
    coord[1] = globalPosY[i];
    // see if coord is already present
    MInt count = m_sliceGlobalPositions.count(coord[0]);
      MBool alreadyInserted = false;
      for(auto it = m_sliceGlobalPositions.equal_range(coord[0]).first;
          it != m_sliceGlobalPositions.equal_range(coord[0]).second;
        if(approx((*it).second, coord[1], 1e-16)) {
          alreadyInserted = true;
      if(!alreadyInserted) {
        m_sliceGlobalPositions.insert(std::make_pair(coord[0], coord[1]));
      m_sliceGlobalPositions.insert(std::make_pair(coord[0], coord[1]));
  m_globalnoSlicePositions = m_sliceGlobalPositions.size();
  mAlloc(m_sliceAverage, postData().noVariables(), m_globalnoSlicePositions, "m_sliceAverage", AT_);
  for(MInt i = 0; i < m_globalnoSlicePositions; i++) {
    for(MInt varId = 0; varId < postData().noVariables(); varId++) {
      m_sliceAverage[varId][i] = F0;
  // determine index of cells in globalPos
  for(MInt cellId = 0; cellId < postData().c_noCells(); cellId++) {
    if(postData().c_noChildren(cellId) > 0) continue; // skip non leaf cells
    if(postData().a_isHalo(cellId)) continue;         // skip halo cells
    MFloat x = postData().c_coordinate(cellId, 0);
    MFloat y = postData().c_coordinate(cellId, 1);
    for(auto it = m_sliceGlobalPositions.begin(); it != m_sliceGlobalPositions.end(); ++it) {
      if(approx((*it).first, x, 1e-16) && approx((*it).second, y, 1e-16)) {
        m_cell2globalIndex[index].push_back(cellId);
        m_noPeriodicSliceCells[index]++;
  MPI_Allreduce(MPI_IN_PLACE, &m_noPeriodicSliceCells[0], m_globalnoSlicePositions, MPI_INT, MPI_SUM,
  // debug output slicePositions
  //----------------------------------------------------------------------------
  if(postData().domainId() == 0) {
    fng << "sliceGlobalPositions.txt";
    MString fnameg = fng.str();
    ofstream sliceGlobalPositionsOutput;
    sliceGlobalPositionsOutput.precision(16);
    sliceGlobalPositionsOutput.open(fnameg);
    for(auto it = m_sliceGlobalPositions.begin(); it != m_sliceGlobalPositions.end(); ++it) {
      line.append(to_string((*it).first) + " " + to_string((*it).second) + " "
                  + to_string(m_noPeriodicSliceCells[index]));
      sliceGlobalPositionsOutput << line << endl;
    sliceGlobalPositionsOutput.close();
  //----------------------------------------------------------------------------
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::periodicSliceAverage() {
  if(!postData().grid().isActive()) return;
  postData().loadMeanFile(m_postprocessFileName);
  if(postData().domainId() == 0) cerr << "Calculating periodic average" << endl;
  for(MInt i = 0; i < m_globalnoSlicePositions; i++) {
    for(MInt c = 0; c < (MInt)m_cell2globalIndex[i].size(); c++) {
      MInt cellId = m_cell2globalIndex[i][c];
      for(MInt varId = 0; varId < postData().noVariables(); varId++) {
        m_sliceAverage[varId][i] += postData().m_averagedVars[cellId][varId] / m_noPeriodicSliceCells[i];
  // exchange sliceAverage
  for(MInt varId = 0; varId < postData().noVariables(); varId++) {
    MPI_Allreduce(MPI_IN_PLACE, &m_sliceAverage[varId][0], m_globalnoSlicePositions, MPI_DOUBLE, MPI_SUM,
  for(MInt i = 0; i < m_globalnoSlicePositions; i++) {
    for(MInt c = 0; c < (MInt)m_cell2globalIndex[i].size(); c++) {
      MInt cellId = m_cell2globalIndex[i][c];
      for(MInt varId = 0; varId < postData().noVariables(); varId++) {
        postData().a_variable(cellId, varId) = m_sliceAverage[varId][i];
  postData().saveRestartFile(false);
  // debug output sliceAverage
  //----------------------------------------------------------------------------
  if(postData().domainId() == 0) {
    fng << "sliceAverage_" << postData().domainId() << ".txt";
    MString fnameg = fng.str();
    ofstream sliceGlobalPositionsOutput;
    sliceGlobalPositionsOutput.precision(16);
    sliceGlobalPositionsOutput.open(fnameg);
    for(auto it = m_sliceGlobalPositions.begin(); it != m_sliceGlobalPositions.end(); ++it) {
      line.append(to_string((*it).first) + " " + to_string((*it).second) + " " + to_string(m_noPeriodicSliceCells[i])
                  + " " + to_string(m_sliceAverage[0][i]));
      sliceGlobalPositionsOutput << line << endl;
    sliceGlobalPositionsOutput.close();
  //----------------------------------------------------------------------------
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::neededMeanVarsForSourceTerm(const MInt sourceTerm,
                                                               std::vector<MInt>& meanVars) const {
  switch(sourceTerm) {
      meanVars.push_back(MV::LAMB0);
    case ST::Q_mI_linear:
      meanVars.push_back(MV::VORT0);
      // Mean gradient of rho
      meanVars.push_back(MV::DRHO);
      // Mean gradient of p
      meanVars.push_back(MV::DP);
      // Mean of (gradient of p divided by rho)
      meanVars.push_back(MV::GRADPRHO);
      // Mean gradients of velocity components (contains MV::DU)
      meanVars.push_back(MV::GRADU);
      // Sum of products of velocity and velocity gradients:
      // u * grad(u) + v * grad(v) + w * grad(w)
      meanVars.push_back(MV::UGRADU);
      // Components of divergence of u
      meanVars.push_back(MV::DU);
      // Mean gradient of rho
      meanVars.push_back(MV::DRHO);
      // Mean gradient of p
      meanVars.push_back(MV::DP);
      // Components of divergence of u
      meanVars.push_back(MV::DU);
      // Mean gradient of rho
      meanVars.push_back(MV::DRHO);
      // Mean gradient of rho*div(u)
      meanVars.push_back(MV::RHODIVU);
      // Mean gradient of u*grad(rho)
      meanVars.push_back(MV::UGRADRHO);
      TERMM(1, "Source term '" + s_sourceTermNames[sourceTerm] + "' not implemented yet.");
template <MInt nDim, class ppType>
void PostProcessing<nDim, ppType>::averageSolutions() {
  if(!pp->solver().grid().isActive()) return;
  // Find out how many solutions are required and how they will be weighted
  const MFloat weight = 1.0 / (((m_averageStopTimestep - m_averageStartTimestep) / m_averageInterval) + 1);
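  // Example: with averageStartTimestep = 1000, averageStopTimestep = 2000 and averageInterval = 100
  // there are (2000 - 1000) / 100 + 1 = 11 samples, so each one enters the mean with weight 1/11.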
  const MInt noCells = postData().grid().noInternalCells();
  const MFloat* cellVars = nullptr;
  MFloat* heatRelease = nullptr;
  if(m_twoPass) { // two pass activated, compute mean values first
    TERMM(1, "FIXME two-pass averaging is untested and probably broken");
    for(MInt t = m_averageStartTimestep; t <= m_averageStopTimestep; t += m_averageInterval) {
      m_log << " ^ * Mean computation timestep " << t << " with weight " << weight << "\n";
      pp->solver().loadSampleVariables(t); // load variables for timestep t
      if(m_statisticCombustionAnalysis) pp->solver().getHeatRelease(heatRelease);
      for(MInt dataId = 0; dataId < noCells; dataId++) {
        MInt cellId = convertIdParent(postData(), pp->solver(), dataId);
        getSampleVariables(cellId, cellVars, true);
        if(cellVars == nullptr) {
        for(MInt varId = 0; varId < m_noVariables; varId++) { // sum up all variables
          // m_summedVars[cellId][varId] += cellVars[varId];
          postData().a_variable(dataId, varId) += cellVars[varId];
    // weighting of summed primitive variables
    for(MInt dataId = 0; dataId < noCells; dataId++) {
      MInt cellId = convertIdParent(postData(), pp->solver(), dataId);
      for(MInt varId = 0; varId < m_noVariables; varId++) {
        // m_summedVars[cellId][varId] *= weight;
        postData().a_variable(dataId, varId) *= weight;
  } // end of mean calculation for twoPass
  // Start averaging, timestep loop
  // PP_ToDo-Average: is this correct
  // MInt restartT = m_restartTimeStep;
  // if(m_averageRestart) {
  //   restartT += m_averageInterval;
  MInt restartT = m_averageStartTimestep;
  // TODO labels:PP move to separate method e.g. addAveragingSample() (see fv-structured PP)
  for(MInt t = restartT; t <= m_averageStopTimestep; t += m_averageInterval) {
    m_log << " ^ * Averaging timestep " << t << " with weight " << weight << "\n";
    pp->solver().loadSampleVariables(t); // load variables for timestep t
    if(m_statisticCombustionAnalysis) pp->solver().getHeatRelease(heatRelease);
    for(MInt dataId = 0; dataId < noCells; dataId++) { // cell loop
      const MInt cellId = convertIdParent(postData(), pp->solver(), dataId);
      getSampleVariables(cellId, cellVars, true);
      if(cellVars == nullptr) {
      if(m_twoPass) // two pass, second part
        for(MInt varId = 0; varId < nDim; varId++) {
          // m_summedVars[cellId][varId + m_noVariables] +=
          //     (cellVars[varId] - m_summedVars[cellId][varId]) * (cellVars[varId] - m_summedVars[cellId][varId]);
          // postData().a_variable(dataId, varId) +=
          //     (cellVars[varId] - postData().a_variable(dataId, varId)) * (cellVars[varId] -
          //     m_summedVars[cellId][varId]);
        for(MInt varId = 0; varId < 2 * nDim - 3; varId++) {
          // m_summedVars[cellId][m_noVariables + nDim + varId] +=
          //     (cellVars[varId % nDim] - m_summedVars[cellId][varId])
          //     * (cellVars[(varId + 1) % nDim] - m_summedVars[cellId][(varId + 1) % nDim]);
          // postData().a_variable(dataId, m_noVariables + nDim + varId) +=
          //     (cellVars[varId % nDim] - postData().a_variable(dataId, varId))
          //     * (cellVars[(varId + 1) % nDim] - m_summedVars[cellId][(varId + 1) % nDim]);
          for(MInt varId = 0; varId < nDim; varId++) {
            // m_summedVars[cellId][m_noVariables + 3 * (nDim - 1) + varId] +=
            //     pow(cellVars[varId] - m_summedVars[cellId][varId], 3);
            // m_summedVars[cellId][m_noVariables + 3 * (nDim - 1) + nDim + varId] +=
            //     pow(cellVars[varId] - m_summedVars[cellId][varId], 4);
            // postData().a_variable(dataId, m_noVariables + 3 * (nDim - 1) + varId) +=
            //     pow(cellVars[varId] - postData().a_variable(dataId, varId), 3);
            // postData().a_variable(dataId, m_noVariables + 3 * (nDim - 1) + nDim + varId) +=
            //     pow(cellVars[varId] - postData().a_variable(dataId, varId), 4);
        } else if(m_skewness) {
          for(MInt varId = 0; varId < nDim; varId++) {
            // m_summedVars[cellId][m_noVariables + 3 * (nDim - 1) + varId] +=
            //     pow(cellVars[varId] - m_summedVars[cellId][varId], 3);
            // postData().a_variable(dataId, m_noVariables + 3 * (nDim - 1) + varId) +=
            //     pow(cellVars[varId] - postData().a_variable(dataId, varId), 3);
      else if(m_useKahan) // Kahan summation activated, reduced error in summation of variables
        if(dataId == 0) m_log << "start kahan summation" << endl;
        /* Kahan summation pseudocode
           for i=0 to input.length
        for(MInt varId = 0; varId < m_noVariables; varId++) { // sum up all variables
          // m_ySum[cellId][varId] = cellVars[varId] - m_cSum[cellId][varId];
          // m_tSum[cellId][varId] = m_summedVars[cellId][varId] + m_ySum[cellId][varId];
          // m_cSum[cellId][varId] = (m_tSum[cellId][varId] - m_summedVars[cellId][varId]) - m_ySum[cellId][varId];
          // m_summedVars[cellId][varId] = m_tSum[cellId][varId];
        for(MInt varId = 0; varId < nDim; varId++) { // squares of velocity components (u*u,v*v(,w*w))
          // m_ySquare[cellId][varId] = (cellVars[varId] * cellVars[varId]) - m_cSquare[cellId][varId];
          // m_tSquare[cellId][varId] = m_square[cellId][varId] + m_ySquare[cellId][varId];
          // m_cSquare[cellId][varId] = (m_tSquare[cellId][varId] - m_square[cellId][varId]) -
          //     m_ySquare[cellId][varId]; m_square[cellId][varId] = m_tSquare[cellId][varId];
        for(MInt varId = 0; varId < 2 * nDim - 3;
            varId++) { // products of different velocity components (u*v(,v*w,w*u))
          // m_ySquare[cellId][nDim + varId] =
          //     (cellVars[varId]) * (cellVars[(varId + 1) % nDim]) - m_cSquare[cellId][nDim + varId];
          // m_tSquare[cellId][nDim + varId] = m_square[cellId][nDim + varId] + m_ySquare[cellId][nDim + varId];
          // m_cSquare[cellId][nDim + varId] =
          //     (m_tSquare[cellId][nDim + varId] - m_square[cellId][nDim + varId]) - m_ySquare[cellId][nDim + varId];
          // m_square[cellId][nDim + varId] = m_tSquare[cellId][nDim + varId];
        if(m_kurtosis) { // compute third and fourth power of velocity components for skewness and kurtosis
          for(MInt varId = 0; varId < nDim; varId++) {
            // m_yCube[cellId][varId] = pow(cellVars[varId], 3) - m_cCube[cellId][varId];
            // m_tCube[cellId][varId] = m_cube[cellId][varId] + m_yCube[cellId][varId];
            // m_cCube[cellId][varId] = (m_tCube[cellId][varId] - m_cube[cellId][varId]) - m_yCube[cellId][varId];
            // m_cube[cellId][varId] = m_tCube[cellId][varId];
            // m_yFourth[cellId][varId] = pow(cellVars[varId], 4) - m_cFourth[cellId][varId];
            // m_tFourth[cellId][varId] = m_fourth[cellId][varId] + m_yFourth[cellId][varId];
            // m_cFourth[cellId][varId] = (m_tFourth[cellId][varId] - m_fourth[cellId][varId]) -
            //     m_yFourth[cellId][varId]; m_fourth[cellId][varId] = m_tFourth[cellId][varId];
        } else if(m_skewness) { // compute only third power of velocity components for skewness
          for(MInt varId = 0; varId < nDim; varId++) {
            // m_yCube[cellId][varId] = pow(cellVars[varId], 3) - m_cCube[cellId][varId];
            // m_tCube[cellId][varId] = m_cube[cellId][varId] + m_yCube[cellId][varId];
            // m_cCube[cellId][varId] = (m_tCube[cellId][varId] - m_cube[cellId][varId]) - m_yCube[cellId][varId];
            // m_cube[cellId][varId] = m_tCube[cellId][varId];
      } // kahan summation end
3647 else // normal summation
3649 // Primitive variables
3650 MInt varIndex = postData().getPropertyVariableOffset("primitive").first;
3651 for(MInt varId = 0; varId < m_noVariables; varId++) {
3652 postData().a_variable(dataId, varIndex + varId) += cellVars[varId];
3656 MInt varSquare = postData().getPropertyVariableOffset("square").first;
3657 for(MInt varId = 0; varId < nDim; varId++) {
3658 // squares of velocity components (u*u,v*v(,w*w))
3659 postData().a_variable(dataId, varSquare + varId) += cellVars[varId] * cellVars[varId];
3661 for(MInt varId = 0; varId < 2 * nDim - 3; varId++) {
3662 // products of different velocity components (u*v(,v*w,w*u))
3663 postData().a_variable(dataId, varSquare + nDim + varId) +=
3664 (cellVars[varId % nDim]) * (cellVars[(varId + 1) % nDim]);
3666 // squares of pressure p*p
3667 postData().a_variable(dataId, varSquare + 3 * nDim - 3) += cellVars[nDim + 1] * cellVars[nDim + 1];
3670 // third and fourth powers of velocity components (skewness and kurtosis)
3672 ASSERT(m_square, "");
3673 ASSERT(m_skewness, "");
3674 MInt varSkew = postData().getPropertyVariableOffset("skewness").first;
3675 MInt varKurt = postData().getPropertyVariableOffset("kurtosis").first;
3676 for(MInt varId = 0; varId < nDim; varId++) {
3677 postData().a_variable(dataId, varSkew + varId) += pow(cellVars[varId], 3);
3678 postData().a_variable(dataId, varKurt + varId) += pow(cellVars[varId], 4);
3680 // third powers of velocity components (skewness)
3681 } else if(m_skewness) {
3682 ASSERT(m_square, "");
3683 MInt varSkew = postData().getPropertyVariableOffset("skewness").first;
3684 for(MInt varId = 0; varId < nDim; varId++) {
3685 postData().a_variable(dataId, varSkew + varId) += pow(cellVars[varId], 3);
3690 if(m_statisticCombustionAnalysis) {
3691 MInt varComb = postData().getPropertyVariableOffset("statisticCombustionAnalysis").first;
3692 postData().a_variable(dataId, varComb) += heatRelease[cellId];
3693 postData().a_variable(dataId, varComb + 1) += cellVars[nDim + 2] * cellVars[nDim + 2];
3694 postData().a_variable(dataId, varComb + 2) += heatRelease[cellId] * heatRelease[cellId];
3698 if(m_averageVorticity) {
3699 MInt varVort = postData().getPropertyVariableOffset("averageVorticity").first;
3700 for(MInt varId = 0; varId < m_noAveragedVorticities; varId++) {
3701 postData().a_variable(dataId, varVort + varId) += pp->vorticityAtCell(cellId, varId);
3706 } // normal summation end
3711 // weighting for twoPass
3712 for(MInt dataId = 0; dataId < noCells; dataId++) {
3713 MInt cellId = convertIdParent(postData(), pp->solver(), dataId);
3715 for(MInt varId = 0; varId < 3 * nDim - 3 + nDim * (m_skewness + m_kurtosis); varId++) {
3716 // m_summedVars[cellId][m_noVariables + varId] *= weight;
3721 m_computeAndSaveMean = true;
3722 computeAndSaveMean();
3725 m_log << " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ \n" << endl;
3740template <MInt nDim, class ppType>
3741void PostProcessing<nDim, ppType>::averageSolutionsInSolve() {
3744 // Prepare to write restart file for all ranks
3745 if(globalTimeStep == m_averageStopTimestep) {
3746 postData().m_forceWriteRestart = true;
3749 if(!pp->solver().grid().isActive()) return;
3751 const MInt noCells = postData().grid().noInternalCells();
3753 // The following bool is used to disable direct addressing of [nDim+1], which
3754 // corresponds to the pressure in the FV solver. For LB, pressure is not a
3755 // solution variable since the solver uses an isothermal equation of state.
3756 // This might be handled more elegantly in the future, or each solver could be
3757 // required to provide this value even if it is meaningless.
3758#if not defined(MAIA_DISABLE_LB)
3759 constexpr MBool isothermal = (std::is_same<ppType, PostProcessingLb<nDim>>::value);
3760 m_averageSpeedOfSound = (m_averageSpeedOfSound && !isothermal);
3762 constexpr MBool isothermal = false;
3765 // Determine the value of "gamma" from solver if needed
3766 // This check is placed here to be called during the first loop iteration, as
3767 // getDimensionalizationParams() does not work (yet) while being in the c'tor
3769 if(m_averageSpeedOfSound && m_gamma < 0.0) {
3770 vector<pair<MFloat, MString>> dimParams;
3771 pp->solver().getDimensionalizationParams(dimParams);
3774 for(auto&& p : dimParams) {
3775 if(p.second == "gamma") {
3782 TERMM(1, "Dimensionalization parameter for 'gamma' not found but needed "
3783 "for averaging the speed of sound.");
3787 const MBool skipRestartStep = m_averageRestart && (globalTimeStep == m_restartTimeStep);
3789 if(globalTimeStep == m_averageStartTimestep) {
3790 // Reset everything at averaging start timestep
3791 std::fill_n(&postData().a_variable(0, 0), noCells * postData().noVariables(), 0.0);
3794 // Let's do the averaging only if we are on the right timestep
3795 if(globalTimeStep >= m_averageStartTimestep && (globalTimeStep - m_averageStartTimestep) % m_averageInterval == 0
3796 && globalTimeStep <= m_averageStopTimestep && !skipRestartStep) {
3797 m_log << " ^ * Averaging timestep " << globalTimeStep << "\n";
3799 MFloat* heatRelease = 0;
3801 if(m_statisticCombustionAnalysis) {
3802 pp->solver().calculateHeatRelease();
3803 pp->solver().getHeatRelease(heatRelease);
3806 if(m_acousticAnalysis) {
3807 MFloatScratchSpace QeI(pp->solver().a_noCells(), AT_, "QeI");
3808 MFloatScratchSpace QeIII(pp->solver().a_noCells(), AT_, "QeIII");
3809 MFloatScratchSpace cSquared(pp->solver().a_noCells(), AT_, "cSquared");
3810 MFloatScratchSpace drhodt(pp->solver().a_noCells(), AT_, "drhodt");
3811 pp->computeAcousticSourceTermQe(QeI, QeIII, cSquared, drhodt);
3814 // To trigger calculating derivative of primitive variables
3815 const MBool needDerivative = m_averageSpeedOfSound || m_needVorticity
3816 || m_activeMeanVars.find(MV::GRADU) != m_activeMeanVars.end()
3817 || m_activeMeanVars.find(MV::DU) != m_activeMeanVars.end()
3818 || m_activeMeanVars.find(MV::UGRADU) != m_activeMeanVars.end()
3819 || m_activeMeanVars.find(MV::DRHO) != m_activeMeanVars.end()
3820 || m_activeMeanVars.find(MV::DP) != m_activeMeanVars.end()
3821 || m_activeMeanVars.find(MV::RHODIVU) != m_activeMeanVars.end()
3822 || m_activeMeanVars.find(MV::UGRADRHO) != m_activeMeanVars.end()
3823 || m_activeMeanVars.find(MV::GRADPRHO) != m_activeMeanVars.end();
3825 // exchange for correlation
3827 const MFloat* cellVarsCorr = nullptr;
3828 for(MInt correlationId = 0; correlationId < m_noCorrelationLines; correlationId++) {
3829 for(MInt i = 0; i < m_noCorrelationIds[correlationId]; i++) {
3830 MInt id = m_correlationIds[correlationId][i];
3831 MInt cellId = convertIdParent(postData(), pp->solver(), id);
3832 getSampleVariables(cellId, cellVarsCorr, true);
3833 MInt corrVarIndex = m_correlationVariableIndex[correlationId];
3834 m_correlationExchangeVar[correlationId][i] = cellVarsCorr[corrVarIndex];
3837 MInt noDomain = postData().noDomains();
3838 ScratchSpace<MInt> recvbuf(noDomain, "recvbuf", AT_);
3841 MPI_Gather(&m_noCorrelationIds[correlationId], 1, MPI_INT, &recvbuf[0], 1, MPI_INT, 0, postData().mpiComm(),
3844 ScratchSpace<MInt> displs(noDomain, "displs", AT_);
3845 if(postData().domainId() == 0) {
3847 for(MInt dom = 0; dom < noDomain; dom++) {
3848 displs[dom] = offset;
3849 offset += recvbuf[dom];
3853 MPI_Gatherv(&m_correlationExchangeVar[correlationId][0], m_noCorrelationIds[correlationId], MPI_DOUBLE,
3854 &m_globalCorrelationExchangeVar[correlationId][0], &recvbuf[postData().domainId()],
3855 &displs[postData().domainId()], MPI_DOUBLE, 0, postData().mpiComm(), AT_,
3858 MPI_Bcast(m_globalCorrelationExchangeVar[correlationId], m_globalNoCorrelationIds[correlationId], MPI_DOUBLE, 0,
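// Note (added for clarity): the exchange above follows a gather-then-broadcast pattern:
// MPI_Gather collects the per-rank sample counts on rank 0, the displacements are built
// from these counts, MPI_Gatherv assembles the full correlation line on rank 0, and
// MPI_Bcast distributes the assembled line so every rank can access the reference values
// of all correlation points.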
3863#if defined(MAIA_GCC_COMPILER) && GNU < 10
3864 // NOTE: This works around a bug in the GCC 9 compiler used on HAWK. With this
3865 // workaround clang (correctly) complains.
3866 maia::parallelFor(0, noCells, [&, isothermal](MInt dataId) {
3868 maia::parallelFor(0, noCells, [&](MInt dataId) {
3870 const MInt cellId = convertIdParent(postData(), pp->solver(), dataId);
3872 std::vector<MFloat> cellVars(m_noVariables);
3873 getSampleVariables(cellId, cellVars);
3875 if(m_useKahan) { // kahan summation activated
3876 /* Kahan summation pseudocode
3879 for i=0 to input.length
3886 for(MInt varId = 0; varId < m_noVariables; varId++) { // sum up all variables
3887 // m_ySum[cellId][varId] = cellVars[varId] - m_cSum[cellId][varId];
3888 // m_tSum[cellId][varId] = m_summedVars[cellId][varId] + m_ySum[cellId][varId];
3889 // m_cSum[cellId][varId] = (m_tSum[cellId][varId] - m_summedVars[cellId][varId]) - m_ySum[cellId][varId];
3890 // m_summedVars[cellId][varId] = m_tSum[cellId][varId];
3892 for(MInt varId = 0; varId < nDim; varId++) { // squares of velocity components (u*u,v*v(,w*w))
3893 // m_ySquare[cellId][varId] = (cellVars[varId] * cellVars[varId]) - m_cSquare[cellId][varId];
3894 // m_tSquare[cellId][varId] = m_square[cellId][varId] + m_ySquare[cellId][varId];
3895 // m_cSquare[cellId][varId] = (m_tSquare[cellId][varId] - m_square[cellId][varId]) -
3896 // m_ySquare[cellId][varId]; m_square[cellId][varId] = m_tSquare[cellId][varId];
3898 for(MInt varId = 0; varId < 2 * nDim - 3;
3899 varId++) { // products of different velocity components (u*v(,v*w,w*u))
3900 // m_ySquare[cellId][nDim + varId] =
3901 // (cellVars[varId]) * (cellVars[(varId + 1) % nDim]) - m_cSquare[cellId][nDim + varId];
3902 // m_tSquare[cellId][nDim + varId] = m_square[cellId][nDim + varId] + m_ySquare[cellId][nDim + varId];
3903 // m_cSquare[cellId][nDim + varId] =
3904 // (m_tSquare[cellId][nDim + varId] - m_square[cellId][nDim + varId]) - m_ySquare[cellId][nDim + varId];
3905 // m_square[cellId][nDim + varId] = m_tSquare[cellId][nDim + varId];
3907 if(m_kurtosis) { // compute third and fourth power of velocity components for skewness and kurtosis
3908 for(MInt varId = 0; varId < nDim; varId++) {
3909 // m_yCube[cellId][varId] = pow(cellVars[varId], 3) - m_cCube[cellId][varId];
3910 // m_tCube[cellId][varId] = m_cube[cellId][varId] + m_yCube[cellId][varId];
3911 // m_cCube[cellId][varId] = (m_tCube[cellId][varId] - m_cube[cellId][varId]) - m_yCube[cellId][varId];
3912 // m_cube[cellId][varId] = m_tCube[cellId][varId];
3914 // m_yFourth[cellId][varId] = pow(cellVars[varId], 4) - m_cFourth[cellId][varId];
3915 // m_tFourth[cellId][varId] = m_fourth[cellId][varId] + m_yFourth[cellId][varId];
3916 // m_cFourth[cellId][varId] = (m_tFourth[cellId][varId] - m_fourth[cellId][varId]) -
3917 // m_yFourth[cellId][varId]; m_fourth[cellId][varId] = m_tFourth[cellId][varId];
3919 } else if(m_skewness) { // compute only third power of velocity components for skewness
3920 for(MInt varId = 0; varId < nDim; varId++) {
3921 // m_yCube[cellId][varId] = pow(cellVars[varId], 3) - m_cCube[cellId][varId];
3922 // m_tCube[cellId][varId] = m_cube[cellId][varId] + m_yCube[cellId][varId];
3923 // m_cCube[cellId][varId] = (m_tCube[cellId][varId] - m_cube[cellId][varId]) - m_yCube[cellId][varId];
3924 // m_cube[cellId][varId] = m_tCube[cellId][varId];
3927 } // kahan summation end
3928 else { // normal summation
3930 // Primitive variables
3931 for(MInt varId = 0; varId < m_noVariables; varId++) {
3932 postData().a_variable(dataId, varId) += cellVars[varId];
3936 // squares of velocity components (u*u,v*v(,w*w))
3937 const MInt varIndex = postData().getPropertyVariableOffset("square").first;
3938 constexpr MInt noSquareDiag = nDim;
3939 for(MInt varId = 0; varId < noSquareDiag; varId++) {
3940 postData().a_variable(dataId, varIndex + varId) += cellVars[varId] * cellVars[varId];
3942 // products of different velocity components (u*v(,v*w,w*u))
3943 constexpr MInt noSquareMixed = 2 * nDim - 3;
3944 for(MInt varId = 0; varId < noSquareMixed; varId++) {
3945 postData().a_variable(dataId, varIndex + nDim + varId) +=
3946 (cellVars[varId % nDim]) * (cellVars[(varId + 1) % nDim]);
3948 // squares of pressure p*p
3949 if constexpr(isothermal) {
3950 postData().a_variable(dataId, varIndex + noSquareDiag + noSquareMixed) += cellVars[nDim] * cellVars[nDim];
3952 postData().a_variable(dataId, varIndex + noSquareDiag + noSquareMixed) +=
3953 cellVars[nDim + 1] * cellVars[nDim + 1];
3958 MInt varIndex = postData().getPropertyVariableOffset("correlation").first;
3959 for(MInt correlationId = 0; correlationId < m_noCorrelationLines; correlationId++) {
3960 MInt index = m_correlationIndexMapping[correlationId][dataId];
3962 MInt corrVarIndex = m_correlationVariableIndex[correlationId];
3963 MFloat refCorrVar = m_globalCorrelationExchangeVar[correlationId][index];
3964 postData().a_variable(dataId, varIndex + correlationId) += refCorrVar * cellVars[corrVarIndex];
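// Note (added for clarity): this accumulates the two-point product
// sum_t u(x_ref, t) * u(x, t) for the selected correlation variable; the normalization
// into a correlation coefficient happens later in computeAndSaveMean() once the means
// and RMS values are available.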
3969 // third and fourth powers of velocity components (skewness and kurtosis)
3971 ASSERT(m_square, "");
3972 ASSERT(m_skewness, "");
3973 const MInt varSkew = postData().getPropertyVariableOffset("skewness").first;
3974 const MInt varKurt = postData().getPropertyVariableOffset("kurtosis").first;
3975 ASSERT(varSkew + (nDim - 1) < postData().noVariables(), "");
3976 ASSERT(varKurt + (nDim - 1) < postData().noVariables(), "");
3977 for(MInt varId = 0; varId < nDim; varId++) {
3978 postData().a_variable(dataId, varSkew + varId) += pow(cellVars[varId], 3);
3979 postData().a_variable(dataId, varKurt + varId) += pow(cellVars[varId], 4);
3981 // third powers of velocity components (skewness)
3982 } else if(m_skewness) {
3983 ASSERT(m_square, "");
3984 MInt varSkew_ = postData().getPropertyVariableOffset("skewness").first;
3985 ASSERT(varSkew_ + (nDim - 1) < postData().noVariables(), "");
3986 for(MInt varId = 0; varId < nDim; varId++) {
3987 postData().a_variable(dataId, varSkew_ + varId) += pow(cellVars[varId], 3);
3992 if(m_statisticCombustionAnalysis) {
3993 MInt varIndex = postData().getPropertyVariableOffset("statisticCombustionAnalysis").first;
3994 postData().a_variable(dataId, varIndex) += heatRelease[cellId];
3995 postData().a_variable(dataId, varIndex + 1) += cellVars[nDim + 2] * cellVars[nDim + 2];
3996 postData().a_variable(dataId, varIndex + 2) += heatRelease[cellId] * heatRelease[cellId];
3999 if(needDerivative) {
4000 // Get pointer to derivatives of primitive variables
4001 // Instead of returning a pointer an array is filled for two reasons:
4002 // 1. solver can do some kind of calculation (certain gradient might not be calculated during solution
4004 // 2. solver may use SoA or AoS memory layout (flexibility)
4005 std::vector<MFloat> cellVarsDeriv(m_noVariables * nDim);
4006 if(pp->getSampleVarsDerivatives(cellId, cellVarsDeriv)) {
4007 // if(cellVarsDeriv != nullptr) {
4008 // const MFloatTensor deriv(const_cast<MFloat*>(cellVarsDeriv), m_noVariables, nDim);
4009 const MFloatTensor deriv(const_cast<MFloat*>(cellVarsDeriv.data()), m_noVariables, nDim);
4010 // Speed of sound and derivatives
4011 if(m_averageSpeedOfSound) {
4012 // Store convenience variables
4013 const MFloat rho = cellVars[nDim];
4014 const MFloat p = cellVars[nDim + 1];
4015 const MFloat pByRho = p / rho;
4017 // Speed of sound: c = sqrt(gamma * p / rho)
4018 MInt varIndex = postData().getPropertyVariableOffset("averageSpeedOfSound").first;
4019 postData().a_variable(dataId, varIndex) += sqrt(m_gamma * pByRho);
4021 // Derivatives of speed of sound
4022 const MFloat factor = 0.5 * sqrt(m_gamma / (rho * p));
4023 for(MInt dimId = 0; dimId < nDim; dimId++) {
4024 postData().a_variable(dataId, varIndex + dimId + 1) +=
4025 factor * (deriv(nDim + 1, dimId) - pByRho * deriv(nDim, dimId));
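// Note (added for clarity): with c = sqrt(gamma * p / rho), the chain rule gives
// dc/dx_i = 0.5 * sqrt(gamma / (rho * p)) * (dp/dx_i - (p / rho) * drho/dx_i),
// which is exactly the 'factor * (...)' expression accumulated above.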
4029 if(m_needVorticity) {
4030 constexpr MInt noVorticities = nDim * 2 - 3;
4031 MFloat vort[nDim * 2 - 3];
4032 calcVorticity(deriv, vort);
4035 if(m_averageVorticity) {
4036 MInt varIndex = postData().getPropertyVariableOffset("averageVorticity").first;
4037 for(MInt varId = 0; varId < noVorticities; varId++) {
4038 postData().a_variable(dataId, varIndex + varId) += vort[varId];
4041 // Lamb vector (vorticity x velocity)
4042 if(m_activeMeanVars.find(MV::LAMB0) != m_activeMeanVars.end()) {
4043 MInt varIndex = postData().getPropertyVariableOffset("lamb0").first;
4044 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4045 if constexpr(nDim == 2) {
4046 postData().a_variable(dataId, varIndex) -= vort[0] * cellVars[1];
4047 postData().a_variable(dataId, varIndex + 1) += vort[0] * cellVars[0];
4049 postData().a_variable(dataId, varIndex) += vort[1] * cellVars[2] - vort[2] * cellVars[1];
4050 postData().a_variable(dataId, varIndex + 1) += vort[2] * cellVars[0] - vort[0] * cellVars[2];
4051 postData().a_variable(dataId, varIndex + 2) += vort[0] * cellVars[1] - vort[1] * cellVars[0];
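// Note (added for clarity): the Lamb vector is L = omega x u; in 3D its components are
// L_x = w_y*u_z - w_z*u_y, L_y = w_z*u_x - w_x*u_z, L_z = w_x*u_y - w_y*u_x, which the
// three statements above accumulate. In 2D only the out-of-plane vorticity w_z exists,
// giving L = (-w_z*u_y, w_z*u_x).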
4056 // Velocity gradients
4057 if(m_activeMeanVars.find(MV::GRADU) != m_activeMeanVars.end()) {
4058 MInt varIndex = postData().getPropertyVariableOffset("gradu").first;
4059 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4060 MInt indexGradU = 0;
4061 for(MInt veloId = 0; veloId < nDim; veloId++) {
4062 for(MInt dimId = 0; dimId < nDim; dimId++) {
4063 postData().a_variable(dataId, varIndex + indexGradU) += deriv(veloId, dimId);
4067 // offset += nDim * nDim;
4068 } else if(m_activeMeanVars.find(MV::DU) != m_activeMeanVars.end()) {
4069 MInt varIndex = postData().getPropertyVariableOffset("du").first;
4070 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4071 // du/dx, dv/dy, dw/dz for the divergence of the velocity field
4072 for(MInt dimId = 0; dimId < nDim; dimId++) {
4073 postData().a_variable(dataId, varIndex + dimId) += deriv(dimId, dimId);
4077 // u * grad(u) + v * grad(v) + w * grad(w)
4078 if(m_activeMeanVars.find(MV::UGRADU) != m_activeMeanVars.end()) {
4079 MInt varIndex = postData().getPropertyVariableOffset("ugradu").first;
4080 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4081 for(MInt dimId = 0; dimId < nDim; dimId++) {
4082 for(MInt veloId = 0; veloId < nDim; veloId++) {
4083 postData().a_variable(dataId, varIndex + dimId /*indexUGradU*/) +=
4084 cellVars[veloId] * deriv(veloId, dimId);
4090 if(m_activeMeanVars.find(MV::DRHO) != m_activeMeanVars.end()) {
4091 MInt varIndex = postData().getPropertyVariableOffset("drho").first;
4092 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4093 for(MInt dimId = 0; dimId < nDim; dimId++) {
4094 postData().a_variable(dataId, varIndex + dimId) += deriv(nDim, dimId);
4099 if constexpr(!isothermal) {
4100 if(m_activeMeanVars.find(MV::DP) != m_activeMeanVars.end()) {
4101 MInt varIndex = postData().getPropertyVariableOffset("dp").first;
4102 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4103 for(MInt dimId = 0; dimId < nDim; dimId++) {
4104 postData().a_variable(dataId, varIndex + dimId) += deriv(nDim + 1, dimId);
4110 if(m_activeMeanVars.find(MV::RHODIVU) != m_activeMeanVars.end()) {
4111 MInt varIndex = postData().getPropertyVariableOffset("rhodivu").first;
4112 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4113 const MFloat rho = cellVars[nDim];
4114 for(MInt dimId = 0; dimId < nDim; dimId++) {
4115 postData().a_variable(dataId, varIndex + dimId) += rho * deriv(dimId, dimId);
4120 if(m_activeMeanVars.find(MV::UGRADRHO) != m_activeMeanVars.end()) {
4121 MInt varIndex = postData().getPropertyVariableOffset("ugradrho").first;
4122 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4123 for(MInt dimId = 0; dimId < nDim; dimId++) {
4124 postData().a_variable(dataId, varIndex + dimId) += cellVars[dimId] * deriv(nDim, dimId);
4129 if constexpr(!isothermal) {
4130 if(m_activeMeanVars.find(MV::GRADPRHO) != m_activeMeanVars.end()) {
4131 MInt varIndex = postData().getPropertyVariableOffset("gradprho").first;
4132 ASSERT(varIndex > -1 && varIndex < postData().noVariables(), "");
4133 const MFloat rho = cellVars[nDim];
4134 for(MInt dimId = 0; dimId < nDim; dimId++) {
4135 postData().a_variable(dataId, varIndex + dimId) += (deriv(nDim + 1, dimId) / rho);
4146 // Write mean file if we are finished
4147 if(globalTimeStep == m_averageStopTimestep) {
4148 m_computeAndSaveMean = true;
4152template <MInt nDim, class ppType>
4153template <MBool inSolution>
4154void PostProcessing<nDim, ppType>::computeAndSaveDivergence() {
4157 if(!(postData().isActive())) return;
4159 const MInt timeStep = pp->solver().getCurrentTimeStep();
4160 if constexpr(inSolution) {
4161 const MInt solutionInterval = pp->solver().m_solutionInterval;
4162 const MInt solutionOffset = pp->solver().m_solutionOffset;
4163 if(!(timeStep % solutionInterval == 0 && timeStep >= solutionOffset)) return;
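// Note (added for clarity, values purely illustrative): with, e.g., solutionInterval = 100
// and solutionOffset = 500, the divergence field is written at timesteps 500, 600, 700, ...
// and the method returns early otherwise.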
4166 // Calculate for each cell the divergence
4167 constexpr MInt noVars = 1;
4168 const MInt noCellsPost = postData().grid().noInternalCells();
4169 MFloatScratchSpace divergence(noCellsPost, noVars, AT_, "divergence");
4170 divergence.fill(0.0);
4171 for(MInt cellIdPost = 0; cellIdPost < noCellsPost; cellIdPost++) {
4172 const MInt cellIdSolver = convertIdParent(postData(), pp->solver(), cellIdPost);
4173 if(cellIdSolver < 0) continue;
4174 divergence(cellIdSolver) = calcDivergence(cellIdSolver);
4177 // Write output file
4179 ss << pp->solver().outputDir() << "Divergence_s" << to_string(postData().solverId()) << "_" << setw(8) << setfill('0')
4180 << timeStep << ParallelIo::fileExt();
4181 const MString fileName = ss.str();
4183 m_log << "Saving divergence velocity field" << endl;
4185 std::vector<MString> variableNames = {"DIVERGENCE_U"};
4186 postData().saveDataFile(false, fileName, noVars, variableNames, divergence.data());
4188 ParallelIo parallelIo(fileName, maia::parallel_io::PIO_APPEND, postData().mpiComm());
4189 parallelIo.setAttribute(pp->solver().time(), "time");
4192template <MInt nDim, class ppType>
4193void PostProcessing<nDim, ppType>::computeAndSaveMean() {
4196 if(!(postData().isActive() && m_computeAndSaveMean)) return;
4198 // Find out how many solutions are averaged for weighting the summed variables
4199 const MFloat weight = 1.0 / (((m_averageStopTimestep - m_averageStartTimestep) / m_averageInterval) + 1);
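// Note (added for clarity, values purely illustrative): the weight is the reciprocal of
// the number of accumulated samples, e.g. for averageStartTimestep = 1000,
// averageStopTimestep = 2000 and averageInterval = 100 there are
// (2000 - 1000) / 100 + 1 = 11 samples, so weight = 1/11.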
4200 m_log << "Computing averages with weight: " << setprecision(12) << weight << "\n";
4202 const MInt noCells = postData().grid().noInternalCells();
4203 const MInt noVars = postData().noVariables();
4204 MFloatScratchSpace averagedVars(noCells, noVars, AT_, "averagedVars");
4205 averagedVars.fill(0.0);
4207 const MInt varPrim = postData().getPropertyVariableOffset("primitive").first;
4208 const MInt varSquare = postData().getPropertyVariableOffset("square").first;
4209 const MInt varSkew = postData().getPropertyVariableOffset("skewness").first;
4210 const MInt varKurt = postData().getPropertyVariableOffset("kurtosis").first;
4211 const MInt varCorrelation = postData().getPropertyVariableOffset("correlation").first;
4212 ASSERT(varPrim < postData().noVariables(), "postData noVariables too low");
4213 ASSERT(!m_square || varSquare < postData().noVariables(), "postData noVariables too low");
4214 ASSERT(!m_kurtosis || varKurt < postData().noVariables(), "postData noVariables too low");
4215 ASSERT(!m_skewness || varSkew < postData().noVariables(), "postData noVariables too low");
4216 ASSERT(!m_correlation || varCorrelation < postData().noVariables(), "postData noVariables too low");
4218#if not defined(MAIA_DISABLE_LB)
4219 constexpr MBool isothermal = (std::is_same<ppType, PostProcessingLb<nDim>>::value);
4220 m_averageSpeedOfSound = (m_averageSpeedOfSound && !isothermal);
4222 constexpr MBool isothermal = false;
4225 // exchange for correlation
4227 for(MInt correlationId = 0; correlationId < m_noCorrelationLines; correlationId++) {
4228 for(MInt i = 0; i < m_noCorrelationIds[correlationId]; i++) {
4229 MInt dataId = m_correlationIds[correlationId][i];
4230 MInt corrVarIndex = m_correlationVariableIndex[correlationId];
4232 m_correlationExchangeVarMean[correlationId][i] = postData().a_variable(dataId, varPrim + corrVarIndex);
4234 m_correlationExchangeVarRMS[correlationId][i] = postData().a_variable(dataId, varSquare + corrVarIndex);
4237 MInt noDomain = postData().noDomains();
4238 ScratchSpace<MInt> recvbuf(noDomain, "recvbuf", AT_);
4241 MPI_Gather(&m_noCorrelationIds[correlationId], 1, MPI_INT, &recvbuf[0], 1, MPI_INT, 0, postData().mpiComm(), AT_,
4244 ScratchSpace<MInt> displs(noDomain, "displs", AT_);
4245 if(postData().domainId() == 0) {
4247 for(MInt dom = 0; dom < noDomain; dom++) {
4248 displs[dom] = offset;
4249 offset += recvbuf[dom];
4253 MPI_Gatherv(&m_correlationExchangeVarMean[correlationId][0], m_noCorrelationIds[correlationId], MPI_DOUBLE,
4254 &m_globalCorrelationExchangeVarMean[correlationId][0], &recvbuf[postData().domainId()],
4255 &displs[postData().domainId()], MPI_DOUBLE, 0, postData().mpiComm(), AT_,
4257 MPI_Gatherv(&m_correlationExchangeVarRMS[correlationId][0], m_noCorrelationIds[correlationId], MPI_DOUBLE,
4258 &m_globalCorrelationExchangeVarRMS[correlationId][0], &recvbuf[postData().domainId()],
4259 &displs[postData().domainId()], MPI_DOUBLE, 0, postData().mpiComm(), AT_,
4262 MPI_Bcast(m_globalCorrelationExchangeVarMean[correlationId], m_globalNoCorrelationIds[correlationId], MPI_DOUBLE,
4264 MPI_Bcast(m_globalCorrelationExchangeVarRMS[correlationId], m_globalNoCorrelationIds[correlationId], MPI_DOUBLE,
4270 for(MInt dataId = 0; dataId < noCells; dataId++) {
4271 MInt cellId = convertIdParent(postData(), pp->solver(), dataId);
4273 std::copy_n(&postData().a_variable(dataId, 0), noVars, &averagedVars(dataId, 0));
4275 // Weighting of all summed variables -> mean
4276 for(MInt varId = 0; varId < m_noVariables; varId++) {
4277 averagedVars(dataId, varPrim + varId) *= weight;
4281 // compute skewness and kurtosis of velocity components
4282 // e.g. skewness(u) = mean(u^3) - 3*u_mean*mean(u^2) + 2*u_mean^3
4283 // e.g. kurtosis(u) = mean(u^4) - 4*u_mean*mean(u^3) + 6*u_mean^2*mean(u^2) - 3*u_mean^4
4284 for(MInt varId = 0; varId < nDim; varId++) {
4285 averagedVars(dataId, varKurt + varId) =
4286 weight * averagedVars(dataId, varKurt + varId)
4287 - 4 * weight * averagedVars(dataId, varPrim + varId) * averagedVars(dataId, varSkew + varId)
4288 + 6 * weight * averagedVars(dataId, varSquare + varId) * pow(averagedVars(dataId, varPrim + varId), 2)
4289 - 3 * pow(averagedVars(dataId, varPrim + varId), 4);
4290 averagedVars(dataId, varSkew + varId) =
4291 weight * averagedVars(dataId, varSkew + varId)
4292 - 3 * weight * averagedVars(dataId, varPrim + varId) * averagedVars(dataId, varSquare + varId)
4293 + 2 * pow(averagedVars(dataId, varPrim + varId), 3);
4296 } else if(m_skewness) {
4297 // compute skewness of velocity components
4298 for(MInt varId = 0; varId < nDim; varId++) {
4299 averagedVars(dataId, varSkew + varId) =
4300 weight * averagedVars(dataId, varSkew + varId)
4301 - 3 * weight * averagedVars(dataId, varPrim + varId) * averagedVars(dataId, varSquare + varId)
4302 + 2 * pow(averagedVars(dataId, varPrim + varId), 3);
4307 // compute u',v'(,w') ( e.g. u'=mean(u^2)-(u_mean)^2 )
4308 for(MInt varId = 0; varId < nDim; varId++) {
4309 averagedVars(dataId, varSquare + varId) =
4310 weight * averagedVars(dataId, varSquare + varId) - pow(averagedVars(dataId, varPrim + varId), 2);
4312 // compute u'v'(,v'w',w'u') ( e.g. u'v'=mean(u*v)-u_mean*v_mean )
4313 for(MInt varId = 0; varId < 2 * nDim - 3; varId++) {
4314 averagedVars(dataId, varSquare + varId + nDim) =
4315 weight * averagedVars(dataId, varSquare + varId + nDim)
4316 - averagedVars(dataId, varPrim + varId % nDim) * averagedVars(dataId, varPrim + (varId + 1) % nDim);
4319 if constexpr(isothermal) {
4320 averagedVars(dataId, varSquare + 3 * nDim - 3) =
4321 CSsq * CSsq * // So far this factor is only valid for LB: <p'p'> = CSsq^2 * <rho'rho'>
4322 (weight * averagedVars(dataId, varSquare + 3 * nDim - 3)
4323 - averagedVars(dataId, varPrim + nDim) * averagedVars(dataId, varPrim + nDim));
4325 averagedVars(dataId, varSquare + 3 * nDim - 3) =
4326 weight * averagedVars(dataId, varSquare + 3 * nDim - 3)
4327 - averagedVars(dataId, varPrim + nDim + 1) * averagedVars(dataId, varPrim + nDim + 1);
4332 MInt varIndex = postData().getPropertyVariableOffset("correlation").first;
4333 for(MInt correlationId = 0; correlationId < m_noCorrelationLines; correlationId++) {
4334 MInt index = m_correlationIndexMapping[correlationId][dataId];
4336 MFloat sum_varRef = m_globalCorrelationExchangeVarMean[correlationId][index];
4337 MFloat sum_varRefSquare = m_globalCorrelationExchangeVarRMS[correlationId][index];
4339 MInt corrVarIndex = m_correlationVariableIndex[correlationId];
4340 MFloat mean_varRef = weight * averagedVars(dataId, varIndex + correlationId)
4341 - weight * sum_varRef * averagedVars(dataId, varPrim + corrVarIndex);
4343 MFloat mean_varRefSquare = weight * sum_varRefSquare - std::pow(weight * sum_varRef, 2);
4345 MFloat mean_varSquare = averagedVars(dataId, varSquare + corrVarIndex);
4347 averagedVars(dataId, varIndex + correlationId) = mean_varRef / std::sqrt(mean_varRefSquare * mean_varSquare);
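// Note (added for clarity): mean_varRef is the covariance <u'(x_ref) u'(x)>, while
// mean_varRefSquare and mean_varSquare are the variances at x_ref and x, so the assignment
// above yields the normalized two-point correlation coefficient
// R(x) = <u'(x_ref) u'(x)> / sqrt(<u'^2(x_ref)> <u'^2(x)>).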
4352 if(m_statisticCombustionAnalysis) {
4353 const MInt varComb = postData().getPropertyVariableOffset("statisticCombustionAnalysis").first;
4354 averagedVars(dataId, varComb) *= weight;
4355 averagedVars(dataId, varComb + 1) =
4356 averagedVars(dataId, varComb + 1) * weight - pow(averagedVars(dataId, varPrim + nDim + 2), 2);
4357 averagedVars(dataId, varComb + 2) =
4358 averagedVars(dataId, varComb + 2) * weight - pow(averagedVars(dataId, varComb), 2);
4361 if(m_averageVorticity) {
4362 const MInt varVort = postData().getPropertyVariableOffset("averageVorticity").first;
4363 for(MInt varId = 0; varId < m_noAveragedVorticities; varId++) {
4364 averagedVars(dataId, varVort + varId) *= weight;
4368 if(m_averageSpeedOfSound) {
4369 const MInt varSos = postData().getPropertyVariableOffset("averageSpeedOfSound").first;
4370 for(MInt varId = 0; varId < m_noSpeedOfSoundVars; varId++) {
4371 averagedVars(dataId, varSos + varId) *= weight;
4375 if(m_noSourceVars > 0) {
4376 const MInt varSourceVars = postData().m_sourceVarsIndex;
4377 for(MInt varId = 0; varId < m_noSourceVars; varId++) {
4378 averagedVars(dataId, varSourceVars + varId) *= weight;
4385 ss << pp->solver().outputDir() << "Mean_s" << to_string(postData().solverId()) << "_" << setw(8) << setfill('0')
4386 << m_averageStartTimestep << "-" << setw(8) << setfill('0') << m_averageStopTimestep << ParallelIo::fileExt();
4387 const MString name = ss.str();
4389 m_log << "Saving averaged variables " << name << endl;
4391 postData().saveDataFile(false, name, noVars, postData().m_variablesName, &averagedVars(0, 0));
4393 ParallelIo parallelIo(name, maia::parallel_io::PIO_APPEND, postData().mpiComm());
4395 parallelIo.setAttribute(m_averageStartTimestep, "averageStartTimestep");
4396 parallelIo.setAttribute(m_averageStopTimestep, "averageStopTimestep");
4397 parallelIo.setAttribute(m_averageInterval, "averageInterval");
4400 m_computeAndSaveMean = false;
4411template <MInt nDim, class ppType>
4412void PostProcessing<nDim, ppType>::movingAveragePost() {
4416 for(MInt t = m_movingAverageStartTimestep; t <= m_movingAverageStopTimestep; t += m_movingAverageInterval) {
4417 toNextOut = m_movingAverageInterval - (t - m_movingAverageStartTimestep) % m_movingAverageInterval;
4418 if(toNextOut < m_movingAverageDataPoints * m_movingAverageInterval) {
4419 pp->solver().loadSampleVariables(t);
4439template <MInt nDim, class ppType>
4440void PostProcessing<nDim, ppType>::movingAverage() {
4443 if(!pp->solver().grid().isActive()) return;
4445 MInt noCells = pp->solver().grid().noInternalCells();
4446 MInt noVars = m_noVariables;
4447 if(m_averageVorticity == 1) {
4448 noVars += (2 * nDim - 3);
4450 const MFloat* vars = 0;
4452 // store data for averaging if we are on the right timestep
4453 if(globalTimeStep >= m_movingAverageStartTimestep - (m_movingAverageDataPoints - 1) * m_movingAverageInterval
4454 && (globalTimeStep - m_movingAverageStartTimestep) % m_movingAverageInterval == 0
4455 && globalTimeStep <= m_movingAverageStopTimestep) {
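// Note (added for clarity, values purely illustrative): sampling starts
// (m_movingAverageDataPoints - 1) intervals before the first output step so the window is
// already full there; e.g. for movingAverageStartTimestep = 1000, movingAverageInterval = 10
// and movingAverageDataPoints = 5, samples are stored from timestep 960 onwards.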
4456 MInt offset = (m_movingAverageCounter % m_movingAverageDataPoints) * noVars;
4458 for(MInt cellId = 0; cellId < noCells; cellId++) {
4459 // pp->solver().getSampleVariables(cellId, vars);
4460 getSampleVariables(cellId, vars, true);
4461 for(MInt varId = 0; varId < m_noVariables; varId++) {
4462 m_movAvgVariables[cellId][offset + varId] = vars[varId];
4466 if(m_averageVorticity) {
4467 ScratchSpace<MFloat> vorticity(pp->solver().a_noCells() * (2 * nDim - 3), AT_, "vorticity");
4468 pp->getVorticity(&vorticity[0]);
4469 for(MInt cellId = 0; cellId < noCells; cellId++) {
4470 for(MInt varId = m_noVariables; varId < noVars; varId++) {
4471 m_movAvgVariables[cellId][offset + varId] =
4472 vorticity[pp->solver().a_noCells() * (varId - m_noVariables) + cellId];
4477 m_movingAverageCounter++;
4480 // calculate moving average and write to file
4481 if(globalTimeStep >= m_movingAverageStartTimestep
4482 && (globalTimeStep - m_movingAverageStartTimestep) % m_movingAverageInterval == 0
4483 && globalTimeStep <= m_movingAverageStopTimestep) {
4484 m_log << " ^ * Moving average timestep " << globalTimeStep << "\n";
4486 MInt noPoints = min(m_movingAverageCounter, m_movingAverageDataPoints);
4487 MInt offset = ((m_movingAverageCounter - 1) % m_movingAverageDataPoints) * noVars;
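// Note (added for clarity): m_movAvgVariables acts as a ring buffer with
// m_movingAverageDataPoints slots per cell; the offset above selects the slot of the most
// recently stored sample ((counter - 1) modulo the number of data points), while the
// averaging loop below sums over all filled slots.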
4488 ScratchSpace<MFloat> averagedVars(noCells * noVars, AT_, "averagedVars");
4489 ScratchSpace<MFloat> currentVars(noCells * noVars, AT_, "currentVars");
4491 for(MInt i = 0; i < noCells * noVars; i++) {
4492 averagedVars[i] = 0;
4495 // collect all variables for current timeStep
4496 for(MInt cellId = 0; cellId < noCells; cellId++) {
4497 for(MInt varId = 0; varId < noVars; varId++) {
4498 currentVars[cellId * noVars + varId] = m_movAvgVariables[cellId][offset + varId];
4503 for(MInt cellId = 0; cellId < noCells; cellId++) {
4504 for(MInt pId = 0; pId < noPoints; pId++) {
4505 for(MInt varId = 0; varId < noVars; varId++) {
4506 averagedVars[cellId * noVars + varId] += m_movAvgVariables[cellId][pId * noVars + varId];
4511 for(MInt i = 0; i < noCells * noVars; i++) {
4512 averagedVars[i] /= noPoints;
4515 // output to parallelIo file
4516 ParallelIo::size_type cellsOffset, totalNoCells;
4517 MFloat currentTime = pp->solver().time();
4518 stringstream fileName;
4519 fileName << pp->solver().outputDir() << "movingAverage_" << globalTimeStep << ParallelIo::fileExt();
4520 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
4521 parallelIo.calcOffset(noCells, &cellsOffset, &totalNoCells, pp->solver().mpiComm());
4523 // write global attributes
4524 parallelIo.setAttribute(pp->solver().grid().gridInputFileName(), "gridFile");
4525 parallelIo.setAttribute(globalTimeStep, "timeStep");
4526 parallelIo.setAttribute(currentTime, "time");
4527 parallelIo.setAttribute(m_movingAverageInterval, "movingAvgInterval");
4528 parallelIo.setAttribute(m_movingAverageDataPoints, "movingAvgDataPoints");
4530 // write parameters for dimensionalization
4531 vector<pair<MFloat, MString>> dimParams;
4532 pp->solver().getDimensionalizationParams(dimParams);
4534 for(MInt i = 0; i < (MInt)dimParams.size(); i++) {
4535 parallelIo.setAttribute(dimParams[i].first, dimParams[i].second);
4538 // define all arrays in output file
4539 for(MInt varId = 0; varId < noVars; varId++) {
4540 stringstream varName;
4541 varName << "variables" << varId;
4542 parallelIo.defineArray(maia::parallel_io::PIO_FLOAT, varName.str(), totalNoCells);
4543 parallelIo.setAttribute(m_movAvgVarNames[varId], "name", varName.str());
4546 parallelIo.defineArray(maia::parallel_io::PIO_FLOAT, varName.str(), totalNoCells);
4547 parallelIo.setAttribute(m_movAvgVarNames[m_movAvgNoVariables + varId], "name", varName.str());
4550 parallelIo.setOffset(noCells, cellsOffset);
4551 // write all variables
4552 for(MInt varId = 0; varId < noVars; varId++) {
4553 stringstream varName;
4554 varName << "variables" << varId;
4555 parallelIo.writeArray(&(currentVars[varId]), varName.str(), noVars);
4557 parallelIo.writeArray(&(averagedVars[varId]), varName.str(), noVars);
4570template <MInt nDim, class ppType>
4571void PostProcessing<nDim, ppType>::probeLocation() {
4574 if(!pp->solver().grid().isActive()) return;
4576 const MFloat* cellVars = 0;
4577 for(MInt np = 0; np < m_noProbePoints; np++) {
4578 if(m_probeCellIds[np] == -1) continue;
4580 // pp->solver().getSampleVariables(m_probeCellIds[np], cellVars); // get variables for single cell
4581 getSampleVariables(m_probeCellIds[np], cellVars, true);
4583 // just to be sure, also write the domainId, in case the point lies exactly between two domains
4584 m_probeFileStreams[np] << pp->solver().domainId() << " " << globalTimeStep << " ";
4585 for(MInt i = 0; i < m_noVariables; i++) {
4586 m_probeFileStreams[np] << cellVars[i] << " ";
4588 m_probeFileStreams[np] << endl;
4600template <MInt nDim, class ppType>
4601void PostProcessing<nDim, ppType>::probeLinePeriodicPost() {
4604 if(!pp->solver().grid().isActive()) return;
4606 if(m_postprocessFileName != "") {
4607 m_log << " ^ * probe line for file " << m_postprocessFileName << endl;
4608 TERMM(1, "FIXME untested");
4609 postData().loadMeanFile(m_postprocessFileName);
4610 probeLinePeriodic();
4612 for(MInt t = m_averageStartTimestep; t <= m_averageStopTimestep; t += m_averageInterval) {
4613 pp->solver().loadSampleVariables(t);
4614 probeLinePeriodic();
4627template <MInt nDim, class ppType>
4628void PostProcessing<nDim, ppType>::probeLinePre() {
4631 if(!pp->solver().grid().isActive()) return;
4633 for(MInt t = m_probeLineStartTimestep; t <= m_probeLineStopTimestep; t += m_probeLineInterval) {
4634 pp->solver().loadSampleVariables(t);
4647template <MInt nDim, class ppType>
4648void PostProcessing<nDim, ppType>::probeLinePost() {
4651 if(!pp->solver().grid().isActive()) return;
4653 m_log << " ^ * probe line for file " << m_postprocessFileName << endl;
4654 postData().loadMeanFile(m_postprocessFileName);
4666template <MInt nDim, class ppType>
4667void PostProcessing<nDim, ppType>::probeLine() {
4670 if(!pp->solver().grid().isActive()) return;
4672 using namespace maia::parallel_io;
4674 // check for average timestep or postprocessFileName (globalTimeStep==0 if file is loaded with
4675 if((globalTimeStep >= m_probeLineStartTimestep
4676 && (globalTimeStep - m_probeLineStartTimestep) % m_probeLineInterval == 0
4677 && globalTimeStep <= m_probeLineStopTimestep)
4678 || globalTimeStep == 0) {
4679 MInt noVars = m_noVariables;
4680 vector<MString> datasetNames;
4681 datasetNames.clear();
4683 noVars = postData().fileNoVars();
4684 datasetNames = postData().fileVarNames();
4685 TERMM_IF_NOT_COND((MInt)datasetNames.size() == noVars, "file var names size mismatch");
4687 pp->solver().getSampleVariableNames(datasetNames);
4690 const MInt step = (isMeanFile()) ? 0 : globalTimeStep;
4691 stringstream fileName;
4692 fileName << pp->solver().outputDir() << "probeLines_" << step << ParallelIo::fileExt();
4693 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
4695 // define all arrays in output file
4696 // TODO labels:PP @ansgar_pp add full set of coordinates to file, or store line position as attributes
4697 for(MInt probeLineId = 0; probeLineId < m_noProbeLines; probeLineId++) {
4698 stringstream varNameBase;
4699 varNameBase << "line_" << probeLineId;
4700 string coordName = varNameBase.str() + "_coordinates";
4702 parallelIo.defineArray(PIO_FLOAT, coordName, m_globalNoProbeLineIds[probeLineId]);
4703 parallelIo.setAttribute(probeLineId, "lineId", coordName);
4705 varNameBase << "_var_";
4706 for(MInt varId = 0; varId < noVars; varId++) {
4707 stringstream varName;
4708 varName << varNameBase.str() << varId;
4709 parallelIo.defineArray(PIO_FLOAT, varName.str(), m_globalNoProbeLineIds[probeLineId]);
4710 parallelIo.setAttribute(probeLineId, "lineId", varName.str());
4711 if((MInt)datasetNames.size() == noVars) {
4712 parallelIo.setAttribute(datasetNames[varId], "name", varName.str());
4717 for(MInt probeLineId = 0; probeLineId < m_noProbeLines; probeLineId++) { // loop over all probe lines
4718 if(m_probeLineDirection[probeLineId] < 0 || m_probeLineDirection[probeLineId] >= nDim) {
4722 m_log << " ^ * probe line timestep " << globalTimeStep << " for line #" << probeLineId << endl;
4724 ScratchSpace<MFloat> vars(noVars * std::max(m_noProbeLineIds[probeLineId], 1), "vars", AT_);
4725 const MFloat* cellVars = nullptr;
4727 // collect local variables
4728 for(MInt i = 0; i < m_noProbeLineIds[probeLineId]; i++) {
4729 const MInt probeId = m_probeLineIds[probeLineId][i];
4730 getSampleVariables(probeId, cellVars, false);
4731 for(MInt varId = 0; varId < noVars; varId++) {
4732 vars[i * noVars + varId] = cellVars[varId];
4736 parallelIo.setOffset(m_noProbeLineIds[probeLineId], m_probeLineOffsets[probeLineId]);
4739 stringstream varNameBase;
4740 varNameBase << "line_" << probeLineId;
4741 string coordName = varNameBase.str() + "_coordinates";
4742 parallelIo.writeArray(m_probeLinePositions[probeLineId], coordName);
4744 varNameBase << "_var_";
4745 for(MInt varId = 0; varId < noVars; varId++) { // write all variables to file
4746 stringstream varName;
4747 varName << varNameBase.str() << varId;
4748 parallelIo.writeArray(&(vars[varId]), varName.str(), noVars);
4762template <MInt nDim, class ppType>
4763void PostProcessing<nDim, ppType>::probeArbitraryLinePost() {
4766 if(m_postprocessFileName != "") {
4767 m_log << " ^ * probe arbitrary line for file " << m_postprocessFileName << endl;
4768 TERMM(1, "FIXME untested");
4769 postData().loadMeanFile(m_postprocessFileName);
4770 probeArbitraryLine();
4772 for(MInt t = m_probeLineStartTimestep; t <= m_probeLineStopTimestep; t += m_probeLineInterval) {
4773 pp->solver().loadSampleVariables(t);
4774 probeArbitraryLine();
4787template <MInt nDim, class ppType>
4788void PostProcessing<nDim, ppType>::probeArbitraryLine() {
4791 using namespace maia::parallel_io;
4793 // check for average timestep or postprocessFileName (globalTimeStep==0 if file is loaded with
4794 if((globalTimeStep >= m_probeLineStartTimestep
4795 && (globalTimeStep - m_probeLineStartTimestep) % m_probeLineInterval == 0
4796 && globalTimeStep <= m_probeLineStopTimestep)
4797 || globalTimeStep == 0) {
4798 MInt noVars = m_noVariables;
4800 noVars = postData().fileNoVars();
4803 MInt step = (isMeanFile()) ? 0 : globalTimeStep;
4804 stringstream fileName;
4805 fileName << pp->solver().outputDir() << "probeArbitraryLines_" << step << ParallelIo::fileExt();
4806 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
4808 // define all arrays in output file
4809 for(MInt probeLineId = 0; probeLineId < m_noArbLines; probeLineId++) {
4810 stringstream varNameBase;
4811 varNameBase << "line_" << probeLineId;
4813 for(MInt dimId = 0; dimId < nDim; dimId++) {
4814 stringstream coordName;
4815 coordName << varNameBase.str() << "_coordinate" << dimId;
4816 parallelIo.defineArray(PIO_FLOAT, coordName.str(), m_globalNoArbLineIds[probeLineId]);
4817 parallelIo.setAttribute(probeLineId, "lineId", coordName.str());
4820 varNameBase << "_var_";
4821 for(MInt varId = 0; varId < noVars; varId++) {
4822 stringstream varName;
4823 varName << varNameBase.str() << varId;
4824 parallelIo.defineArray(PIO_FLOAT, varName.str(), m_globalNoArbLineIds[probeLineId]);
4825 parallelIo.setAttribute(probeLineId, "lineId", varName.str());
4828 stringstream varName;
4829 varName << varNameBase.str() << noVars;
4830 parallelIo.defineArray(PIO_FLOAT, varName.str(), m_globalNoArbLineIds[probeLineId]);
4831 parallelIo.setAttribute(probeLineId, "lineId", varName.str());
4834 parallelIo.defineScalar(PIO_FLOAT, "time");
4835 parallelIo.defineScalar(PIO_INT, "noLines");
4836 parallelIo.defineScalar(PIO_INT, "noVars");
4837 parallelIo.defineScalar(PIO_INT, "nDim");
4839 for(MInt probeLineId = 0; probeLineId < m_noArbLines; probeLineId++) { // loop over all probe lines
4840 m_log << " ^ * probe arbitrary line timestep " << globalTimeStep << " for line #" << probeLineId
4843 MInt noIds = m_noArbLineIds[probeLineId];
4844 ScratchSpace<MFloat> vars((noVars + 1) * mMax(noIds, 1), "vars", AT_); // +1 for heat flux
4847 MFloatScratchSpace pointVars(noVars, AT_, "pointVars");
4850 // collect local variables
4851 for(MInt i = 0; i < m_noArbLineIds[probeLineId]; i++) {
4852 probeId = m_arbLineIds[probeLineId][i];
4854 for(MInt j = 0; j < nDim; j++) {
4855 position[j] = m_arbLineCoordinates[probeLineId][nDim * i + j];
4859 // for(MInt varId = 0; varId < noVars; varId++) {
4860 // pointVars[varId] = m_averagedVars[probeId][varId];
4863 if(string2enum(pp->solver().solverType()) == MAIA_FINITE_VOLUME
4864 || string2enum(m_solverType) == MAIA_FV_GEQU_PV || string2enum(m_solverType) == MAIA_FV_LEVELSET
4865 || string2enum(m_solverType) == MAIA_FV_MB || string2enum(m_solverType) == MAIA_FV_MB_NEW_RK) {
4866 // reinterpret_cast<FvCartesianSolver<nDim>*>(pp->solver()).getPrimitiveVariables(probeId, position,
4867 // &pointVars[0], 1);
4868 pp->getPrimitiveVariables(probeId, position, &pointVars[0], 1);
4870 TERMM(-1, "ERROR: Not implemented!");
4874 for(MInt varId = 0; varId < noVars; varId++) {
4875 vars[i * (noVars + 1) + varId] = pointVars[varId];
4877 // only relevant for fv-solver types...
4878 if(string2enum(pp->solver().solverType()) == MAIA_FINITE_VOLUME || string2enum(m_solverType) == MAIA_FV_GEQU_PV
4879 || string2enum(m_solverType) == MAIA_FV_LEVELSET || string2enum(m_solverType) == MAIA_FV_MB
4880 || string2enum(m_solverType) == MAIA_FV_MB_NEW_RK) {
4881 vars[i * (noVars + 1) + noVars] =
4882 // reinterpret_cast<FvCartesianSolver<nDim>*>(pp->solver()).getBoundaryHeatFlux(probeId);
4883 pp->getBoundaryHeatFlux(probeId);
4887 parallelIo.setOffset(m_noArbLineIds[probeLineId], m_arbLineOffsets[probeLineId]);
4890 parallelIo.writeScalar(pp->solver().time(), "time");
4891 parallelIo.writeScalar(nDim, "nDim");
4892 parallelIo.writeScalar(m_noArbLines, "noLines");
4893 parallelIo.writeScalar(noVars + 1, "noVars");
4895 stringstream varNameBase;
4896 varNameBase << "line_" << probeLineId;
4897 for(MInt dimId = 0; dimId < nDim; dimId++) {
4898 stringstream coordName;
4899 coordName << varNameBase.str() << "_coordinate" << dimId;
4900 parallelIo.writeArray(&(m_arbLineCoordinates[probeLineId][dimId]), coordName.str(), nDim);
4903 varNameBase << "_var_";
4904 for(MInt varId = 0; varId < noVars + 1; varId++) { // write all variables to file
4905 stringstream varName;
4906 varName << varNameBase.str() << varId;
4907 parallelIo.writeArray(&(vars[varId]), varName.str(), noVars + 1);
4921template <MInt nDim, class ppType>
4922void PostProcessing<nDim, ppType>::probeSlicePre() {
4925 if(!pp->solver().grid().isActive()) return;
4927 // TODO labels:PP recombine probeSlicePre/Post
4928 for(MInt t = m_probeSliceStartTimestep; t <= m_probeSliceStopTimestep; t += m_probeSliceInterval) {
4929 m_log << " ^ * probe slices for timestep " << t << endl;
4930 pp->solver().loadSampleVariables(t);
4943template <MInt nDim, class ppType>
4944void PostProcessing<nDim, ppType>::probeSlicePost() {
4947 if(!pp->solver().grid().isActive()) return;
4949 m_log << " ^ * probe slice for file " << m_postprocessFileName << endl;
4950 postData().loadMeanFile(m_postprocessFileName);
4954template <MInt nDim, class ppType>
4955void PostProcessing<nDim, ppType>::probeSliceIn() {
4958 if(!pp->solver().grid().isActive()) return;
4960 if(m_statisticCombustionAnalysis) {
4961 pp->solver().calculateHeatRelease();
4975template <MInt nDim, class ppType>
4976void PostProcessing<nDim, ppType>::probeSlice() {
4979 if(!pp->solver().grid().isActive()) return;
4981 using namespace maia::parallel_io;
4983 IF_CONSTEXPR(nDim == 2) return;
4985 const MBool isSliceStep = (globalTimeStep >= m_probeSliceStartTimestep
4986 && (globalTimeStep - m_probeSliceStartTimestep) % m_probeSliceInterval == 0
4987 && globalTimeStep <= m_probeSliceStopTimestep)
4988 || globalTimeStep == 0 || isMeanFile();
4990 cerr << "isSliceStep: " << pp->solver().domainId() << " " << isSliceStep << endl;
4991 cerr << pp->solver().domainId() << " " << m_probeSliceStartTimestep << " " << m_probeSliceStopTimestep << " "
4992 << m_probeSliceInterval << endl;
4993 cerr << (MBool)(globalTimeStep >= m_probeSliceStartTimestep) << " "
4994 << (MBool)((globalTimeStep - m_probeSliceStartTimestep) % m_probeSliceInterval == 0) << " "
4995 << (MBool)(globalTimeStep <= m_probeSliceStopTimestep) << " " << (MBool)(globalTimeStep == 0) << " "
4996 << (MBool)(isMeanFile()) << endl;
4998 // Implementation of CartesianGrid::createGridSlice()
4999 if(m_sliceAiaFileFormat) {
5001 MInt noVars = std::accumulate(m_noSliceVars.begin(), m_noSliceVars.end(), 0);
5002 MUint noVarIds = m_noSliceVars.size();
5005 noVars = postData().fileNoVars();
5008 ASSERT(!m_sliceVarIds.empty(), "");
5009 pp->solver().calcSamplingVariables(m_sliceVarIds, false);
5012 const MInt step = (isMeanFile()) ? 0 : globalTimeStep;
5014 for(MInt probeSliceId = 0; probeSliceId < m_noProbeSlices; probeSliceId++) {
5015 const MInt noIds = m_noProbeSliceIds[probeSliceId];
5016 MFloatScratchSpace vars(max(noIds, 1), noVars, AT_, "vars");
5018 // collect local variables
5019 for(MInt i = 0; i < m_noProbeSliceIds[probeSliceId]; i++) {
5020 const MInt probeId = m_probeSliceIds[probeSliceId][i];
5023 for(MUint v = 0; v < noVarIds; v++) {
5024 ASSERT(!isMeanFile() || v < 1, "Error for probeSlice of mean file");
5025 const MInt varId = (isMeanFile()) ? -1 : m_sliceVarIds[v];
5026 calcSamplingVar(probeId, varId, &vars(i, varOffset));
5027 varOffset += m_noSliceVars[v];
5030 saveSliceAiaFileFormat(step, noVars, vars, probeSliceId);
5034 // check for average timestep or postprocessFileName (globalTimeStep==0 if file is loaded with
5036 MInt noVars = m_noVariables;
5038 noVars = postData().fileNoVars();
5041 MInt step = (isMeanFile()) ? 0 : globalTimeStep;
5042 stringstream fileName;
5043 fileName << pp->solver().outputDir() << "probeSlices_" << step << ParallelIo::fileExt();
5044 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
5046 // define all arrays in output file
5047 for(MInt probeSliceId = 0; probeSliceId < m_noProbeSlices; probeSliceId++) {
5048 stringstream varNameBase;
5049 varNameBase << "slice_" << probeSliceId;
5050 string coordName = varNameBase.str() + "_coordinates0";
5052 parallelIo.defineArray(PIO_FLOAT, coordName, m_globalNoProbeSliceIds[probeSliceId]);
5053 parallelIo.setAttribute(probeSliceId, "sliceId", coordName);
5055 coordName = varNameBase.str() + "_coordinates1";
5057 parallelIo.defineArray(PIO_FLOAT, coordName, m_globalNoProbeSliceIds[probeSliceId]);
5058 parallelIo.setAttribute(probeSliceId, "sliceId", coordName);
5060 varNameBase << "_var_";
5061 for(MInt varId = 0; varId < noVars; varId++) {
5062 stringstream varName;
5063 varName << varNameBase.str() << varId;
5064 parallelIo.defineArray(PIO_FLOAT, varName.str(), m_globalNoProbeSliceIds[probeSliceId]);
5065 parallelIo.setAttribute(probeSliceId, "sliceId", varName.str());
5069 for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) { // loop over all slices
5070 if((m_probeSliceDir[2 * sliceId] < 0 || m_probeSliceDir[2 * sliceId] >= nDim)
5071 || (m_probeSliceDir[2 * sliceId + 1] < 0 || m_probeSliceDir[2 * sliceId + 1] >= nDim)
5072 || (m_probeSliceDir[2 * sliceId] == m_probeSliceDir[2 * sliceId + 1])) {
5076 m_log << " ^ * probe slice timestep " << globalTimeStep << " for probe slice #" << sliceId << endl;
5078 MInt noIds = m_noProbeSliceIds[sliceId];
5079 if(noIds == 0) noIds = 1; // avoid dereferencing array with length 0 in writeArray(...)
5080 ScratchSpace<MFloat> vars(noVars * noIds, "vars", AT_);
5082 const MFloat* cellVars = nullptr;
5084 // collect local variables
5085 for(MInt i = 0; i < m_noProbeSliceIds[sliceId]; i++) {
5086 const MInt probeId = m_probeSliceIds[sliceId][i];
5087 getSampleVariables(probeId, cellVars, false);
5088 for(MInt varId = 0; varId < noVars; varId++) {
5089 vars[i * noVars + varId] = cellVars[varId];
5093 parallelIo.setOffset(m_noProbeSliceIds[sliceId], m_probeSliceOffsets[sliceId]);
5096 stringstream varNameBase;
5097 varNameBase << "slice_" << sliceId;
5098 string coordName = varNameBase.str() + "_coordinates0";
5099 parallelIo.writeArray(&(m_probeSlicePositions[sliceId][0]), coordName, 2);
5100 coordName = varNameBase.str() + "_coordinates1";
5101 parallelIo.writeArray(&(m_probeSlicePositions[sliceId][1]), coordName, 2);
5103 varNameBase << "_var_";
5104 for(MInt varId = 0; varId < noVars; varId++) { // write all variables to file
5105 stringstream varName;
5106 varName << varNameBase.str() << varId;
5107 parallelIo.writeArray(&(vars[varId]), varName.str(), noVars);
5122template <MInt nDim, class ppType>
5123void PostProcessing<nDim, ppType>::probeArbitrarySlicePost() {
5126 if(m_postprocessFileName != "") {
5127 m_log << " ^ * probe arbitrary slice for file " << m_postprocessFileName << endl;
5128 TERMM(1, "FIXME untested");
5129 postData().loadMeanFile(m_postprocessFileName);
5130 probeArbitrarySlice();
5132 for(MInt t = m_averageStartTimestep; t <= m_averageStopTimestep; t += m_averageInterval) {
5133 pp->solver().loadSampleVariables(t);
5134 probeArbitrarySlice();
5147template <MInt nDim, class ppType>
5148void PostProcessing<nDim, ppType>::probeArbitrarySlice() {
5151 using namespace maia::parallel_io;
5153 // check for average timestep or postprocessFileName (globalTimeStep==0 if file is loaded with
5154 if((globalTimeStep >= m_averageStartTimestep && (globalTimeStep - m_averageStartTimestep) % m_averageInterval == 0
5155 && globalTimeStep <= m_averageStopTimestep)
5156 || globalTimeStep == 0) {
5157 MInt noVars = m_noVariables;
5159 noVars = postData().fileNoVars();
5162 const MInt step = (isMeanFile()) ? 0 : globalTimeStep;
5163 stringstream fileName;
5164 fileName << pp->solver().outputDir() << "probeArbitrarySlices_" << step << ParallelIo::fileExt();
5165 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
5167 if(!m_spatialAveraging) {
5168 cout << globalDomainId() << ":Creating " << m_noArbSlices << " arbitrary slices....";
5169 m_log << "Creating " << m_noArbSlices << " arbitrary slices...";
5171 cout << globalDomainId() << ":Creating " << m_noArbSlices << " arbitrary slices with spatial averaging....";
5172 m_log << "Creating " << m_noArbSlices << " arbitrary slices with spatial averaging....";
5175 // define all arrays in output file
5176 for(MInt probeSliceId = 0; probeSliceId < m_noArbSlices; probeSliceId++) {
5177 stringstream varNameBase;
5178 varNameBase << "slice_" << probeSliceId;
5180 for(MInt dimId = 0; dimId < nDim; dimId++) {
5181 stringstream coordName;
5182 coordName << varNameBase.str() << "_coordinate" << dimId;
5183 parallelIo.defineArray(PIO_FLOAT, coordName.str(), m_globalNoArbSlicePoints[probeSliceId]);
5184 parallelIo.setAttribute(probeSliceId, "sliceId", coordName.str());
5187 varNameBase << "_var_";
5188 for(MInt varId = 0; varId < noVars; varId++) {
5189 stringstream varName;
5190 varName << varNameBase.str() << varId;
5191 parallelIo.defineArray(PIO_FLOAT, varName.str(), m_globalNoArbSlicePoints[probeSliceId]);
5192 parallelIo.setAttribute(probeSliceId, "sliceId", varName.str());
5195 // if (m_spatialAveraging) {
5196 // stringstream spatialAveragingName;
5198 // parallelIo.defineArray(PIO_FLOAT, spatialAveragingName.str(), noVars);
5199 // parallelIo.setAttribute(probeSliceId, "sliceId", spatialAveragingName.str());
5203 for(MInt probeSliceId = 0; probeSliceId < m_noArbSlices; probeSliceId++) { // loop over all probe slices
5204 m_log << " ^ * probe arbitrary slice timestep " << globalTimeStep << " for slice #" << probeSliceId
5207 MInt noPoints = m_noArbSlicePoints[probeSliceId];
5211 } // avoid dereferencing array with length 0 in writeArray(...)
5212 MFloatScratchSpace vars(noVars * noPoints, AT_, "vars");
5213 MFloatScratchSpace pointVars(noVars, AT_, "pointVars");
5216 // MFloatScratchSpace pointVars(noVars, AT_, "pointVars");
5219 // collect local variables
5220 for(MInt i = 0; i < m_noArbSlicePoints[probeSliceId]; i++) {
5221 probeId = m_arbSliceIds[probeSliceId][i];
5223 for(MInt j = 0; j < nDim; j++) {
5224 position[j] = m_arbSliceCoordinates[probeSliceId][nDim * i + j];
5229 // for(MInt varId = 0; varId < noVars; varId++) {
5230 // pointVars[varId] = m_averagedVars[probeId][varId];
5233 pp->solver().getInterpolatedVariables(probeId, position, &pointVars[0]);
5236 for(MInt varId = 0; varId < noVars; varId++) {
5237 vars[i * noVars + varId] = pointVars[varId];
5241 parallelIo.setOffset(m_noArbSlicePoints[probeSliceId], m_arbSliceOffsets[probeSliceId]);
5244 stringstream varNameBase;
5245 varNameBase << "slice_" << probeSliceId;
5246 for(MInt dimId = 0; dimId < nDim; dimId++) {
5247 stringstream coordName;
5248 coordName << varNameBase.str() << "_coordinate" << dimId;
5249 parallelIo.writeArray(&(m_arbSliceCoordinates[probeSliceId][dimId]), coordName.str(), nDim);
5252 varNameBase << "_var_";
5253 for(MInt varId = 0; varId < noVars; varId++) { // write all variables to file
5254 stringstream varName;
5255 varName << varNameBase.str() << varId;
5256 parallelIo.writeArray(&(vars[varId]), varName.str(), noVars);
5260 // if (m_spatialAveraging) {
5262 // spatialAveraging.fill(0.0);
5264 // MInt globalNoIds = m_noArbSlicePoints[probeSliceId];
5265 // if (1 < pp->solver().noDomains()) {
5266 // MPI_Allreduce(MPI_IN_PLACE, &globalNoIds, 1, MPI_INT, MPI_SUM, pp->solver().mpiComm(), AT_,
5267 // "MPI_IN_PLACE
", "globalNoIds
" );
5270 // for (MInt varId = 0; varId < noVars; ++varId) {
5271 // for (MInt i = 0; i < m_noArbSlicePoints[probeSliceId]; ++i) {
5272 // spatialAveraging[varId] += vars[(i * noVars) + varId];
5275 // if (0 < m_noArbSlicePoints[probeSliceId]) {
5276 // spatialAveraging[varId] /= globalNoIds;
5279 // if (1 < pp->solver().noDomains()) {
5280 // MPI_Allreduce(MPI_IN_PLACE, &spatialAveraging[varId], 1, MPI_DOUBLE, MPI_SUM,
5281 // pp->solver().mpiComm(), AT_, "MPI_IN_PLACE", "spatialAveraging[varId]" );
5286 // if (0 == pp->solver().domainId()) {
5287 // parallelIo.setOffset(noVars, 0);
5289 // parallelIo.setOffset(0, 0);
5291 // stringstream spatialAveragingName;
5293 // parallelIo.writeArray(&spatialAveraging[0], spatialAveragingName.str());
5313template <MInt nDim, class ppType>
5314void PostProcessing<nDim, ppType>::averageSolutionsSlice() {
5317 if(!pp->solver().grid().isActive()) return;
5319 using namespace maia::parallel_io;
5321 // loop through slices
5322 for(MInt sliceId = 0; sliceId < m_noProbeSlices; sliceId++) {
5323 stringstream initFileName;
5324 initFileName << pp->solver().outputDir() << "probeSlice_" << m_sliceAxis[sliceId] << "_"
5325 << m_sliceIntercept[sliceId] << "_" << m_averageStartTimestep << ParallelIo::fileExt();
5327 ParallelIo initParallelIo(initFileName.str(), PIO_READ, pp->solver().mpiComm());
5329 // domain decomposition
5330 ParallelIo::size_type offset, totalCount;
5331 initParallelIo.readScalar(&totalCount, "noCells");
5332 MInt localCount = totalCount / pp->solver().noDomains();
5333 if(pp->solver().domainId() == pp->solver().noDomains() - 1) {
5334 localCount += (totalCount - pp->solver().noDomains() * localCount);
5337 initParallelIo.calcOffset(localCount, &offset, &totalCount, pp->solver().mpiComm());
5339 MInt datasetSize = 0;
5340 vector<MString> datasetNames;
5342 // get dataset names
5343 vector<MString> variableNames = initParallelIo.getDatasetNames();
5345 for(MUint var = 0; var < variableNames.size(); var++) { // search for all datasets named variables0, variables1...
5347 if(variableNames[var].find("variables") != MString::npos) datasetSize++;
5350 for(MInt var = 0; var < datasetSize; var++) {
5351 string tmpDatasetName;
5352 stringstream variableName;
5353 variableName << "variables" << var;
5354 initParallelIo.getAttribute(&tmpDatasetName, "name", variableName.str());
5355 datasetNames.push_back(tmpDatasetName);
5358 // Initialize arrays
5359 MFloatScratchSpace tmpVar(localCount, AT_, "tmpVar");
5360 MFloatScratchSpace tmpSum(datasetSize, localCount, AT_, "tmpSum");
5361 MFloatScratchSpace tmpSquare(datasetSize, localCount, AT_, "tmpSquare");
5363 for(MInt var = 0; var < datasetSize; var++) {
5364 for(MInt cellId = 0; cellId < localCount; cellId++) {
5365 tmpSum(var, cellId) = 0;
5366 tmpSquare(var, cellId) = 0;
5370 // loop through timesteps
5371 for(MInt t = m_averageStartTimestep; t <= m_averageStopTimestep; t += m_averageInterval) {
5372 stringstream fileName;
5373 fileName << pp->solver().outputDir() << "probeSlice_" << m_sliceAxis[sliceId] << "_" << m_sliceIntercept[sliceId]
5374 << "_" << t << ParallelIo::fileExt();
5375 ParallelIo parallelIo(fileName.str(), PIO_READ, pp->solver().mpiComm());
5376 parallelIo.setOffset(localCount, offset);
5378 // loop through datasets
5379 for(MInt var = 0; var < datasetSize; var++) {
5380 stringstream variableName;
5381 variableName << "variables" << var;
5382 parallelIo.readArray(tmpVar.getPointer(), variableName.str());
5384 for(MInt cellId = 0; cellId < localCount; cellId++) {
5385 tmpSum[cellId * datasetSize + var] += tmpVar[cellId];
5386 tmpSquare[cellId * datasetSize + var] += tmpVar[cellId] * tmpVar[cellId];
5390 MFloat weight = 1.0 / (((m_averageStopTimestep - m_averageStartTimestep) / m_averageInterval) + 1);
5391 for(MInt var = 0; var < datasetSize; var++) {
5392 for(MInt cellId = 0; cellId < localCount; cellId++) {
5393 tmpSum[cellId * datasetSize + var] = tmpSum[cellId * datasetSize + var] * weight;
5394 tmpSquare[cellId * datasetSize + var] =
5395 tmpSquare[cellId * datasetSize + var] * weight
5396 - tmpSum[cellId * datasetSize + var] * tmpSum[cellId * datasetSize + var];
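// Editor's note (added): with N = (m_averageStopTimestep - m_averageStartTimestep) / m_averageInterval + 1
// samples, tmpSum now holds the time mean <x> of each dataset and tmpSquare the variance
// <x'^2> = <x^2> - <x>^2, both obtained from the accumulated sum and sum of squares above.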
5401 stringstream solutionFileName;
5402 solutionFileName << pp->solver().outputDir() << "Mean_probeSlice_" << m_sliceAxis[sliceId] << "_"
5403 << m_sliceIntercept[sliceId] << "_" << m_averageStartTimestep << "-" << m_averageStopTimestep
5404 << ParallelIo::fileExt();
5406 initParallelIo.getAttribute(&gridName, "gridFile", "");
5407 ParallelIo solutionParallelIo(solutionFileName.str(), PIO_REPLACE, pp->solver().mpiComm());
5408 solutionParallelIo.setAttribute(gridName, "gridFile");
5409 solutionParallelIo.defineScalar(PIO_INT, "noCells");
5411 for(MInt var = 0; var < datasetSize; var++) {
5412 stringstream varName;
5413 varName << "variables" << var;
5414 solutionParallelIo.defineArray(PIO_FLOAT, varName.str(), totalCount);
5415 solutionParallelIo.setAttribute(datasetNames[var] + "m", "name", varName.str());
5417 for(MInt var = 0; var < datasetSize; var++) {
5418 stringstream varName;
5419 varName << "variables" << datasetSize + var;
5420 solutionParallelIo.defineArray(PIO_FLOAT, varName.str(), totalCount);
5421 solutionParallelIo.setAttribute(datasetNames[var] + "'", "name", varName.str());
5424 solutionParallelIo.setOffset(localCount, offset);
5425 solutionParallelIo.writeScalar(totalCount, "noCells");
5427 for(MInt var = 0; var < datasetSize; var++) {
5428 stringstream varName;
5429 varName << "variables" << var;
5430 solutionParallelIo.writeArray(&(tmpSum[var]), varName.str(), datasetSize);
5432 for(MInt var = 0; var < datasetSize; var++) {
5433 stringstream varName;
5434 varName << "variables" << datasetSize + var;
5435 solutionParallelIo.writeArray(&(tmpSquare[var]), varName.str(), datasetSize);
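// Editor's sketch (added, not part of the solver): a minimal, self-contained illustration of the
// sum / sum-of-squares averaging used above, with hypothetical sample values and sampling window.
#include <cstdio>
#include <vector>

int main() {
  const int start = 0, stop = 8, interval = 2; // hypothetical averaging window -> N = 5 samples
  const std::vector<double> samples = {1.0, 2.0, 4.0, 2.0, 1.0};
  const double weight = 1.0 / (((stop - start) / interval) + 1); // same weight as in the listing
  double sum = 0.0, sumSq = 0.0;
  for(const double x : samples) { // accumulate like tmpSum / tmpSquare
    sum += x;
    sumSq += x * x;
  }
  const double mean = sum * weight;                     // <x>
  const double variance = sumSq * weight - mean * mean; // <x^2> - <x>^2
  std::printf("mean = %g, variance = %g\n", mean, variance);
  return 0;
}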
5449template <MInt nDim, class ppType>
5450void PostProcessing<nDim, ppType>::getSampleVariables(MInt cellId, const MFloat*& vars, MBool mode) {
5454 pp->solver().getSampleVariables(cellId, vars);
5457 // MInt dataId = solver2DataIdParent(cellId);
5458 // vars = &postData().a_variable(dataId, 0);
5459 // TODO labels:PP is this correct, or use dataId?
5460 vars = &postData().a_averagedVariable(cellId, 0);
5462 pp->solver().getSampleVariables(cellId, vars);
5466template <MInt nDim, class ppType>
5467void PostProcessing<nDim, ppType>::getSampleVariables(MInt const cellId, std::vector<MFloat>& vars) {
5469 // TODO labels:PP Make this equivalent to getSampleVariables(MInt cellId, const MFloat*& vars, MBool mode) ?
5470 pp->solver().getSampleVariables(cellId, vars);
5474template <MInt nDim, class ppType>
5475void PostProcessing<nDim, ppType>::calcSamplingVar(const MInt cellId, const MInt sampleVarId, MFloat* const vars) {
5479 TERMM_IF_NOT_COND(sampleVarId == -1, "Error: sampleVarId for mean file needs to be -1.");
5480 MInt dataId = convertIdParent(pp->solver(), postData(), cellId);
5481 std::copy_n(&postData().a_variable(dataId, 0), postData().fileNoVars(), vars);
5483 const MBool interpolate = false;
5484 pp->solver().calcSamplingVarAtPoint(nullptr, cellId, sampleVarId, vars, interpolate);
5497template <MInt nDim, class ppType>
5498void PostProcessing<nDim, ppType>::spatialAveragingPost() {
5501 if(m_postprocessFileName != "") {
5502 m_log << " ^ * spatial averaging for file " << m_postprocessFileName << endl;
5503 TERMM(1, "FIXME untested");
5504 postData().loadMeanFile(m_postprocessFileName);
5507 if(pp->solver().domainId() == 0) {
5508 for(MInt i = 0; i < pp->solver().noDomains(); i++) {
5509 m_spatialVarsDispls[i] = (postData().fileNoVars() + 1) * m_spatialDispls[i];
5510 m_spatialVarsRecvcnts[i] = (postData().fileNoVars() + 1) * m_spatialRecvcnts[i];
5517 for(MInt t = m_averageStartTimestep; t <= m_averageStopTimestep; t += m_averageInterval) {
5518 pp->solver().loadSampleVariables(t);
5533template <MInt nDim, class ppType>
5534void PostProcessing<nDim, ppType>::spatialAveraging() {
5537 if((globalTimeStep >= m_averageStartTimestep && (globalTimeStep - m_averageStartTimestep) % m_averageInterval == 0
5538 && globalTimeStep <= m_averageStopTimestep)
5539 || globalTimeStep == 0) {
5540 m_log << " ^ * spatial averaging timestep " << globalTimeStep << "\n";
5542 MInt noCells = pp->solver().grid().noInternalCells();
5543 MInt noVars = m_noVariables;
5545 noVars = postData().fileNoVars();
5549 const MFloat* cellVars = 0;
5551 MInt dir1 = m_spatialDirection1;
5552 MInt dir2 = m_spatialDirection2;
5554 if(dir1 == -1 && dir2 == -1) { // single point
5556 ScratchSpace<MFloat> spatial_sum(noVars + 1, AT_, "spatial_sum");
5557 for(MInt i = 0; i < (noVars + 1); i++) {
5562 for(MInt cellId = 0; cellId < noCells; cellId++) {
5563 if(pp->solver().grid().tree().noChildren(cellId) > 0) continue; // check if leaf cell
5565 getSampleVariables(cellId, cellVars, false);
5567 lvlCell = pp->solver().grid().tree().level(cellId);
5569 for(MInt varId = 0; varId < noVars; varId++) {
5570 spatial_sum[varId] += 1 / m_spatialWeightSinglePoint * (m_spatialLvlWeight[lvlCell] * cellVars[varId]);
5573 spatial_sum[noVars] = m_spatialWeightSinglePoint;
5576 MFloat globalWeight = m_spatialWeightSinglePoint;
5577 MPI_Allreduce(MPI_IN_PLACE, &globalWeight, 1, MPI_DOUBLE, MPI_SUM, pp->solver().mpiComm(), AT_, "MPI_IN_PLACE",
5580 for(MInt varId = 0; varId < noVars; varId++) {
5581 spatial_sum[varId] = m_spatialWeightSinglePoint / globalWeight * spatial_sum[varId];
5584 MPI_Allreduce(MPI_IN_PLACE, spatial_sum.begin(), noVars, MPI_DOUBLE, MPI_SUM, pp->solver().mpiComm(), AT_,
5585 "MPI_IN_PLACE", "spatial_sum.begin()"); // allreduce not nececcary
5587 spatial_sum[noVars] = globalWeight;
5590 // TODO labels:PP,IO
5592 if(pp->solver().domainId() == 0) {
5593 stringstream fileName;
5594 fileName << "spatialAveraging_singlePoint.txt";
5596 file.open((fileName.str()).c_str());
5598 for(MInt varId = 0; varId < noVars; varId++) {
5599 file << spatial_sum[varId] << "\t";
5604 } else if((dir1 == -1 && dir2 != -1) || (dir2 == -1 && dir1 != -1)) { // line
5605 ScratchSpace<MFloat> spatial_sum(m_spatialLineNoCells * (noVars + 1), AT_, "spatial_sum"); // member?
5606 if(m_spatialLineAllVars == 0) {
5607 mAlloc(m_spatialLineAllVars, m_spatialCoordSum * (noVars + 1), "m_spatialLineAllVars", AT_);
5610 for(MInt i = 0; i < m_spatialLineNoCells * (noVars + 1); i++) {
5614 MInt coordId, index;
5615 MFloat mapCoord, cellCoord, weight;
5616 for(MInt cellId = 0; cellId < noCells; cellId++) {
5617 if(pp->solver().grid().tree().noChildren(cellId) > 0) continue; // check if leaf cell
5619 getSampleVariables(cellId, cellVars, false);
5621 cellCoord = pp->solver().a_coordinate(cellId, dir1);
5622 map<MFloat, MFloat, coord_comp_1d_>::const_iterator it3 = m_spatialLineCellToMap.find(cellCoord);
5624 if(it3 != m_spatialLineCellToMap.end()) {
5625 mapCoord = (*it3).second;
5627 mapCoord = cellCoord;
5630 map<MFloat, MInt, coord_comp_1d_>::iterator it_coord = m_spatialLineCoordinates.find(mapCoord);
5631 coordId = distance(m_spatialLineCoordinates.begin(), it_coord);
5633 if(coordId >= m_spatialLineNoCells) {
5637 lvlCell = pp->solver().grid().tree().level(cellId);
5638 weight = spatial_sum[coordId * (noVars + 1) + noVars] + m_spatialLvlWeight[lvlCell]; // new summed weight
5640 for(MInt varId = 0; varId < noVars; varId++) {
5641 index = coordId * (noVars + 1) + varId;
5643 spatial_sum[index] = 1 / weight
5644 * (m_spatialLvlWeight[lvlCell] * cellVars[varId]
5645 + spatial_sum[coordId * (noVars + 1) + noVars] * spatial_sum[index]);
5647 spatial_sum[coordId * (noVars + 1) + noVars] = weight;
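// Editor's note (added): this is an incremental weighted mean. With accumulated weight W and mean m,
// adding a cell of value x and level weight w gives m_new = (w * x + W * m) / (W + w) and W_new = W + w;
// m_spatialLvlWeight presumably accounts for the different cell sizes per refinement level.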
5650 MPI_Gatherv(spatial_sum.begin(), m_spatialLineNoCells * (noVars + 1), MPI_DOUBLE, m_spatialLineAllVars,
5651 m_spatialVarsRecvcnts, m_spatialVarsDispls, MPI_DOUBLE, 0, pp->solver().mpiComm(), AT_,
5652 "spatial_sum.begin()", "m_spatialLineAllVars");
5654 ScratchSpace<MFloat> final_vars(m_spatialGlobalLineCoordinates.size() * (noVars + 1), AT_,
5655 "final_vars"); // make member?
5656 for(MUint varId = 0; varId < m_spatialGlobalLineCoordinates.size() * (noVars + 1); varId++) {
5657 final_vars[varId] = 0;
5659 if(pp->solver().domainId() == 0) {
5660 for(MInt cellId = 0; cellId < m_spatialCoordSum; cellId++) { // assemble gathered variables
5661 mapCoord = m_spatialLineAllCoord[cellId];
5662 map<MFloat, MInt, coord_comp_1d_>::iterator it_coord = m_spatialGlobalLineCoordinates.find(mapCoord);
5664 if(it_coord == m_spatialGlobalLineCoordinates.end()) {
5665 it_coord = m_spatialGlobalLineCoordinates.find((*m_spatialGlobalLineCellToMap.find(mapCoord)).second);
5667 if(it_coord == m_spatialGlobalLineCoordinates.end()) {
5671 coordId = distance(m_spatialGlobalLineCoordinates.begin(), it_coord);
5673 index = coordId * (noVars + 1);
5674 weight = m_spatialLineAllVars[cellId * (noVars + 1) + noVars];
5676 for(MInt varId = 0; varId < noVars; varId++) {
5677 final_vars[index + varId] = 1 / (final_vars[index + noVars] + weight)
5678 * (weight * m_spatialLineAllVars[cellId * (noVars + 1) + varId]
5679 + final_vars[index + noVars] * final_vars[index + varId]);
5681 final_vars[index + noVars] += weight;
5685 MFloat summed_weight = 0;
5686 for(MUint i = 0; i < m_spatialGlobalLineCoordinates.size(); i++) {
5687 summed_weight += final_vars[i * (noVars + 1) + noVars];
5689 m_log << "spatial averaging: summed_weight " << summed_weight << endl;
5694 if(pp->solver().domainId() == 0) {
5695 stringstream fileName;
5696 fileName << "spatialAveraging_line_" << dir1 << "_" << globalTimeStep << ".txt";
5698 file.open((fileName.str()).c_str());
5701 map<MFloat, MInt, coord_comp_1d_>::const_iterator it_ = m_spatialGlobalLineCoordinates.begin();
5702 for(MUint i = 0; i < m_spatialGlobalLineCoordinates.size(); i++) {
5703 file << (*it_).first << " ";
5704 for(MInt varId = 0; varId < noVars + 1; varId++) {
5705 file << final_vars[i * (noVars + 1) + varId] << " ";
5713 if(dir1 < 0 || dir1 > nDim - 1 || dir2 < 0 || dir2 > nDim - 1 || dir1 == dir2) {
5714 m_log << "invalid spatial directions " << dir1 << " " << dir2 << endl;
5718 ScratchSpace<MFloat> spatial_sum(m_spatialPlaneNoCells * (noVars + 1), AT_, "spatial_sum"); // member?
5720 if(m_spatialPlaneAllVars == 0) {
5721 mAlloc(m_spatialPlaneAllVars, m_spatialCoordSum * (noVars + 1), "m_spatialPlaneAllVars", AT_);
5724 for(MInt i = 0; i < m_spatialPlaneNoCells * (noVars + 1); i++) {
5728 MInt coordId, index;
5730 pair<MFloat, MFloat> cellCoord, mapCoord;
5731 for(MInt cellId = 0; cellId < noCells; cellId++) {
5732 if(pp->solver().grid().tree().noChildren(cellId) > 0) continue; // check if leaf cell
5734 getSampleVariables(cellId, cellVars, false);
5736 cellCoord = make_pair(pp->solver().a_coordinate(cellId, dir1), pp->solver().a_coordinate(cellId, dir2));
5737 map<pair<MFloat, MFloat>, pair<MFloat, MFloat>, coord_comp_2d_>::const_iterator it3 =
5738 m_spatialPlaneCellToMap.find(cellCoord);
5740 if(it3 != m_spatialPlaneCellToMap.end()) {
5741 mapCoord = (*it3).second;
5743 mapCoord = cellCoord;
5746 map<pair<MFloat, MFloat>, MInt, coord_comp_2d_>::iterator it_coord = m_spatialPlaneCoordinates.find(mapCoord);
5747 coordId = distance(m_spatialPlaneCoordinates.begin(), it_coord);
5749 if(coordId >= m_spatialPlaneNoCells) {
5753 lvlCell = pp->solver().grid().tree().level(cellId);
5754 weight = spatial_sum[coordId * (noVars + 1) + noVars] + m_spatialLvlWeight[lvlCell]; // new summed weight
5756 for(MInt varId = 0; varId < noVars; varId++) {
5757 index = coordId * (noVars + 1) + varId;
5759 spatial_sum[index] = 1 / weight
5760 * (m_spatialLvlWeight[lvlCell] * cellVars[varId]
5761 + spatial_sum[coordId * (noVars + 1) + noVars] * spatial_sum[index]);
5763 spatial_sum[coordId * (noVars + 1) + noVars] = weight;
5766 MPI_Gatherv(spatial_sum.begin(), m_spatialPlaneNoCells * (noVars + 1), MPI_DOUBLE, m_spatialPlaneAllVars,
5767 m_spatialVarsRecvcnts, m_spatialVarsDispls, MPI_DOUBLE, 0, pp->solver().mpiComm(), AT_,
5768 "spatial_sum.begin()", "m_spatialPlaneAllVars");
5770 ScratchSpace<MFloat> final_vars(m_spatialGlobalPlaneCoordinates.size() * (noVars + 1), AT_,
5771 "final_vars"); // make member?
5772 for(MUint varId = 0; varId < m_spatialGlobalPlaneCoordinates.size() * (noVars + 1); varId++) {
5773 final_vars[varId] = 0;
5775 if(pp->solver().domainId() == 0) {
5776 for(MInt cellId = 0; cellId < m_spatialCoordSum; cellId++) { // assemble gathered variables
5777 mapCoord = make_pair(m_spatialPlaneAllCoord[2 * cellId], m_spatialPlaneAllCoord[2 * cellId + 1]);
5778 map<pair<MFloat, MFloat>, MInt, coord_comp_2d_>::iterator it_coord =
5779 m_spatialGlobalPlaneCoordinates.find(mapCoord);
5781 if(it_coord == m_spatialGlobalPlaneCoordinates.end()) {
5782 it_coord = m_spatialGlobalPlaneCoordinates.find((*m_spatialGlobalPlaneCellToMap.find(mapCoord)).second);
5784 if(it_coord == m_spatialGlobalPlaneCoordinates.end()) {
5788 coordId = distance(m_spatialGlobalPlaneCoordinates.begin(), it_coord);
5790 index = coordId * (noVars + 1);
5791 weight = m_spatialPlaneAllVars[cellId * (noVars + 1) + noVars];
5793 for(MInt varId = 0; varId < noVars; varId++) {
5794 final_vars[index + varId] = 1 / (final_vars[index + noVars] + weight)
5795 * (weight * m_spatialPlaneAllVars[cellId * (noVars + 1) + varId]
5796 + final_vars[index + noVars] * final_vars[index + varId]);
5798 final_vars[index + noVars] += weight;
5802 MFloat summed_weight = 0;
5803 for(MUint i = 0; i < m_spatialGlobalPlaneCoordinates.size(); i++) {
5804 summed_weight += final_vars[i * (noVars + 1) + noVars];
5806 m_log << "spatial averaging: summed_weight " << summed_weight << endl;
5811 if(pp->solver().domainId() == 0) {
5812 stringstream fileName;
5813 fileName << "spatialAveraging_plane_" << dir1 << "_" << dir2 << "_" << globalTimeStep << ".txt";
5815 file.open((fileName.str()).c_str());
5819 file << "coordinate\t" << "um\t\t" << "vm\t\t";
5820 IF_CONSTEXPR(nDim == 3) {
5823 file << "rhom\t\t" << "pm\t\t" << "summed weight" << endl;
5826 auto it_ = m_spatialGlobalPlaneCoordinates.begin();
5827 for(MUint i = 0; i < m_spatialGlobalPlaneCoordinates.size(); i++) {
5828 file << (*it_).first.first << " " << (*it_).first.second << " ";
5829 for(MInt varId = 0; varId < noVars + 1; varId++) {
5830 file << final_vars[i * (noVars + 1) + varId] << " ";
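// Editor's sketch (added, not part of the solver): a stand-alone illustration of the incremental
// weighted averaging used in spatialAveraging(); the (value, weight) pairs below are made up.
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  const std::vector<std::pair<double, double>> samples = {{1.0, 0.25}, {3.0, 1.0}, {2.0, 0.5}};
  double mean = 0.0, totalWeight = 0.0;
  for(const auto& [value, weight] : samples) {
    // m_new = (w * x + W * m) / (W + w), W_new = W + w -- same update as in the listing
    mean = (weight * value + totalWeight * mean) / (totalWeight + weight);
    totalWeight += weight;
  }
  std::printf("weighted mean = %g (total weight %g)\n", mean, totalWeight);
  return 0;
}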
5853template <MInt nDim, class ppType>
5854void PostProcessing<nDim, ppType>::createCellToMap1D(map<MFloat, MInt, coord_comp_1d_>& coordinates,
5855 map<MFloat, MFloat, coord_comp_1d_>& cell_to_map) {
5858 MFloat length0 = m_gridProxy->cellLengthAtLevel(0);
5859 const MInt minLvl = m_gridProxy->minLevel();
5861 multimap<MFloat, MFloat>
5862 map_to_cell; // store small cells with key equal to the large cell coordinate in which to average
5863 pair<multimap<MFloat, MFloat>::iterator, multimap<MFloat, MFloat>::iterator> range;
5864 multimap<MFloat, MFloat>::iterator it_multi, it_multi2;
5869 // find largest cells in the cut planes
5870 auto it = coordinates.begin();
5871 auto it2 = coordinates.begin();
5872 auto itTmp = coordinates.begin();
5873 auto itTmp2 = coordinates.begin();
5875 for(it = it2 = coordinates.begin(); it != coordinates.end();) {
5876 if(it2 == coordinates.end()) {
5878 if(it == coordinates.end()) break;
5882 } else if((*it).second == (*it2).second) { // same level
5888 if((*it).second > (*it2).second) {
5894 if(fabs(c1 - c2) > length0 / FPOW2(minLvl)) {
5900 } else if(((c1 < c2) && (c1 + 0.5 * length0 / FPOW2((*it).second) > c2))
5901 || ((c1 > c2) && (c1 - 0.5 * length0 / FPOW2((*it).second) < c2))) {
5902 range = map_to_cell.equal_range((*it2).first);
5903 c = distance(range.first, range.second);
5906 for(it_multi = range.first; c != 0 && r_count != c; r_count++) {
5907 it_multi2 = it_multi;
5908 it_multi2++; // access to next element before erase of it_multi
5909 coord = (*it_multi).second;
5910 map_to_cell.erase(it_multi);
5911 map_to_cell.insert(pair<MFloat, MFloat>((*it).first, coord));
5912 it_multi = it_multi2;
5915 map_to_cell.insert(pair<MFloat, MFloat>((*it).first, (*it2).first));
5918 coordinates.erase(it2);
5922 coordinates.erase(it2);
5934 // cell to map: cell/position -> average cell/position
5935 for(it_multi = map_to_cell.begin(); it_multi != map_to_cell.end(); it_multi++) {
5936 cell_to_map[(*it_multi).second] = (*it_multi).first;
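// Editor's note (added): on exit, 'coordinates' only keeps the coarsest cell coordinate of each
// overlapping group (the map value is the refinement level), while 'cell_to_map' maps each removed
// fine-cell coordinate to the coarse coordinate it should be averaged into.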
5951template <MInt nDim, class ppType>
5952void PostProcessing<nDim, ppType>::createCellToMap2D(
5953 map<pair<MFloat, MFloat>, MInt, coord_comp_2d_>& coordinates,
5954 map<pair<MFloat, MFloat>, pair<MFloat, MFloat>, coord_comp_2d_>& cell_to_map) {
5957 MFloat length0 = m_gridProxy->cellLengthAtLevel(0);
5958 const MInt minLvl = m_gridProxy->minLevel();
5960 multimap<pair<MFloat, MFloat>, pair<MFloat, MFloat>>
5961 map_to_cell; // store small cells with key equal to the large cell coordinate in which to average
5962 pair<multimap<pair<MFloat, MFloat>, pair<MFloat, MFloat>>::iterator,
5963 multimap<pair<MFloat, MFloat>, pair<MFloat, MFloat>>::iterator>
5965 multimap<pair<MFloat, MFloat>, pair<MFloat, MFloat>>::iterator it_multi, it_multi2;
5966 pair<MFloat, MFloat> coord;
5970 // find largest cells in the cut planes
5971 auto it = coordinates.begin();
5972 auto it2 = coordinates.begin();
5973 auto itTmp = coordinates.begin();
5974 auto itTmp2 = coordinates.begin();
5975 pair<MFloat, MFloat> c1, c2;
5976 for(it = it2 = coordinates.begin(); it != coordinates.end();) {
5977 if(it2 == coordinates.end()) {
5979 if(it == coordinates.end()) break;
5983 } else if((*it).second == (*it2).second) { // same level
5989 if((*it).second > (*it2).second) {
5995 if(fabs(c1.first - c2.first) > length0 / FPOW2(minLvl) && fabs(c1.second - c2.second) > length0 / FPOW2(minLvl)) {
6000 else if(((c1.first < c2.first) && (c1.first + 0.5 * length0 / FPOW2((*it).second) > c2.first))
6001 || ((c1.first > c2.first) && (c1.first - 0.5 * length0 / FPOW2((*it).second) < c2.first))) {
6002 if(((c1.second < c2.second) && (c1.second + 0.5 * length0 / FPOW2((*it).second) > c2.second))
6003 || ((c1.second > c2.second) && (c1.second - 0.5 * length0 / FPOW2((*it).second) < c2.second))) {
6004 range = map_to_cell.equal_range((*it2).first);
6005 c = distance(range.first, range.second);
6008 for(it_multi = range.first; c != 0 && r_count != c; r_count++) {
6009 it_multi2 = it_multi;
6010 it_multi2++; // access to next element before erase of it_multi
6011 coord = (*it_multi).second;
6012 map_to_cell.erase(it_multi);
6013 map_to_cell.insert(pair<pair<MFloat, MFloat>, pair<MFloat, MFloat>>((*it).first, coord));
6014 it_multi = it_multi2;
6017 map_to_cell.insert(pair<pair<MFloat, MFloat>, pair<MFloat, MFloat>>((*it).first, (*it2).first));
6020 coordinates.erase(it2);
6024 coordinates.erase(it2);
6037 // cell to map: cell/position -> average cell/position
6038 for(it_multi = map_to_cell.begin(); it_multi != map_to_cell.end(); it_multi++) {
6039 cell_to_map[(*it_multi).second] = (*it_multi).first;
6051template <MInt nDim, class ppType>
6052void PostProcessing<nDim, ppType>::findClosestProbePointsGridCells() {
6055 m_log << " = finding closest cells for all probe points on this domain" << endl;
6057 MFloatScratchSpace sqleveldiags(pp->solver().grid().maxLevel(), AT_, "leveldiags");
6058 for(MInt lev = 0; lev < pp->solver().grid().maxLevel(); lev++)
6060 MFloat len = pp->solver().grid().cellLengthAtLevel(lev+1);
6061 sqleveldiags.p[lev] = 3*len*len;
6065 for(MInt np = 0; np < m_noProbePoints; np++) {
6068 for(MInt d = 0; d < nDim; d++)
6069 dist += (pp->solver().a_coordinate(0, d) - m_probeCoordinates[np][d])
6070 * (pp->solver().a_coordinate(0, d) - m_probeCoordinates[np][d]);
6072 m_probeCellIds[np] = 0;
6074 for(MInt i = 0; i < pp->solver().grid().noInternalCells(); i++) {
6075 // skip cells without children
6076 if(m_gridProxy->tree().noChildren(i) != 0) continue;
6079 for(MInt d = 0; d < nDim; d++)
6080 tmpdist += (pp->solver().a_coordinate(i, d) - m_probeCoordinates[np][d])
6081 * (pp->solver().a_coordinate(i, d) - m_probeCoordinates[np][d]);
6083 if(tmpdist <= dist) {
6084 m_probeCellIds[np] = i;
6089 MBool is_inside = true;
6090 for(MInt d = 0; d < nDim; d++)
6091 is_inside = is_inside
6092 && (fabs(pp->solver().a_coordinate(m_probeCellIds[np], d) - m_probeCoordinates[np][d])
6093 <= m_gridProxy->cellLengthAtLevel(m_gridProxy->tree().level(m_probeCellIds[np]) + 1));
6095 if(!is_inside) m_probeCellIds[np] = -1;
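// Editor's sketch (added, not part of the solver): the same brute-force closest-cell search with the
// final inside-the-cell check, written against plain containers instead of the solver accessors.
#include <array>
#include <cmath>
#include <cstddef>
#include <vector>

// Returns the index of the closest cell center, or -1 if the point does not actually lie inside
// that cell (|delta| <= half cell length in every dimension), mirroring the check above.
int findClosestCell(const std::vector<std::array<double, 3>>& centers,
                    const std::vector<double>& halfCellLengths, const std::array<double, 3>& point) {
  int best = -1;
  double bestDist = 0.0;
  for(std::size_t i = 0; i < centers.size(); i++) {
    double dist = 0.0;
    for(int d = 0; d < 3; d++) {
      dist += (centers[i][d] - point[d]) * (centers[i][d] - point[d]);
    }
    if(best == -1 || dist < bestDist) {
      best = static_cast<int>(i);
      bestDist = dist;
    }
  }
  if(best != -1) {
    for(int d = 0; d < 3; d++) {
      if(std::fabs(centers[best][d] - point[d]) > halfCellLengths[best]) return -1;
    }
  }
  return best;
}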
6099template <MInt nDim, class ppType>
6100void PostProcessing<nDim, ppType>::saveSliceAiaFileFormat(const MInt step, const MInt noVars, MFloatScratchSpace& vars,
6101 const MInt sliceId) {
6104 stringstream fileName;
6105 vector<MString> datasetNames;
6108 // TODO labels:PP @ansgar use a unique name for each solver
6109 fileName << pp->solver().outputDir() << "probeSlice_" << m_sliceAxis[sliceId] << "_" << m_sliceIntercept[sliceId]
6110 << "_" << step << ParallelIo::fileExt();
6112 // Assemble all slice variable names
6113 datasetNames.clear();
6114 for(MUint v = 0; v < m_sliceVarIds.size(); v++) {
6115 datasetNames.insert(datasetNames.end(), m_sliceVarNames[v].begin(), m_sliceVarNames[v].end());
6118 const MUint slashPos = m_postprocessFileName.find_last_of("/");
6119 const MUint pos = (slashPos < m_postprocessFileName.length()) ? slashPos + 1 : 0;
6120 const MString fileNameRaw = m_postprocessFileName.substr(pos, m_postprocessFileName.find_last_of(".") - pos);
6121 // TODO labels:PP @ansgar use a unique name for each solver
6122 fileName << pp->solver().outputDir() << fileNameRaw << "_slice_" << m_sliceAxis[sliceId] << "_"
6123 << m_sliceIntercept[sliceId] << ParallelIo::fileExt();
6125 datasetNames = postData().fileVarNames();
6127 // if(m_fileVarNames.size() > 0) {
6128 // datasetNames = m_fileVarNames;
6130 // datasetNames = {"um", "vm", "wm", "rhom", "pm", "cm", "hm", "u'",
6131 //                 "v'", "w'", "u'v'", "v'w'", "w'u'", "p'", "c'", "h'"};
6136 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
6137 const MString varNameBase = "variables";
6140 parallelIo.setAttribute(m_globalNoProbeSliceIds[sliceId], "noCells");
6141 parallelIo.setAttribute(pp->solver().solverId(), "solverId");
6142 parallelIo.setAttribute(pp->solver().getCurrentTimeStep(), "globalTimeStep");
6143 parallelIo.setAttribute(pp->solver().time(), "time");
6144 parallelIo.setAttribute(m_probeSliceGridNames[sliceId], "gridFile");
6146 for(MInt varId = 0; varId < noVars; varId++) {
6147 stringstream varName;
6148 varName << varNameBase << varId;
6149 parallelIo.defineArray(maia::parallel_io::PIO_FLOAT, varName.str(), m_globalNoProbeSliceIds[sliceId]);
6151 if(datasetNames.size() != (MUint)noVars) {
6152 name << "var" << varId;
6154 name << datasetNames[varId];
6156 parallelIo.setAttribute(name.str(), "name", varName.str());
6160 // Set variables for writing depending on the selected IO method
6163 MInt* hilbertInfo = nullptr;
6164 if(m_optimizedSliceIo) {
6165 maxNoIds = m_noProbeSliceMaxNoContHilbertIds[sliceId];
6166 noIds = m_noProbeSliceNoContHilbertIds[sliceId];
6167 hilbertInfo = m_noProbeSliceContHilbertInfo[sliceId];
6169 maxNoIds = m_noProbeSliceMaxNoHilbertIds[sliceId];
6170 noIds = m_noProbeSliceNoHilbertIds[sliceId];
6171 hilbertInfo = m_noProbeSliceHilbertInfo[sliceId];
6174 const MFloat writeTimeStart = wallTime();
6175 for(MInt varId = 0; varId < noVars; varId++) { // write all variables to file
6176 stringstream varName;
6177 varName << varNameBase << varId;
6179 // Call the writeArray function on every domain equally often, since data is written in chunks with the same
6180 // hilbertId or chunks of contiguous hilbert ids
6181 for(MInt h = 0, pOffset = 0; h < maxNoIds; h++) {
6183 // write data with same hilbertId or contiguous chunk of hilbertIds, since in the slices cells are sorted by
6184 // their hilbertId of the slice
6185 parallelIo.setOffset(hilbertInfo[h * 3 + 1], hilbertInfo[h * 3 + 2]);
6186 parallelIo.writeArray(&(vars[varId + pOffset * noVars]), varName.str(), noVars);
6187 // local offset in data array
6188 pOffset += hilbertInfo[h * 3 + 1];
6190 // dummy calls if the current domain has finished writing data, but other domains have not
6191 parallelIo.setOffset(0, 0);
6192 parallelIo.writeArray(&(vars[varId]), varName.str(), noVars);
6196 const MFloat writeTimeTotal = wallTime() - writeTimeStart;
6197 if(pp->solver().domainId() == 0) {
6198 std::cerr << "Slice #" << sliceId << " " << fileName.str() << " write time: " << writeTimeTotal << std::endl;
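// Editor's note (added): writeArray is a collective call, so every rank must invoke it the same
// number of times (maxNoIds chunks per variable). Ranks that own fewer Hilbert-id chunks issue the
// remaining calls with a zero-size offset (setOffset(0, 0)) as dummy writes, which keeps the
// collective I/O in sync across domains.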
6210template <MInt nDim, class ppType>
6211void PostProcessing<nDim, ppType>::collectMinLvlCells() {
6214 MInt noCells = pp->solver().grid().noInternalCells();
6215 ScratchSpace<MInt> minLvlCellIds(noCells, AT_, "minLvlCellIds");
6216 MInt noMinLvlCells = 0;
6218 for(MInt cellId = 0; cellId < noCells; cellId++) {
6219 if(m_gridProxy->tree().parent(cellId) == -1 || m_gridProxy->tree().parent(cellId) >= noCells) { // ask jerry!
6220 minLvlCellIds[noMinLvlCells++] = cellId;
6224 m_noMinLvlIds = noMinLvlCells;
6225 mAlloc(m_minLvlIds, m_noMinLvlIds, "m_minLvlIds", AT_);
6227 for(MInt cellId = 0; cellId < noMinLvlCells; cellId++) {
6228 m_minLvlIds[cellId] = minLvlCellIds[cellId];
6248template <MInt nDim, class ppType>
6249void PostProcessing<nDim, ppType>::findContainingCell(const MFloat* coord, MInt& cellId) {
6252 MInt noMinLvlCells = m_noMinLvlIds;
6253 MInt curId, childCellId;
6254 MFloat halfCellLength;
6255 const MFloat* cellCoords;
6258 // find min level cell containing coord
6259 for(MInt i = 0; i < noMinLvlCells; i++) {
6261 curId = m_minLvlIds[i];
6262 cellCoords = &pp->solver().a_coordinate(curId, 0);
6263 halfCellLength = m_gridProxy->halfCellLength(curId);
6265 for(MInt dimId = 0; dimId < nDim; dimId++) {
6266 if(abs(coord[dimId] - cellCoords[dimId]) <= halfCellLength) cnt++;
6274 if(cellId == -1) return;
6276 // loop down to leaf cell containing coord
6277 while(m_gridProxy->tree().noChildren(cellId) > 0) {
6279 for(childId = 0; childId < IPOW2(nDim); childId++) {
6281 childCellId = m_gridProxy->tree().child(cellId, childId);
6282 if(childCellId < 0) continue;
6283 cellCoords = &pp->solver().a_coordinate(childCellId, 0);
6284 halfCellLength = m_gridProxy->halfCellLength(childCellId);
6286 for(MInt dimId = 0; dimId < nDim; dimId++) {
6287 if(abs(coord[dimId] - cellCoords[dimId]) <= halfCellLength) cnt++;
6291 cellId = childCellId;
6295 if(childId == IPOW2(nDim)) {
6301 // check if coord is on cell face and determine if neighboring cell is in another domain
6302 // if coord lies between domains it is considered to belong to the domain with the lowest id
6303 MBool cellInOtherDomain = 0;
6306 MInt nghbrId, globalId, parentId;
6308 cellCoords = &pp->solver().a_coordinate(cellId, 0);
6309 halfCellLength = m_gridProxy->halfCellLength(cellId);
6310 for(MInt dimId = 0; dimId < nDim; dimId++) {
6311 distance = abs(coord[dimId] - cellCoords[dimId]);
6312 if(distance >= halfCellLength && distance < halfCellLength + MFloatEps) {
6313 direction = (coord[dimId] - cellCoords[dimId] < 0) ? 0 : 1;
6316 // check for neighbor
6317 if(m_gridProxy->tree().hasNeighbor(cellId, 2 * dimId + direction)) { // same level neighbor
6318 nghbrId = m_gridProxy->tree().neighbor(cellId, 2 * dimId + direction);
6319 globalId = m_gridProxy->tree().globalId(nghbrId);
6320 } else { // check for neighbor on parent level
6321 parentId = m_gridProxy->tree().parent(cellId);
6322 if(parentId != -1) {
6323 if(m_gridProxy->tree().hasNeighbor(parentId, 2 * dimId + direction)) {
6324 nghbrId = m_gridProxy->tree().neighbor(parentId, 2 * dimId + direction);
6325 globalId = m_gridProxy->tree().globalId(nghbrId);
6330 if(globalId != -1) {
6331 // check if neighboring cell is in a "lower" domain
6332 if(pp->solver().grid().raw().findNeighborDomainId(globalId) < pp->solver().domainId()) {
6333 cellInOtherDomain = 1;
6338 if(cellInOtherDomain) {
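// Editor's note (added): a point lying exactly on a face shared with another domain is assigned to
// the domain with the lower id; this rank then presumably discards the cell (the body is elided in
// this listing) so that each probe point is claimed by exactly one domain.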
6352template <MInt nDim, class ppType>
6353void PostProcessing<nDim, ppType>::initWritePointData() {
6367 m_pdFileName = Context::getSolverProperty<MString>("pp_pdFileName", m_postprocessingId, AT_, &m_pdFileName);
6376 m_pdStartTimestep = 0;
6377 m_pdStartTimestep = Context::getSolverProperty<MInt>("pp_pdStartTimestep", m_postprocessingId, AT_);
6386 m_pdStopTimestep = 0;
6387 m_pdStopTimestep = Context::getSolverProperty<MInt>("pp_pdStopTimestep", m_postprocessingId, AT_);
6397 m_pdRestartInterval = 0;
6398 m_pdRestartInterval = Context::getSolverProperty<MInt>("pp_pdRestartInterval", m_postprocessingId, AT_);
6400 IF_CONSTEXPR(nDim == 3) {
6401 TERMM(1, "This function is untested for 3D. First make sure it works, then delete this warning...");
6403 // Store u,v, (w) and p
6404 MInt noVars = nDim + 1;
6407 MInt noTimesteps = m_pdStopTimestep - m_pdStartTimestep;
6408 if(noTimesteps < 0) {
6409 TERMM(1, "The stop timestep for the writing of point data must be greater than the start timestep.");
6412 collectMinLvlCells();
6414 vector<MFloat> tmpCoordinates{};
6415 if(pp->solver().domainId() == 0) {
6419 ifstream csvFile(m_pdFileName);
6420 // Read all lines and get coordinates
6421 while(getline(csvFile, line)) {
6425 for(MInt i = 0; i < nDim; i++) {
6429 // Start line count at one
6430 err << "Error at line " << noPoints + 1 << ": " << line << "\n"
6431 << "Either wrong dimension (nDim = " << nDim << ") or otherwise wrong format."
6432 << "Format should be nDim floats seperated by spaces per line.";
6433 TERMM(1, err.str());
6435 tmpCoordinates.push_back(curFloat);
6440 MPI_Bcast(&noCoords, 1, MPI_INT, 0, pp->solver().mpiComm(), AT_, "noCoords");
6442 vector<MFloat> points(noCoords * nDim);
6443 if(pp->solver().domainId() == 0) {
6444 copy(tmpCoordinates.begin(), tmpCoordinates.end(), points.begin());
6446 MPI_Bcast(&points[0], noCoords * nDim, MPI_DOUBLE, 0, pp->solver().mpiComm(), AT_, "points[0]");
6448 // Find the containing cells for the coordinates and save their cell id
6450 for(MInt pointId = 0; pointId < noCoords; pointId++) {
6451 findContainingCell(&points[nDim * pointId], cellId);
6455 m_pdPoints.insert(m_pdPoints.end(), &points[nDim * pointId], &points[nDim * (pointId + 1)]);
6456 m_pdCells.push_back(cellId);
6460 m_pdNoPoints = noPoints;
6461 m_pdVars.resize(noPoints * noTimesteps * noVars, 0.0);
6471template <MInt nDim, class ppType>
6472void PostProcessing<nDim, ppType>::writePointData() {
6475 IF_CONSTEXPR(nDim == 3) {
6476 TERMM(1, "This function is untested for 3D. First make sure it works, then delete this warning...");
6478 // Store u,v, (w) and p
6479 MInt noVars = nDim + 1;
6480 MInt noPoints = (m_pdNoPoints > 0) ? m_pdNoPoints : 1;
6481 MInt noTimesteps = m_pdStopTimestep - m_pdStartTimestep;
6483 if(globalTimeStep > m_pdStartTimestep && globalTimeStep <= m_pdStopTimestep) {
6484 if(m_pdNoPoints > 0) {
6485 MInt curTimestep = globalTimeStep - m_pdStartTimestep;
6486 for(MInt pointId = 0; pointId < m_pdNoPoints; pointId++) {
6487 const MFloat* cellVars = 0;
6488 // pp->solver().getSampleVariables(m_pdCells[pointId], cellVars);
6489 getSampleVariables(m_pdCells[pointId], cellVars, true);
6490 MInt startPos = pointId * (noTimesteps * noVars) + (curTimestep - 1) * noVars;
6491 if(cellVars == nullptr) continue;
6492 // Store the velocities
6493 for(MInt dimId = 0; dimId < nDim; dimId++) {
6494 m_pdVars[startPos + dimId] = cellVars[dimId];
6497 m_pdVars[startPos + nDim] = cellVars[nDim + 1];
6502 if(globalTimeStep == m_pdStopTimestep || (globalTimeStep % m_pdRestartInterval == 0)) {
6503 // Save to Netcdf file
6505 outputDir = Context::getSolverProperty<MString>("outputDir", m_postprocessingId, AT_);
6507 ParallelIo::size_type pointsOffset, totalNoPoints;
6508 stringstream fileName;
6509 if(globalTimeStep != m_pdStopTimestep) {
6510 fileName << outputDir << "microphones_" << m_pdStartTimestep << "_" << m_pdStopTimestep << "_" << globalTimeStep
6513 fileName << outputDir << "microphones_" << m_pdStartTimestep << "_" << m_pdStopTimestep << ".Netcdf";
6515 ParallelIo parallelIo(fileName.str(), maia::parallel_io::PIO_REPLACE, pp->solver().mpiComm());
6516 parallelIo.calcOffset(m_pdNoPoints, &pointsOffset, &totalNoPoints, pp->solver().mpiComm());
6519 MString dimNames[3] = {"x", "y", "z"};
6521 for(MInt dimId = 0; dimId < nDim; dimId++) {
6522 stringstream dimName;
6523 dimName << "coordinates" << dimId;
6524 parallelIo.defineArray(maia::parallel_io::PIO_FLOAT, dimName.str(), totalNoPoints);
6525 parallelIo.setAttribute(dimNames[dimId], "name", dimName.str());
6527 parallelIo.defineArray(maia::parallel_io::PIO_FLOAT, "cellIds", totalNoPoints);
6529 ParallelIo::size_type dimSizes[] = {totalNoPoints, noTimesteps, noVars};
6530 parallelIo.defineArray(maia::parallel_io::PIO_FLOAT, "microphones", 3, &dimSizes[0]);
6531 parallelIo.setAttribute("desc", "description", "microphones");
6532 parallelIo.setAttribute("timesteps", "dim_0", "microphones");
6533 parallelIo.setAttribute("points", "dim_1", "microphones");
6534 parallelIo.setAttribute("vars", "dim_2", "microphones");
6535 parallelIo.setAttribute("u", "var_0", "microphones");
6536 parallelIo.setAttribute("v", "var_1", "microphones");
6537 IF_CONSTEXPR(nDim == 2) { parallelIo.setAttribute("p", "var_2", "microphones"); }
6539 parallelIo.setAttribute("w", "var_2", "microphones");
6540 parallelIo.setAttribute("p", "var_3", "microphones");
6543 parallelIo.setOffset(m_pdNoPoints, pointsOffset);
6546 // Write coordinates as x and y
6547 for(MInt dimId = 0; dimId < nDim; dimId++) {
6548 ScratchSpace<MFloat> coord(noPoints, AT_, "coordinates");
6549 for(MInt i = 0; i < m_pdNoPoints; i++) {
6550 coord[i] = m_pdPoints[nDim * i + dimId];
6552 stringstream dimName;
6553 dimName << "coordinates" << dimId;
6554 parallelIo.writeArray(&(coord[0]), dimName.str());
6558 ScratchSpace<MFloat> cellIds(noPoints, AT_, "cellIds");
6559 for(MInt i = 0; i < m_pdNoPoints; i++) {
6560 cellIds[i] = m_pdCells[i];
6562 parallelIo.writeArray(&(cellIds[0]), "cellIds");
6564 // Write values at microphones
6565 ScratchSpace<MFloat> microphones(noPoints, noTimesteps, noVars, AT_, "microphones_data");
6566 if(m_pdNoPoints > 0) {
6567 for(MInt i = 0; i < noPoints; i++) {
6568 for(MInt j = 0; j < noTimesteps; j++) {
6569 for(MInt k = 0; k < noVars; k++) {
6570 microphones(i, j, k) = m_pdVars[i * (noTimesteps * noVars) + j * noVars + k];
6576 parallelIo.setOffset(m_pdNoPoints, pointsOffset, 3);
6577 parallelIo.writeArray(&microphones[0], "microphones");
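// Editor's note (added): the "microphones" dataset is laid out as [point][timestep][variable], which
// matches the flat index pointId * (noTimesteps * noVars) + (curTimestep - 1) * noVars + varId used
// when filling m_pdVars; note that the dim_0/dim_1 name attributes above ("timesteps"/"points")
// appear to be swapped relative to dimSizes = {totalNoPoints, noTimesteps, noVars}.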
6592template <MInt nDim, class ppType>
6593MInt PostProcessing<nDim, ppType>::findNearestGridCell(const MFloat* coord) {
6596 MInt out_cellId = -1;
6597 MFloat t_distance = 0.0;
6599 m_log << "grid size is " << m_gridProxy->tree().size() << endl;
6601 // Loop over all (boundary) cells
6602 for(MInt i = 0; i < m_gridProxy->tree().size(); ++i) {
6603 if(m_gridProxy->tree().noChildren(i) > 0) {
6606 if(m_gridProxy->tree().hasProperty(i, Cell::IsHalo)) {
6609 if(m_gridProxy->tree().globalId(i) < 0) {
6612 // Get Distance to the cell
6613 MFloat i_distance = 0.0;
6615 i_distance += (coord[0] - pp->solver().a_coordinate(i, 0)) * (coord[0] - pp->solver().a_coordinate(i, 0));
6616 i_distance += (coord[1] - pp->solver().a_coordinate(i, 1)) * (coord[1] - pp->solver().a_coordinate(i, 1));
6618 IF_CONSTEXPR(nDim == 3) {
6619 i_distance += (coord[2] - pp->solver().a_coordinate(i, 2)) * (coord[2] - pp->solver().a_coordinate(i, 2));
6622 if((i_distance < t_distance) || (-1 == out_cellId)) {
6623 t_distance = i_distance;
6628 // if (m_gridProxy->tree().hasProperty(out_cellId, Cell::IsHalo)) {
6646template <MInt nDim, class ppType>
6647void PostProcessing<nDim, ppType>::movePointsToGrid(MFloat* in_points, MInt in_noPoints, MInt in_moveType) {
6655 MFloatInt* distRank = new MFloatInt[in_noPoints / nDim];
6657 MIntScratchSpace gridCellId(in_noPoints / nDim, AT_, "gridPointId");
6658 gridCellId.fill(-1);
6660 for(MInt i = 0; i < in_noPoints; i += nDim) {
6661 MFloat checkPoint[3];
6663 checkPoint[0] = in_points[i];
6664 checkPoint[1] = in_points[i + 1];
6665 IF_CONSTEXPR(nDim == 3) { checkPoint[2] = in_points[i + 2]; }
6667 m_log << "checkPoint is " << checkPoint[0] << ", " << checkPoint[1] << ", " << checkPoint[2] << endl;
6669 if(1 == in_moveType) {
6670 gridCellId[i / nDim] = pp->solver().grid().raw().findContainingLeafCell(checkPoint);
6673 if(-1 == gridCellId[i / nDim]) {
6674 gridCellId[i / nDim] = findNearestGridCell(checkPoint);
6676 m_log << "gridCellId is " << gridCellId[i / nDim] << endl;
6678 distRank[i / nDim].val = 0.0;
6679 distRank[i / nDim].val += (in_points[i] - pp->solver().a_coordinate(gridCellId[i / nDim], 0))
6680 * (in_points[i] - pp->solver().a_coordinate(gridCellId[i / nDim], 0));
6681 distRank[i / nDim].val += (in_points[i + 1] - pp->solver().a_coordinate(gridCellId[i / nDim], 1))
6682 * (in_points[i + 1] - pp->solver().a_coordinate(gridCellId[i / nDim], 1));
6683 IF_CONSTEXPR(nDim == 3) {
6684 distRank[i / nDim].val += (in_points[i + 2] - pp->solver().a_coordinate(gridCellId[i / nDim], 2))
6685 * (in_points[i + 2] - pp->solver().a_coordinate(gridCellId[i / nDim], 2));
6687 distRank[i / nDim].rank = pp->solver().domainId();
6691 m_log << "Before correction old coordinates for each point are..." << endl;
6692 for(MInt i = 0; i < in_noPoints; i += nDim) {
6693 m_log << "Coordinate for point " << i / nDim << " is " << in_points[i] << ", " << in_points[i + 1] << ", "
6694 << in_points[i + 2] << endl;
6697 if(1 < pp->solver().noDomains()) {
6698 m_log << "Before Communication" << endl;
6699 for(MInt i = 0; i < in_noPoints; i += nDim) {
6700 m_log << "Distance for point " << i / nDim << " is " << distRank[i / nDim].val << " on rank "
6701 << distRank[i / nDim].rank << endl;
6704 // Check which rank has the lowest distance to each point
6705 MPI_Allreduce(MPI_IN_PLACE, distRank, in_noPoints / nDim, MPI_DOUBLE_INT, MPI_MINLOC, pp->solver().mpiComm(), AT_,
6706 "MPI_IN_PLACE", "distRank");
6708 m_log << "After Communication" << endl;
6709 for(MInt i = 0; i < in_noPoints; i += nDim) {
6710 m_log << "Smallest distance for point " << i / nDim << " is " << distRank[i / nDim].val << " on rank "
6711 << distRank[i / nDim].rank << endl;
6715 for(MInt i = 0; i < in_noPoints; i += nDim) {
6716 // If I'm the right rank, move the point and broadcast the new coordinates
6717 if(distRank[i / nDim].rank == pp->solver().domainId()) {
6718 in_points[i] = pp->solver().a_coordinate(gridCellId[i / nDim], 0);
6719 in_points[i + 1] = pp->solver().a_coordinate(gridCellId[i / nDim], 1);
6720 IF_CONSTEXPR(nDim == 3) { in_points[i + 2] = pp->solver().a_coordinate(gridCellId[i / nDim], 2); }
6722 if(1 < pp->solver().noDomains()) {
6723 MPI_Bcast(&in_points[i], nDim, MPI_DOUBLE, distRank[i / nDim].rank, pp->solver().mpiComm(), AT_,
6728 if(1 < pp->solver().noDomains()) {
6729 MPI_Bcast(&in_points[i], nDim, MPI_DOUBLE, distRank[i / nDim].rank, pp->solver().mpiComm(), AT_,
6735 m_log << "After correction new coordinates for each point are..." << endl;
6736 for(MInt i = 0; i < in_noPoints; i += nDim) {
6737 m_log << "Coordinate for point " << i / nDim << " is " << in_points[i] << ", " << in_points[i + 1] << ", "
6738 << in_points[i + 2] << endl;
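// Editor's sketch (added, not part of the solver): a minimal plain-MPI illustration of the
// distance/rank reduction used above. Each rank proposes its local squared distance, MPI_MINLOC
// keeps the smallest value together with the owning rank, and that rank then broadcasts the
// corrected point. The wrapper arguments (AT_, name strings) used in the listing are omitted here.
#include <cstdio>
#include <mpi.h>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  struct { double val; int rank; } candidate{}; // layout required by MPI_DOUBLE_INT
  candidate.val = 1.0 + rank;                   // hypothetical local squared distance
  candidate.rank = rank;

  // Keep the globally smallest distance and remember which rank owns it.
  MPI_Allreduce(MPI_IN_PLACE, &candidate, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD);

  // The owning rank supplies the corrected coordinates; all ranks receive them.
  double point[3] = {0.0, 0.0, 0.0};
  if(rank == candidate.rank) { point[0] = 1.0; point[1] = 2.0; point[2] = 3.0; }
  MPI_Bcast(point, 3, MPI_DOUBLE, candidate.rank, MPI_COMM_WORLD);

  std::printf("rank %d: closest cell on rank %d, point (%g, %g, %g)\n", rank, candidate.rank,
              point[0], point[1], point[2]);
  MPI_Finalize();
  return 0;
}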
6749template <MInt nDim, class ppType>
6768 mTerm(1, AT_, "Mismatching spray intervals");
6781template <MInt nDim, class ppType>
6784#if not defined(MAIA_DISABLE_LB)
6786 pp->solver().saveCoarseSolution();