m_material = std::make_unique<MaterialState<nDim>>(solverId());

m_nonDimensional = false;
m_nonDimensional = Context::getSolverProperty<MBool>("nonDimensionaliseLPT", solverId(), AT_, &m_nonDimensional);

m_skewnessTimeStep = Context::getSolverProperty<MInt>("skewnessTimeStep", solverId(), AT_, &m_skewnessTimeStep);

m_cells.setLptCollectorCoupling(m_massCoupling, m_momentumCoupling, m_heatCoupling);

m_cells.setLptCollectorNoSpecies(1);

m_cells.setLptCollectorSlopes();

if(m_spawnParticles) {

if(m_momentumCoupling || m_heatCoupling) {
  readMomentumCouplingProps();

if(m_activeSecondaryBUp || m_activePrimaryBUp) {
  m_sprayModel->init(this);
  m_log << "Spray model has been activated." << std::endl;
m_cells.reset(grid().maxNoCells());
this->setHaloCellsOnInactiveRanks();

initGridProperties();

updateExchangeCells();
grid().updateLeafCellExchange();

grid().findEqualLevelNeighborsParDiagonal(false);

if(a_timeStepComputationInterval() > 0 && globalTimeStep % a_timeStepComputationInterval() == 0) {
  forceTimeStep(computeTimeStep());

countParticlesInCells();
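// Reduce the local cell and particle counts over all ranks and report the global sum,
// minimum, maximum, and average on rank 0.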
MInt noCells = a_noCells();
MInt minCells = noCells;
MInt maxCells = noCells;
MInt noPart = a_noParticles();
MInt minParts = noPart;
MInt maxParts = noPart;

MPI_Allreduce(MPI_IN_PLACE, &noCells, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noCells");
MPI_Allreduce(MPI_IN_PLACE, &noPart, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noPart");
MPI_Allreduce(MPI_IN_PLACE, &maxCells, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxCells");
MPI_Allreduce(MPI_IN_PLACE, &minCells, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minCells");
MPI_Allreduce(MPI_IN_PLACE, &maxParts, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxParts");
MPI_Allreduce(MPI_IN_PLACE, &minParts, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minParts");

if(domainId() == 0) {
  cerr << "LPT cell-statistics : " << minCells << " - " << maxCells << " avg " << noCells / noDomains() << endl;
  cerr << "LPT particle-statistics : " << minParts << " - " << maxParts << " avg " << noPart / noDomains() << endl;
m_cells.reset(grid().maxNoCells());
ASSERT(grid().tree().size() < grid().maxNoCells(), "Increase collector size!");
for(MInt cellId = 0; cellId < grid().noInternalCells(); cellId++) {
  a_isHalo(cellId) = false;
  a_isValidCell(cellId) = true;
  a_isWindow(cellId) = false;
  a_regridTrigger(cellId) = false;
  a_noParticlesInCell(cellId) = 0;
  a_noEllipsoidsInCell(cellId) = 0;
  a_bndryCellId(cellId) = -1;
  for(MInt n = 0; n < nDim; n++) {
    a_fluidVelocity(cellId, n) = 0.0;
  a_fluidDensity(cellId) = 0.0;
  a_fluidPressure(cellId) = 0.0;
  a_fluidTemperature(cellId) = 0.0;
  a_fluidSpecies(cellId) = 0.0;

  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(cellId, i) = 0;
    a_workFlux(cellId) = 0;
  a_heatFlux(cellId) = 0;
  a_massFlux(cellId) = 0;

  for(MInt varId = 0; varId < nDim; varId++) {
    for(MInt dir = 0; dir < nDim; dir++) {
      a_velocitySlope(cellId, varId, dir) = F0;
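// Halo cells beyond noInternalCells() receive the same defaults, but are flagged as halo cells.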
for(MInt cellId = grid().noInternalCells(); cellId < grid().tree().size(); cellId++) {
  a_isHalo(cellId) = true;
  a_isWindow(cellId) = false;
  a_isValidCell(cellId) = true;
  a_regridTrigger(cellId) = false;
  a_noParticlesInCell(cellId) = 0;
  a_noEllipsoidsInCell(cellId) = 0;
  a_bndryCellId(cellId) = -1;
  for(MInt n = 0; n < nDim; n++) {
    a_fluidVelocity(cellId, n) = 0.0;
  a_fluidDensity(cellId) = 0.0;
  a_fluidPressure(cellId) = 0.0;
  a_fluidTemperature(cellId) = 0.0;
  a_fluidSpecies(cellId) = 0.0;

  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(cellId, i) = 0;
    a_workFlux(cellId) = 0;
  a_heatFlux(cellId) = 0;
  a_massFlux(cellId) = 0;

  for(MInt varId = 0; varId < nDim; varId++) {
    for(MInt dir = 0; dir < nDim; dir++) {
      a_velocitySlope(cellId, varId, dir) = F0;
m_partList.reserve(static_cast<MUlong>(m_maxNoParticles));
if(m_ellipsoids) m_partListEllipsoid.reserve(static_cast<MUlong>(m_maxNoParticles));

intOrder = Context::getSolverProperty<MInt>("particleInterpolationOrder", solverId(), AT_, &intOrder);
m_partList[0].s_interpolationOrder = intOrder;

intMethod = Context::getSolverProperty<MInt>("particleInterpolationMethod", solverId(), AT_, &intMethod);
m_partList[0].s_interpolationMethod = intMethod;

MFloat distFactorImp = 0.5;

Context::getSolverProperty<MFloat>("particleInterpolationDistFactor", solverId(), AT_, &distFactorImp);
m_partList[0].s_distFactorImp = distFactorImp;

m_partList[0].s_backPtr = this;

m_partListEllipsoid[0].s_interpolationOrder = intOrder;
m_partListEllipsoid[0].s_interpolationMethod = intMethod;
m_partListEllipsoid[0].s_backPtr = this;
m_partListEllipsoid[0].s_distFactorImp = distFactorImp;
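// Non-dimensional setups read Re, Frm, Pr, Sc, and We from the property file; otherwise the
// reference Reynolds number defaults to unity and the gravity vector is read directly.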
if(m_nonDimensional) {
  const MFloat Re = Context::getSolverProperty<MFloat>("Re", solverId(), AT_);
  m_partList[0].s_Re = Re;
  if(m_ellipsoids) m_partListEllipsoid[0].s_Re = Re;

  for(MInt i = 0; i < nDim; i++) {
    const MFloat Frm = Context::getSolverProperty<MFloat>("Frm", solverId(), AT_, i);
    m_partList[0].s_Frm[i] = Frm;
    if(m_ellipsoids) m_partListEllipsoid[0].s_Frm[i] = Frm;
    m_FrMag += POW2(Frm);
  m_FrMag = sqrt(m_FrMag);

  if(m_heatCoupling || m_evaporation) {
    m_partList[0].s_Pr = Context::getSolverProperty<MFloat>("Pr", solverId(), AT_);
    m_partList[0].s_Sc = Context::getSolverProperty<MFloat>("Sc", solverId(), AT_);

  if(m_activeSecondaryBUp) m_partList[0].s_We = Context::getSolverProperty<MFloat>("We", solverId(), AT_);

  m_partList[0].s_Re = F1;
  if(m_ellipsoids) m_partListEllipsoid[0].s_Re = F1;
  MFloat defaultGravity = 0.0;

  for(MInt i = 0; i < nDim; i++) {
    gravity = Context::getSolverProperty<MFloat>("particleGravity", solverId(), AT_, &defaultGravity, i);
    m_partList[0].s_Frm[i] = gravity;
    if(m_ellipsoids) m_partListEllipsoid[0].s_Frm[i] = gravity;
    m_FrMag += POW2(gravity);
  m_FrMag = sqrt(m_FrMag);

lengthFactor = Context::getSolverProperty<MFloat>("lengthFactor", solverId(), AT_, &lengthFactor);
m_partList[0].s_lengthFactor = lengthFactor;
if(m_ellipsoids) m_partListEllipsoid[0].s_lengthFactor = lengthFactor;
constexpr MInt maxNoSurfaces = 1;
mAlloc(m_bndryCells, m_maxNoBndryCells, nDim, 0, 0, maxNoSurfaces, 0, "m_bndryCells", AT_);

m_noOuterBndryCells = 0;
m_bndryCells->resetSize(0);
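// Allocate per-neighbor-domain MPI exchange buffers and request handles for the particle transfer.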
if(noDomains() < 1) {

const MInt noNghbrDomains = grid().noNeighborDomains();

m_queueToSend.resize(noNghbrDomains);
m_sendSize.resize(noNghbrDomains);
m_recvSize.resize(noNghbrDomains);
m_sendBuffer.resize(noNghbrDomains);
m_recvBuffer.resize(noNghbrDomains);
m_intSendBuffer.resize(noNghbrDomains);
m_intRecvBuffer.resize(noNghbrDomains);
m_mpi_reqSendFloat.resize(noNghbrDomains);
m_mpi_reqRecvFloat.resize(noNghbrDomains);
m_mpi_reqSendSize.resize(noNghbrDomains);
m_mpi_reqSendInt.resize(noNghbrDomains);
m_mpi_reqRecvInt.resize(noNghbrDomains);
m_mpi_statusProbe.resize(noNghbrDomains);

const MInt bufferSize = !m_ellipsoids ? bufSize<LPTSpherical<nDim>>()
const MInt intBufferSize = !m_ellipsoids
                               ? intBufSize<LPTSpherical<nDim>>()

for(MInt i = 0; i < noNghbrDomains; i++) {
  m_sendBuffer[i] = make_unique<MFloat[]>(bufferSize);
  m_recvBuffer[i] = make_unique<MFloat[]>(bufferSize);
  m_intSendBuffer[i] = make_unique<MInt[]>(intBufferSize);
  m_intRecvBuffer[i] = make_unique<MInt[]>(intBufferSize);

for(MInt i = 0; i < noNghbrDomains; i++) {
  for(MInt j = 0; j < bufferSize; j++) {
    m_sendBuffer[i][j] = 0.0;
    m_recvBuffer[i][j] = 0.0;
  for(MInt j = 0; j < intBufferSize; j++) {
    m_intSendBuffer[i][j] = 0;
    m_intRecvBuffer[i][j] = 0;
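// For non-blocking communication: determine the number of source terms exchanged per cell and
// allocate MPI request arrays and buffers for source terms, flow data, slopes, and adaptation checks.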
if(m_nonBlockingComm) {
  // Source terms exchanged per cell: mass flux, momentum flux + work flux, heat flux.
  if(m_massCoupling) m_noSourceTerms++;
  if(m_momentumCoupling) m_noSourceTerms = m_noSourceTerms + nDim + 1;
  if(m_heatCoupling) m_noSourceTerms++;

  m_sourceSend.resize(noNghbrDomains);
  m_sourceRecv.resize(noNghbrDomains);

  if(m_sourceSendRequest != nullptr) mDeallocate(m_sourceSendRequest);
  mAlloc(m_sourceSendRequest, noNghbrDomains, "sourceSendReq", AT_);
  if(m_sourceRecvRequest != nullptr) mDeallocate(m_sourceRecvRequest);
  mAlloc(m_sourceRecvRequest, noNghbrDomains, "sourceRecvReq", AT_);

  if(m_flowSendRequest != nullptr) {
  mAlloc(m_flowSendRequest, noNghbrDomains, "flowSendReq", AT_);
  if(m_flowRecvRequest != nullptr) {
  mAlloc(m_flowRecvRequest, noNghbrDomains, "flowRecvReq", AT_);

  if(m_checkAdaptationSendRequest != nullptr) {
  mAlloc(m_checkAdaptationSendRequest, noDomains(), "checkAdapSendReq", AT_);
  if(m_checkAdaptationRecvRequest != nullptr) {
  mAlloc(m_checkAdaptationRecvRequest, noDomains(), "checkAdapRecvReq", AT_);

  m_checkAdaptationSend.resize(noDomains());
  m_checkAdaptationRecv.resize(noDomains());

  if(m_slopesSendRequest != nullptr) {
  mAlloc(m_slopesSendRequest, noNghbrDomains, "slopesSendReq", AT_);
  if(m_slopesRecvRequest != nullptr) {
  mAlloc(m_slopesRecvRequest, noNghbrDomains, "slopesRecvReq", AT_);
if(m_nonBlockingComm) {
  MInt maxNoHaloCells = 0;
  MInt maxNoWindowCells = 0;

  for(MInt i = 0; i < noNghbrDomains; i++) {
    maxNoHaloCells = mMax(maxNoHaloCells, grid().noLeafHaloCells(i));
    maxNoWindowCells = mMax(maxNoWindowCells, grid().noLeafWindowCells(i));
    m_sourceSendRequest[i] = MPI_REQUEST_NULL;
    m_sourceRecvRequest[i] = MPI_REQUEST_NULL;
    m_flowSendRequest[i] = MPI_REQUEST_NULL;
    m_flowRecvRequest[i] = MPI_REQUEST_NULL;
    m_slopesSendRequest[i] = MPI_REQUEST_NULL;
    m_slopesRecvRequest[i] = MPI_REQUEST_NULL;
    windowCellsCnt[i] = grid().noLeafWindowCells(i);
    haloCellsCnt[i] = grid().noLeafHaloCells(i);

  for(MInt d = 0; d < noDomains(); d++) {
    m_checkAdaptationRecvRequest[d] = MPI_REQUEST_NULL;
    m_checkAdaptationSendRequest[d] = MPI_REQUEST_NULL;

  if(m_flowRecv != nullptr) {
  mAlloc(m_flowRecv, noNeighborDomains(), &haloCellsCnt[0], PV.noVars() + m_evaporation, "m_flowRecv", AT_);
  if(m_flowSend != nullptr) {
  mAlloc(m_flowSend, noNeighborDomains(), &windowCellsCnt[0], PV.noVars() + m_evaporation, "m_flowSend", AT_);

  for(MInt i = 0; i < noNghbrDomains; i++) {
    m_sourceSend[i] = make_unique<MFloat[]>(maxNoHaloCells * m_noSourceTerms);
    m_sourceRecv[i] = make_unique<MFloat[]>(maxNoWindowCells * m_noSourceTerms);

  for(MInt i = 0; i < noNghbrDomains; i++) {
    for(MInt j = 0; j < grid().noLeafHaloCells(i) * m_noSourceTerms; j++) {
      m_sourceSend[i][j] = 0.0;
    for(MInt j = 0; j < grid().noLeafWindowCells(i) * m_noSourceTerms; j++) {
      m_sourceRecv[i][j] = 0.0;

  for(MInt i = 0; i < noDomains(); i++) {
    m_checkAdaptationSend[i] = 0;
    m_checkAdaptationRecv[i] = 0;

  if(m_slopesRecv != nullptr) {
  mAlloc(m_slopesRecv, noNeighborDomains(), &haloCellsCnt[0], nDim * nDim, "m_slopesRecv", AT_);
  if(m_slopesSend != nullptr) {
  mAlloc(m_slopesSend, noNeighborDomains(), &windowCellsCnt[0], nDim * nDim, "m_slopesSend", AT_);
if(m_respawn && fullReinit) {

  local.val = (MInt)m_respawnCells.size();
  local.rank = domainId();

  MPI_Allreduce(&local, &global, 1, MPI_2INT, MPI_MAXLOC, mpiComm(), AT_, "local", "global");
  m_respawnDomain = global.rank;
  MInt* particleRespawnCellsPerDomain = nullptr;
  if(domainId() == m_respawnDomain) {
    particleRespawnCellsPerDomain = new MInt[noDomains()];

  MPI_Gather(&local.val, 1, MPI_INT, particleRespawnCellsPerDomain, 1, MPI_INT, m_respawnDomain, mpiComm(), AT_,
             "local.val", "particleRespawnCellsPerDomain");

  if(domainId() == m_respawnDomain) {
    MInt globalNoRespawnCells = 0;
    m_noRespawnDomains = 0;
    for(MInt i = 0; i < noDomains(); ++i) {
      globalNoRespawnCells += particleRespawnCellsPerDomain[i];
      if(particleRespawnCellsPerDomain[i] > 0) {
        m_noRespawnDomains++;

    if(globalNoRespawnCells == 0) {
      stringstream errorMessage;
      errorMessage << "globally no RespawnCells found despite the set properties particleRespawn " << m_respawn
                   << " and general Respawn (particleRespawnType==0) or massiveParallel ";
      mTerm(1, AT_, errorMessage.str());

    m_respawnGlobalDomainOffsets = new MInt[m_noRespawnDomains + 1];
    m_respawnGlobalDomainOffsets[0] = 0;
    m_respawnDomainRanks = new MInt[m_noRespawnDomains];

    for(MInt i = 0; i < noDomains(); ++i) {
      if(particleRespawnCellsPerDomain[i] > 0) {
        m_respawnDomainRanks[counter] = i;
        m_respawnGlobalDomainOffsets[counter + 1] =
            m_respawnGlobalDomainOffsets[counter] + particleRespawnCellsPerDomain[i];

    delete[] particleRespawnCellsPerDomain;

  m_noRespawnDomains = 1;
computeBoundingBox(m_globBbox.begin());

for(MInt n = 0; n < nDim; n++) {
  m_globDomainLength[n] = m_globBbox[n + nDim] - m_globBbox[n];
  periodic += grid().raw().periodicCartesianDir(n);

const MInt noParticles = loadParticleRestartFile();
m_log << noParticles << " particles loaded from restart-file." << endl;

calculateTerminalVelocities();
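// Initial condition: m_initializationMethod selects how particles are seeded
// (per-cell seeding, inflow-boundary seeding, or reading positions from file).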
MInt noParticles = 0;

switch(m_initializationMethod) {

  MFloat* particleDiameters = nullptr;

  MFloat particleEllipsoidInitNoPartperCell = F0;
  vector<pair<MFloat, MFloat>> ellipsoidsInitVector;
  MInt noEllipsoidCat = 0;

  MFloat particleInitNoPartperCell = 0.0;
  particleInitNoPartperCell =
      Context::getSolverProperty<MFloat>("particleInitNoPartperCell", solverId(), AT_, &particleInitNoPartperCell);

  if(particleInitNoPartperCell > 0) {
    m_log << "LPT.cpp:initialCondition(): Initializing " << particleInitNoPartperCell << " particles per cell."

  array<MFloat, 10> particleDiametersDefaults = {0.00001, 0.00002, 0.00003, 0.00004, 0.00005,
                                                 0.00006, 0.00007, 0.00008, 0.00009, 0.00010};
  MInt noDifferentPart = 10;

    particleDiameters = new MFloat[noDifferentPart];
    for(MInt i = 0; i < noDifferentPart; i++) {
      particleDiameters[i] = Context::getSolverProperty<MFloat>("particleDiameters", solverId(), AT_, i);

    particleDiameters = new MFloat[noDifferentPart];
    // Copy all noDifferentPart default diameters (end iterator is one past the last element).
    copy(&particleDiametersDefaults[0], &particleDiametersDefaults[0] + noDifferentPart, particleDiameters);

  MIntScratchSpace particleInitQuotasOfDiameters(noDifferentPart, AT_, "particleInitQuotasOfDiameters");

  if(count == noDifferentPart) {
    for(MInt i = 0; i < count; i++) {
      particleInitQuotasOfDiameters[i] =
          Context::getSolverProperty<MInt>("particleInitRatesOfDiameters", solverId(), AT_, i);

    stringstream errorMessage;
    errorMessage << " LPT::initialize() with m_particleInitializationMethod"
                 << " = -1 : Try to initialize particles with " << noDifferentPart
                 << " different diameters (property particleDiameters), but found " << count
                 << " different rates (property particleInitRatesOfDiameters) " << endl;
    mTerm(1, AT_, errorMessage.str());

  for(MInt i = 0; i < noDifferentPart; ++i) {
    particleInitQuotasOfDiameters[i] = 1;

  accumulate(&particleInitQuotasOfDiameters[0], &particleInitQuotasOfDiameters[0] + noDifferentPart, 0);
  dia = new MFloat[sumOfRates];

  for(MInt i = 0; i < noDifferentPart; ++i) {
    for(MInt j = 0; j < particleInitQuotasOfDiameters[i]; ++j) {
      dia[counter] = particleDiameters[i];

  shuffle(dia, &dia[sumOfRates], randomInit());

  calculateTerminalVelocities();
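// Ellipsoidal particles: read semi-minor axes and aspect ratios per category (or use the
// defaults) and build a shuffled list of (semi-minor axis, aspect ratio) pairs for seeding.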
MFloat* ellipsoidSemiMinorAxis = nullptr;
MFloat* ellipsoidAspectRatios = nullptr;
const MInt noEllipsoidCatDefault = 6;
array<MFloat, noEllipsoidCatDefault> ellipsoidSemiMinorAxisDefaults = {
    0.000031748, 0.000039850263, 0.00002773445, 0.000034668, 0.0000259842, 0.000031498026};
array<MFloat, noEllipsoidCatDefault> ellipsoidsAspectRatioDefaults = {2.0, 2.0, 3.0, 3.0, 4.0, 4.0};

particleEllipsoidInitNoPartperCell = F0;
particleEllipsoidInitNoPartperCell = Context::getSolverProperty<MFloat>(
    "particleEllipsoidInitNoPartperCell", solverId(), AT_, &particleEllipsoidInitNoPartperCell);

if(particleEllipsoidInitNoPartperCell > 0) {
  m_log << "LPT.cpp:initialCondition(): Initializing " << particleEllipsoidInitNoPartperCell
        << " ellipsoids per cell." << endl;

       "Error the properties particleEllipsoidSemiMinorAxis and particleEllipsoidAspectRatio "
       " do not have the same count!");

  ellipsoidSemiMinorAxis = new MFloat[noEllipsoidCat];
  ellipsoidAspectRatios = new MFloat[noEllipsoidCat];
  for(MInt i = 0; i < noEllipsoidCat; i++) {
    ellipsoidSemiMinorAxis[i] =
        Context::getSolverProperty<MFloat>("particleEllipsoidSemiMinorAxis", solverId(), AT_, i);
    ellipsoidAspectRatios[i] =
        Context::getSolverProperty<MFloat>("particleEllipsoidAspectRatio", solverId(), AT_, i);

  noEllipsoidCat = noEllipsoidCatDefault;
  ellipsoidSemiMinorAxis = new MFloat[noEllipsoidCat];
  ellipsoidAspectRatios = new MFloat[noEllipsoidCat];
  // Copy all noEllipsoidCat default values (end iterator is one past the last element).
  copy(&ellipsoidSemiMinorAxisDefaults[0], &ellipsoidSemiMinorAxisDefaults[0] + noEllipsoidCat,
       ellipsoidSemiMinorAxis);
  copy(&ellipsoidsAspectRatioDefaults[0], &ellipsoidsAspectRatioDefaults[0] + noEllipsoidCat,
       ellipsoidAspectRatios);

MIntScratchSpace ellipsoidInitQuotasPerCat(noEllipsoidCat, AT_, "particleInitQuotasOfDiameters");

if(count == noEllipsoidCat) {
  for(MInt i = 0; i < count; i++) {
    ellipsoidInitQuotasPerCat[i] =
        Context::getSolverProperty<MInt>("particleEllipsoidInitRatesPerCat", solverId(), AT_, i);

  stringstream errorMessage;
  errorMessage << " LPT::initialize() with m_particleInitializationMethod"
               << " = -1 : Try to initialize particles with " << noEllipsoidCat
               << " different ellipsoidal categories, but found " << count
               << " different rates (property particleEllipsoidInitRatesPerCat) " << endl;
  mTerm(1, AT_, errorMessage.str());

for(MInt i = 0; i < noEllipsoidCat; ++i) {
  ellipsoidInitQuotasPerCat[i] = 1;

for(MInt i = 0; i < noEllipsoidCat; ++i) {
  for(MInt j = 0; j < ellipsoidInitQuotasPerCat[i]; ++j) {
    ellipsoidsInitVector.emplace_back(ellipsoidSemiMinorAxis[i], ellipsoidAspectRatios[i]);

shuffle(ellipsoidsInitVector.begin(), ellipsoidsInitVector.end(), randomInit());

delete[] ellipsoidSemiMinorAxis;
delete[] ellipsoidAspectRatios;
if(particleEllipsoidInitNoPartperCell > 0 || particleInitNoPartperCell > 0) {

  m_geometry->getBoundingBox(&particleInitOffsets[0]);
  for(MInt i = 0; i < nDim * 2; ++i) {
    particleInitOffsets[i] =
        Context::getSolverProperty<MFloat>("particleInitOffsets", solverId(), AT_, &particleInitOffsets[i], i);

  if(m_ellipsoids) sayer = domainId() % noEllipsoidCat;
  MInt noParticleCells = 0;
  MBool randomLocWthCell = true;

  Context::getSolverProperty<MBool>("particleRandLocWthCell", solverId(), AT_, &randomLocWthCell);

  array<MFloat, nDim> particleInitVelocity{};
  for(MInt i = 0; i < nDim; ++i) {
    particleInitVelocity.at(i) =
        Context::getSolverProperty<MFloat>("particleInitVelocity", solverId(), AT_, &particleInitVelocity.at(i), i);

    if(!c_isLeafCell(cellId)) continue;

    MBool hasNoNeighbor = false;
    for(MInt dir = 0; dir < m_noDirs; dir++) {
      if(!c_hasNeighbor(cellId, m_revDir[dir])
         && (c_parentId(cellId) == -1 || !c_hasNeighbor(c_parentId(cellId), m_revDir[dir]))) {
        hasNoNeighbor = true;

    if(hasNoNeighbor) continue;

    for(MInt direction = 0; direction < nDim; ++direction) {
      if(c_coordinate(cellId, direction) < particleInitOffsets[direction]
         || c_coordinate(cellId, direction) > particleInitOffsets[nDim + direction]) {

    if(!inside) continue;

    while((m_partList.size() / MFloat(noParticleCells)) < particleInitNoPartperCell) {
      addParticle(cellId, dia[teller], m_material->densityRatio(), static_cast<MInt>(randomLocWthCell));

      m_partList.back().m_velocity = particleInitVelocity;

      teller = (teller + 1) % sumOfRates;

    shuffle(dia, &dia[sumOfRates], randomInit());

    while((m_partListEllipsoid.size() / MFloat(noParticleCells)) < particleEllipsoidInitNoPartperCell) {
      addEllipsoid(cellId, ellipsoidsInitVector[sayer].first, ellipsoidsInitVector[sayer].second,
                   m_material->densityRatio(), static_cast<MInt>(randomLocWthCell));

      m_partListEllipsoid.back().m_velocity = particleInitVelocity;

      sayer = (sayer + 1) % ellipsoidsInitVector.size();

    shuffle(ellipsoidsInitVector.begin(), ellipsoidsInitVector.end(), randomInit());

  cerr << domainId() << ": Particles initialized: " << noParticles << endl;
  exchangeOffset(noParticles);

  delete[] particleDiameters;
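// Initialization method 1: seed particles at cells adjacent to the inflow boundary,
// i.e. leaf cells without a neighbor in the reverse inflow direction.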
inflowDir = Context::getSolverProperty<MInt>("particleInflowDir", solverId(), AT_, &inflowDir);

  if(!c_isLeafCell(cellId)) continue;
  if(!c_hasNeighbor(cellId, revDir)
     && (c_parentId(cellId) == -1 || !c_hasNeighbor(c_parentId(cellId), revDir))) {
    addParticle(cellId, diam, m_material->densityRatio(), 0);

    for(MInt t = 0; t < nDim; t++) {
      m_partList.back().m_oldVel.at(t) = 0;
      m_partList.back().m_velocity.at(t) = 0;

cerr << "domainId " << domainId() << " created " << noParticles << endl;

if(m_ellipsoids) mTerm(1, AT_, "Init mode 1 not yet implemented for ellipsoidal particles!");

exchangeOffset(noParticles);
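// Initialization methods 3/4: read particle diameters, positions (and, for method 4,
// velocities) from 'part.txt' and assign each particle to the rank whose grid contains it.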
array<MFloat, nDim> thisCoord{};
array<MFloat, nDim> thisVel{};
MFloat thisDiameter = NAN;

readFile.open("part.txt", ios_base::in);

  mTerm(1, AT_, "Error reading particle input file 'part.txt'.");

  readFile >> thisDiameter;
  if((readFile.rdstate() & ifstream::eofbit) != 0) {

  for(MInt i = 0; i < nDim; i++) {
    readFile >> thisCoord.at(i);

  if(m_initializationMethod == 4) {
    for(MInt i = 0; i < nDim; i++) {
      readFile >> thisVel.at(i);

  MInt thisCellId = grid().findContainingLeafCell(&thisCoord[0]);

  if(noDomains() > 1) {
    MInt spawnDomain = domainId();
    MInt minDomain = -1;
    if(thisCellId == -1) {
      spawnDomain = std::numeric_limits<MInt>::max();

    MPI_Allreduce(&spawnDomain, &minDomain, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "spawnDomain", "minDomain");

    if(minDomain != domainId()) {

  if(thisCellId == -1) {
    if(noDomains() == 1) {
      m_log << "Particle initialisation file contains particles outside of the geometry." << endl;

  addParticle(thisCellId, thisDiameter, density);
  for(MInt i = 0; i < nDim; i++) {
    m_partList.back().m_position.at(i) = thisCoord.at(i);
  for(MInt i = 0; i < nDim; i++) {
    m_partList.back().m_velocity.at(i) = thisVel.at(i);
  for(MInt i = 0; i < nDim; i++) {
    m_partList.back().m_accel.at(i) = 0.0;
  for(MInt i = 0; i < nDim; i++) {
    m_partList.back().m_oldPos.at(i) = thisCoord.at(i);
  for(MInt i = 0; i < nDim; i++) {
    m_partList.back().m_oldVel.at(i) = thisVel.at(i);

m_log << noParticles << " particles from file introduced in this solver." << endl;
MInt noEllipsoids = 0;

array<MFloat, nDim> thisCoordE{};
array<MFloat, nDim> thisVelE{};
array<MFloat, nDim> thisOrientE{};
MFloat thisAspectRatio = NAN;
MFloat thisSemiMinorAxis = NAN;

readFile.open("ellipsoids.txt", ios_base::in);

  mTerm(1, AT_, "Error reading ellipsoidal particle input file 'ellipsoids.txt'.");

  readFile >> thisAspectRatio;
  readFile >> thisSemiMinorAxis;
  if((readFile.rdstate() & ifstream::eofbit) != 0) {

  readFile >> densityE;
  for(MInt i = 0; i < nDim; i++) {
    readFile >> thisCoordE.at(i);

  if(m_initializationMethod >= 3) {
    for(MInt i = 0; i < nDim; i++) {
      readFile >> thisOrientE.at(i);
      thisOrientE.at(i) = thisOrientE.at(i) * PI / 180.0;

  if(m_initializationMethod == 4) {
    for(MInt i = 0; i < nDim; i++) {
      readFile >> thisVelE.at(i);

  MInt thisCellId = grid().findContainingLeafCell(&thisCoordE[0]);

  if(noDomains() > 1) {
    MInt spawnDomain = domainId();
    MInt minDomain = -1;
    if(thisCellId == -1) {
      spawnDomain = std::numeric_limits<MInt>::max();

    MPI_Allreduce(&spawnDomain, &minDomain, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "spawnDomain", "minDomain");

    if(minDomain != domainId()) {

  if(thisCellId == -1) {
    if(noDomains() == 1) {
      m_log << "Ellipsoidal particle initialisation file contains particles outside of the geometry." << endl;

  addEllipsoid(thisCellId, thisSemiMinorAxis, thisAspectRatio, densityE);
  for(MInt i = 0; i < nDim; i++) {
    m_partListEllipsoid.back().m_position.at(i) = thisCoordE.at(i);
    m_partListEllipsoid.back().m_oldPos.at(i) = thisCoordE.at(i);
    m_partListEllipsoid.back().m_velocity.at(i) = thisVelE.at(i);
    m_partListEllipsoid.back().m_oldVel.at(i) = thisVelE.at(i);
    m_partListEllipsoid.back().m_accel.at(i) = F0;
    m_partListEllipsoid.back().m_oldAccel.at(i) = F0;
    m_partListEllipsoid.back().m_angularVel.at(i) = F0;
    m_partListEllipsoid.back().m_oldAngularVel.at(i) = F0;
    m_partListEllipsoid.back().m_angularAccel.at(i) = F0;
    m_partListEllipsoid.back().m_oldAngularAccel.at(i) = F0;

  if(m_initializationMethod >= 3) {

    m_partListEllipsoid.back().m_quaternion.at(0) = F1;
    m_partListEllipsoid.back().m_quaternion.at(1) = F0;
    m_partListEllipsoid.back().m_quaternion.at(2) = F0;
    m_partListEllipsoid.back().m_quaternion.at(3) = F0;

  for(MInt i = 0; i < 4; i++) {
    m_partListEllipsoid.back().m_oldQuaternion.at(i) = m_partListEllipsoid.back().m_quaternion.at(i);

m_log << noEllipsoids << " ellipsoidal particles from file introduced in this solver." << endl;

exchangeOffset(noParticles);
ASSERT(m_activePrimaryBUp, "");

     "LPT::initialize() switch variable 'm_particleInitializationMethod' not "
     "matching any case");

if(m_spawnParticles && !m_restart) {
  m_particleResiduum = 1.0;

if(!isActive()) return;

if(domainId() == 0) {
  cerr << "Finding cartesian neighbors for LPT-solver!" << endl;

grid().findCartesianNghbIds();

if(domainId() == 0) {

if(domainId() == 0) {
  cerr << "Writing initial particle file" << endl;

ASSERT(m_freeIndices.empty(), "");
m_adaptationLevel = maxUniformRefinementLevel();
m_forceAdaptation = false;

receiveVelocitySlopes();

countParticlesInCells();

if(m_spawnCellId > -1) {
  a_noParticlesInCell(m_spawnCellId) += 20;
  if(m_ellipsoids) a_noEllipsoidsInCell(m_spawnCellId) += 20;

this->exchangeData(&a_noParticlesInCell(0), 1);
if(m_ellipsoids) this->exchangeData(&a_noEllipsoidsInCell(0), 1);
if(!m_nonBlockingComm) {
  exchangeSourceTerms();

  receiveSourceTerms();

resetSourceTerms(noInternalCells());

for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
  a_bndryCellId(cellId) = -1;

for(MInt id = 0; id < m_bndryCells->size(); id++) {
  const MInt cellId = m_bndryCells->a[id].m_cellId;
  a_bndryCellId(cellId) = 1;
  MInt parentId = c_parentId(cellId);
  while(parentId > -1 && parentId < a_noCells()) {
    a_bndryCellId(parentId) = 1;
    parentId = c_parentId(parentId);

this->exchangeData(&a_bndryCellId(0), 1);

if(m_wallCollisions) {
  m_bndryCells->resetSize(0);
  m_noOuterBndryCells = 0;
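// Sensor setup for grid adaptation: register the LPT sensors (particle count, boundary
// proximity) and their weights in the shared sensor arrays of the Cartesian grid.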
                           std::vector<MFloat>& sensorWeight,
                           std::vector<std::bitset<64>>& sensorCellFlag,
                           std::vector<MInt>& sensorSolverId) {
  MInt noSensors = this->m_noInitialSensors;

  const MInt sensorOffset = (signed)sensors.size();
  ASSERT(sensorOffset == 0 || grid().raw().treeb().noSolvers() > 1, "");
  sensors.resize(sensorOffset + noSensors, vector<MFloat>(grid().raw().m_noInternalCells, F0));
  sensorWeight.resize(sensorOffset + noSensors, -1);
  sensorCellFlag.resize(grid().raw().m_noInternalCells, sensorOffset + noSensors);
  sensorSolverId.resize(sensorOffset + noSensors, solverId());

  if(domainId() == 0) {
    cerr << "Setting " << noSensors << " sensors for LPT-Solver adaptation." << endl;

  for(MInt sen = 0; sen < noSensors; sen++) {
    sensorWeight[sensorOffset + sen] = this->m_sensorWeight[sen];

  ASSERT(m_freeIndices.empty(), "");
  m_freeIndices.clear();

  if(!isActive()) return;

  for(MInt sen = 0; sen < noSensors; sen++) {
    (this->*(this->m_sensorFnPtr[sen]))(sensors, sensorCellFlag, sensorWeight, sensorOffset, sen);
                               std::vector<MFloat>& sensorWeight, MInt sensorOffset, MInt sen) {
  static constexpr MFloat particleLimit = 1;

  std::function<MFloat(const MInt)> noPart = [&](const MInt cellId) {
    return static_cast<MFloat>(a_noParticlesInCell(cellId)) + static_cast<MFloat>(a_noEllipsoidsInCell(cellId));

  this->sensorLimit(sensors, sensorCellFlag, sensorWeight, sensorOffset, sen, noPart, particleLimit, &m_bandWidth[0],

                                std::vector<MFloat>& sensorWeight, MInt sensorOffset, MInt sen) {
  bandWidth[maxRefinementLevel() - 1] = range1;
  for(MInt i = maxRefinementLevel() - 2; i >= 0; i--) {
    bandWidth[i] = (bandWidth[i + 1] / 2) + 1 + range2;

  static constexpr MFloat limit = 1;
  std::function<MFloat(const MInt)> isBndry = [&](const MInt cellId) {
    return static_cast<MFloat>(a_bndryCellId(cellId));

  this->sensorLimit(sensors, sensorCellFlag, sensorWeight, sensorOffset, sen, isBndry, limit, &bandWidth[0], true,
this->compactCells();

m_freeIndices.clear();

grid().updateOther();

updateDomainInfo(grid().domainId(), grid().noDomains(), grid().mpiComm(), AT_);
this->checkNoHaloLayers();

if(!isActive()) return;

grid().updateLeafCellExchange();

if(m_spawnCellId > -1) {
  a_noParticlesInCell(m_spawnCellId) += 1;
  if(m_ellipsoids) a_noEllipsoidsInCell(m_spawnCellId) += 1;

this->exchangeData(&a_noParticlesInCell(0), 1);
this->exchangeData(&a_bndryCellId(0), 1);
if(m_ellipsoids) this->exchangeData(&a_noEllipsoidsInCell(0), 1);

resetSourceTerms(noInternalCells());

m_adaptationLevel++;
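// After adaptation: rebuild the neighbor caches and re-associate every particle with the
// leaf cell that now contains its position.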
if(!isActive()) return;

if(domainId() == 0) {
  cerr << "Finding cartesian neighbors for LPT-solver!" << endl;

grid().findCartesianNghbIds();

updateExchangeCells();

m_cellToNghbrHood.clear();
m_cellToNghbrHoodInterpolation.clear();

for(MInt i = 0; i < a_noParticles(); i++) {
  m_partList[i].m_neighborList.clear();
  const MInt cellId = grid().findContainingLeafCell(&m_partList[i].m_position[0]);
  ASSERT(cellId > -1, "");
  m_partList[i].m_cellId = cellId;
  m_partList[i].m_oldCellId = cellId;
  ASSERT(m_partList[i].m_cellId > 0, "ERROR: Illegal cellId after adaptation!");
  ASSERT(c_isLeafCell(cellId) && c_level(cellId) == maxRefinementLevel(), "");

for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
  m_partListEllipsoid[i].m_neighborList.clear();
  const MInt cellId = grid().findContainingLeafCell(&m_partListEllipsoid[i].m_position[0]);
  ASSERT(cellId > -1, "");
  m_partListEllipsoid[i].m_cellId = cellId;
  m_partListEllipsoid[i].m_oldCellId = cellId;
  ASSERT(m_partListEllipsoid[i].m_cellId > 0, "ERROR: Illegal cellId after adaptation!");
  ASSERT(c_isLeafCell(cellId) && c_level(cellId) == maxRefinementLevel(), "");

countParticlesInCells();

updateFluidFraction();
for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
  a_bndryCellId(cellId) = -1;

MInt noCells = a_noCells();
MInt minCells = noCells;
MInt maxCells = noCells;
MInt noPart = a_noParticles();
MInt minParts = noPart;
MInt maxParts = noPart;
MInt noEllips = a_noEllipsoidalParticles();
MInt minEllips = noEllips;
MInt maxEllips = noEllips;

MPI_Allreduce(MPI_IN_PLACE, &noCells, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noCells");
MPI_Allreduce(MPI_IN_PLACE, &noPart, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noPart");
MPI_Allreduce(MPI_IN_PLACE, &maxCells, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxCells");
MPI_Allreduce(MPI_IN_PLACE, &minCells, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minCells");
MPI_Allreduce(MPI_IN_PLACE, &maxParts, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxParts");
MPI_Allreduce(MPI_IN_PLACE, &minParts, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minParts");

MPI_Allreduce(MPI_IN_PLACE, &noEllips, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noEllips");
MPI_Allreduce(MPI_IN_PLACE, &maxEllips, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxEllips");
MPI_Allreduce(MPI_IN_PLACE, &minEllips, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minEllips");

if(domainId() == 0) {
  cerr << "LPT cell-statistics : " << minCells << " - " << maxCells << " avg " << noCells / noDomains() << endl;
  cerr << "LPT particle-statistics : " << minParts << " - " << maxParts << " avg " << noPart / noDomains() << endl;

  cerr << "LPT ellipsoid-statistics : " << minEllips << " - " << maxEllips << " avg " << noEllips / noDomains()

grid().resizeGridMap(m_cells.size());

grid().swapGridIds(cellId0, cellId1);
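// Refinement: create solver cells for the newly created grid children, initialize their fluid
// data, and distribute the parent's particle counts and source terms among them.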
const MInt solverCellId = grid().tree().grid2solver(gridCellId);

ASSERT(solverCellId > -1 && solverCellId < m_cells.size(), "solverCellId is: " << solverCellId);

MInt noNewChildren = 0;

for(MInt child = 0; child < grid().m_maxNoChilds; child++) {
  const MInt gridChildId = grid().raw().treeb().child(gridCellId, child);
  if(gridChildId == -1) continue;

  if(!grid().raw().a_hasProperty(gridChildId, Cell::WasNewlyCreated)
     && grid().raw().a_hasProperty(gridCellId, Cell::IsPartLvlAncestor)) {

  if(!isActive()) ASSERT(grid().raw().a_isHalo(gridChildId), "");

  if(!grid().solverFlag(gridChildId, solverId())) continue;

  const MInt solverChildId = this->createCellId(gridChildId);

  for(MInt n = 0; n < nDim; n++) {
    a_fluidVelocity(solverChildId, n) = 0.0;
  a_fluidDensity(solverChildId) = 0.0;
  a_fluidPressure(solverChildId) = 0.0;
  a_fluidTemperature(solverChildId) = 0.0;
  a_fluidSpecies(solverChildId) = 0.0;

  a_isValidCell(solverChildId) = true;

  a_bndryCellId(solverChildId) = a_bndryCellId(solverCellId);

  for(MInt varId = 0; varId < nDim; varId++) {
    for(MInt dir = 0; dir < nDim; dir++) {
      a_velocitySlope(solverChildId, varId, dir) = F0;

if(noNewChildren == 0) return;

const MInt noParticles = a_noParticlesInCell(solverCellId);
const MInt noEllipsoids = a_noEllipsoidsInCell(solverCellId);

const MInt noParticlesEach = noParticles / noNewChildren;
const MInt noEllipsoidsEach = noEllipsoids / noNewChildren;

for(MInt child = 0; child < c_noChildren(solverCellId) - 1; child++) {
  const MInt childId = c_childId(solverCellId, child);
  if(childId < 0) continue;
  a_noParticlesInCell(childId) = noParticles > 0 ? noParticlesEach : 0;
  a_noEllipsoidsInCell(childId) = noEllipsoids > 0 ? noEllipsoidsEach : 0;

  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(childId, i) = a_momentumFlux(solverCellId, i) / noNewChildren;
    a_workFlux(childId) = a_workFlux(solverCellId) / noNewChildren;
  if(m_heatCoupling) {
    a_heatFlux(childId) = a_heatFlux(solverCellId) / noNewChildren;
  if(m_massCoupling) {
    a_massFlux(childId) = a_massFlux(solverCellId) / noNewChildren;

// The last child receives the remainder so that the particle counts are conserved.
const MInt remainingParticles = noParticles - ((c_noChildren(solverCellId) - 1) * noParticlesEach);
const MInt remainingEllipsoids = noEllipsoids - ((c_noChildren(solverCellId) - 1) * noEllipsoidsEach);

const MInt remainingChild = c_childId(solverCellId, c_noChildren(solverCellId) - 1);
if(remainingChild > 0) {
  a_noParticlesInCell(remainingChild) = noParticles > 0 ? remainingParticles : 0;
  a_noEllipsoidsInCell(remainingChild) = noEllipsoids > 0 ? remainingEllipsoids : 0;

  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(remainingChild, i) = a_momentumFlux(solverCellId, i) / noNewChildren;
    a_workFlux(remainingChild) = a_workFlux(solverCellId) / noNewChildren;
  if(m_heatCoupling) {
    a_heatFlux(remainingChild) = a_heatFlux(solverCellId) / noNewChildren;
  if(m_massCoupling) {
    a_massFlux(remainingChild) = a_massFlux(solverCellId) / noNewChildren;

// The parent no longer holds particles or source terms after refinement.
if(noNewChildren > 0) {
  a_noParticlesInCell(solverCellId) = 0;
  a_noEllipsoidsInCell(solverCellId) = 0;

  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(solverCellId, i) = 0;
    a_workFlux(solverCellId) = 0;
  if(m_heatCoupling) {
    a_heatFlux(solverCellId) = 0;
  if(m_massCoupling) {
    a_massFlux(solverCellId) = 0;
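// Coarsening: accumulate the children's particle counts and source terms back into the
// parent cell before the child cells are removed.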
ASSERT(grid().raw().a_isHalo(gridCellId), "");

const MInt solverCellId = grid().tree().grid2solver(gridCellId);

ASSERT(solverCellId > -1 && solverCellId < m_cells.size(), "solverCellId is: " << solverCellId);

a_noParticlesInCell(solverCellId) = 0;
a_noEllipsoidsInCell(solverCellId) = 0;
if(m_momentumCoupling) {
  for(MInt i = 0; i < nDim; i++) {
    a_momentumFlux(solverCellId, i) = 0;
  a_workFlux(solverCellId) = 0;
if(m_heatCoupling) {
  a_heatFlux(solverCellId) = 0;
if(m_massCoupling) {
  a_massFlux(solverCellId) = 0;

for(MInt c = 0; c < grid().m_maxNoChilds; c++) {
  const MInt childId = c_childId(solverCellId, c);
  if(childId < 0) continue;

  a_noParticlesInCell(solverCellId) += a_noParticlesInCell(childId);
  a_noEllipsoidsInCell(solverCellId) += a_noEllipsoidsInCell(childId);
  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(solverCellId, i) += a_momentumFlux(childId, i);
    a_workFlux(solverCellId) += a_workFlux(childId);
  if(m_heatCoupling) {
    a_heatFlux(solverCellId) += a_heatFlux(childId);
  if(m_massCoupling) {
    a_massFlux(solverCellId) += a_massFlux(childId);

  this->removeCellId(childId);

ASSERT(grid().raw().a_isHalo(gridCellId), "");

const MInt solverCellId = grid().tree().grid2solver(gridCellId);

ASSERT(gridCellId > -1 && gridCellId < grid().raw().treeb().size() && solverCellId > -1
           && solverCellId < m_cells.size() && grid().tree().solver2grid(solverCellId) == gridCellId,
ASSERT(c_noChildren(solverCellId) == 0, "");
this->removeCellId(solverCellId);
std::ignore = gridCellId;
std::ignore = coords[0];
std::ignore = level;
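// Swap all per-cell LPT data (properties, particle counts, fluid variables, source terms,
// and velocity slopes) between the two collector entries.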
std::swap(m_cells.properties(cellId1), m_cells.properties(cellId0));

std::swap(m_cells.volumeFraction(cellId1), m_cells.volumeFraction(cellId0));
std::swap(m_cells.noParticles(cellId1), m_cells.noParticles(cellId0));
std::swap(m_cells.noEllipsoids(cellId1), m_cells.noEllipsoids(cellId0));

for(MInt n = 0; n < PV.noVars(); n++) {
  std::swap(m_cells.fluidVariable(cellId1, n), m_cells.fluidVariable(cellId0, n));

std::swap(m_cells.fluidSpecies(cellId1), m_cells.fluidSpecies(cellId0));

if(m_massCoupling) {
  std::swap(m_cells.massFlux(cellId1), m_cells.massFlux(cellId0));
if(m_heatCoupling) {
  std::swap(m_cells.heatFlux(cellId1), m_cells.heatFlux(cellId0));
if(m_momentumCoupling) {
  for(MInt i = 0; i < nDim; i++) {
    std::swap(m_cells.momentumFlux(cellId1, i), m_cells.momentumFlux(cellId0, i));
  std::swap(m_cells.workFlux(cellId1), m_cells.workFlux(cellId0));

std::swap(m_cells.bndryCellId(cellId1), m_cells.bndryCellId(cellId0));

for(MInt varId = 0; varId < nDim; varId++) {
  for(MInt dim = 0; dim < nDim; dim++) {
    std::swap(m_cells.velocitySlope(cellId1, varId, dim), m_cells.velocitySlope(cellId0, varId, dim));
ASSERT(isActive(), "solver is not active");

countParticlesInCells();

const MInt noCellsGrid = grid().raw().treeb().size();
const MInt offset = noCellsGrid * solverId();

for(MInt cellId = 0; cellId < noInternalCells(); cellId++) {
  const MInt gridCellId = grid().tree().solver2grid(cellId);
  const MInt id = gridCellId + offset;
  solverCellWeight[id] = m_weightBaseCell * m_weightMulitSolverFactor;
  if(!c_isLeafCell(cellId)) continue;
  solverCellWeight[id] = m_weightLeafCell * m_weightMulitSolverFactor;
  if(a_noParticlesInCell(cellId) == 0 && a_noEllipsoidsInCell(cellId) == 0) continue;
  solverCellWeight[id] =
      m_weightMulitSolverFactor
      * (m_weightParticleCell + (a_noParticlesInCell(cellId) + a_noEllipsoidsInCell(cellId)) * m_weightParticle);

if(m_engineSetup && m_sprayModel != nullptr && m_spawnCellId > -1) {
  const MInt gridCellId = grid().tree().solver2grid(m_spawnCellId);
  const MInt id = gridCellId + offset;
  solverCellWeight[id] += m_weightSpawnCell * m_weightMulitSolverFactor;
                              const MBool NotUsed(allTimings)) {
  const MString namePrefix = "b" + std::to_string(solverId()) + "_";

  const MFloat load = returnLoadRecord();
  const MFloat idle = returnIdleRecord();

  solverTimings.emplace_back(namePrefix + "loadLptSolver", load);
  solverTimings.emplace_back(namePrefix + "idleLptSolver", idle);

#ifdef MAIA_TIMER_FUNCTION
  solverTimings.emplace_back(namePrefix + "timeIntegration", RETURN_TIMER_TIME(m_timers[Timers::TimeInt]));
  solverTimings.emplace_back(namePrefix + "motion", RETURN_TIMER_TIME(m_timers[Timers::Motion]));
  solverTimings.emplace_back(namePrefix + "energy", RETURN_TIMER_TIME(m_timers[Timers::Energy]));
  solverTimings.emplace_back(namePrefix + "injection", RETURN_TIMER_TIME(m_timers[Timers::Injection]));
  solverTimings.emplace_back(namePrefix + "secBreakUp", RETURN_TIMER_TIME(m_timers[Timers::Breakup]));
  solverTimings.emplace_back(namePrefix + "wall", RETURN_TIMER_TIME(m_timers[Timers::Wall]));
  solverTimings.emplace_back(namePrefix + "source", RETURN_TIMER_TIME(m_timers[Timers::SourceTerms]));
  solverTimings.emplace_back(namePrefix + "regridExc", RETURN_TIMER_TIME(m_timers[Timers::Exchange2]));
  solverTimings.emplace_back(namePrefix + "exchange", RETURN_TIMER_TIME(m_timers[Timers::Exchange]));
names[0] = "lpt_cell";

names[1] = "lpt_particleCell";

names[2] = "lpt_particle";

if(m_weightSourceCells) {
  names[3] = "lpt_source";

if(noLoadTypes() != count) {
  TERMM(1, "Count does not match noLoadTypes.");

if(m_limitWeights) {
  weights[0] = mMax(weights[0], 0.01 * weights[1]);
for(MInt type = 0; type < noLoadTypes(); type++) {
  loadQuantities[type] = 0;

MInt noLeafCells = 0;
for(MInt cellId = 0; cellId < grid().noInternalCells(); cellId++) {
  if(c_isLeafCell(cellId) && a_isValidCell(cellId)) {

loadQuantities[0] = noLeafCells;

MInt noCellsWithParticles = 0;
MInt noCellsWithSources = 0;

for(MInt cellId = 0; cellId < grid().noInternalCells(); cellId++) {
  if(a_noParticlesInCell(cellId) == 0 && a_noEllipsoidsInCell(cellId) == 0) {
    if(m_massCoupling && a_massFlux(cellId) < 0.0) {
      noCellsWithSources++;

  noCellsWithParticles++;

loadQuantities[1] = noCellsWithParticles;
loadQuantities[2] = a_noSphericalParticles() + a_noEllipsoidalParticles();

if(m_weightSourceCells) {
  loadQuantities[3] = noCellsWithSources;
ASSERT(isActive(), "solver is not active");

const MInt cellId = grid().tree().grid2solver(gridCellId);

if(cellId < 0 || cellId >= grid().noInternalCells()) {
  TERMM(1, "The given cell id is invalid.");

if(c_isLeafCell(cellId) && a_isValidCell(cellId)) {
  cellLoad = weights[0];

if(a_noParticlesInCell(cellId) > 0 || a_noEllipsoidsInCell(cellId) > 0) {
  cellLoad = weights[1];
  cellLoad += a_noParticlesInCell(cellId) * weights[2];
} else if(m_weightSourceCells) {
  if(m_massCoupling && a_massFlux(cellId) < 0.0) {
    cellLoad = weights[3];

if(m_sprayModel != nullptr && cellId == m_spawnCellId) {
  cellLoad += 5 * weights[1];
const MString namePrefix = "b" + std::to_string(solverId()) + "_";

const MInt noInternalCells = isActive() ? grid().noInternalCells() : 0;

domainInfo.emplace_back(namePrefix + "noLptCells", noInternalCells);

MInt noCellsWithParticles = 0;
MInt noLeafCells = 0;
MInt noParticles = 0;

noParticles = a_noParticles();
for(MInt cellId = 0; cellId < grid().noInternalCells(); cellId++) {
  if(a_noParticlesInCell(cellId) > 0 || a_noEllipsoidsInCell(cellId) > 0) {
    noCellsWithParticles++;

  if(c_isLeafCell(cellId) && a_isValidCell(cellId)) {

domainInfo.emplace_back(namePrefix + "noLptLeafCells", noLeafCells);
domainInfo.emplace_back(namePrefix + "noLptParticleCells", noCellsWithParticles);
domainInfo.emplace_back(namePrefix + "noLptParticles", noParticles);
RECORD_TIMER_START(m_timers[Timers::PreTime]);

m_time += m_timeStep;
ASSERT(m_timeStep > MFloatEps, "TS not set!");

m_timeStepOld = m_timeStep;
m_timeStepUpdated = false;

m_noSendParticles = 0;

resetSourceTerms(0);

for(MInt i = 0; i < a_noParticles(); i++) {
  m_partList[i].m_creationTime = 0.0;
for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
  m_partListEllipsoid[i].m_creationTime = 0.0;

RECORD_TIMER_STOP(m_timers[Timers::PreTime]);
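// Reset the coupling source terms (momentum, work, heat, and mass fluxes) of all cells
// starting at the given offset.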
for(MInt cellId = offset; cellId < a_noCells(); cellId++) {
  if(m_momentumCoupling) {
    for(MInt i = 0; i < nDim; i++) {
      a_momentumFlux(cellId, i) = 0;
    a_workFlux(cellId) = 0;
  if(m_heatCoupling) {
    a_heatFlux(cellId) = 0;
  if(m_massCoupling) {
    a_massFlux(cellId) = 0;
if((m_partList.empty() || m_partListEllipsoid.empty()) && !m_spawnParticles && !m_activePrimaryBUp

if(m_skewnessTimeStep != 0 && globalTimeStep < m_skewnessTimeStep) return true;

RECORD_TIMER_START(m_timers[Timers::TimeInt]);

if(m_sleepLPT > 0.0) {
  cerr0 << "Sleeping LPT-solver for " << m_sleepLPT << " milli-seconds!" << endl;
  std::this_thread::sleep_for(std::chrono::milliseconds(m_sleepLPT));

  cerr0 << "Skipping LPT execution!" << endl;

MBool completed = false;
switch(m_noSolutionSteps) {

    completed = oneStageSolutionStep();

    completed = fiveStageSolutionStep();

    mTerm(1, AT_, "SolutionStep method not implemented in LPT!");

RECORD_TIMER_STOP(m_timers[Timers::TimeInt]);
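// The staged solution step advances the particles in sub-steps: primary break-up, motion and
// wall collisions, particle exchange, evaporation, coupling terms, secondary break-up,
// spawning, respawn, and removal of invalid particles.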
NEW_TIMER_GROUP_STATIC(t_part, "particle timestep");
NEW_TIMER_STATIC(partTS, "particle timestep", t_part);
NEW_SUB_TIMER_STATIC(spawn, "spawn", partTS);
NEW_SUB_TIMER_STATIC(motion, "motion", partTS);
NEW_SUB_TIMER_STATIC(transfer, "transfer", partTS);
NEW_SUB_TIMER_STATIC(evap, "evaporation", partTS);
NEW_SUB_TIMER_STATIC(couplingTerms, "couplingTerms", partTS);
NEW_SUB_TIMER_STATIC(collision1, "wall-collision", partTS);
NEW_SUB_TIMER_STATIC(primBU, "primaryBreakUp", partTS);
NEW_SUB_TIMER_STATIC(secBU, "secondaryBreakUp", partTS);
NEW_SUB_TIMER_STATIC(respawn, "particle-respawn", partTS);
NEW_SUB_TIMER_STATIC(deleteP, "Remove-particles", partTS);

RECORD_TIMER_START(partTS);

switch(m_solutionStep) {

    RECORD_TIMER_START(primBU);

    if(m_activePrimaryBUp) {

    RECORD_TIMER_STOP(primBU);

    RECORD_TIMER_STOP(partTS);

    receiveVelocitySlopes();

    if(m_nonBlockingComm && m_activePrimaryBUp) {
      RECORD_TIMER_START(transfer);
      receiveParticles<true>();
      m_primaryExchange = false;
      RECORD_TIMER_STOP(transfer);

    RECORD_TIMER_START(motion);
    RECORD_TIMER_STOP(motion);

    RECORD_TIMER_START(collision1);
    RECORD_TIMER_STOP(collision1);

    if(m_nonBlockingComm) {
      RECORD_TIMER_START(transfer);
      exchangeParticles(false, 0);
      RECORD_TIMER_STOP(transfer);

    RECORD_TIMER_STOP(partTS);

    if(!m_nonBlockingComm) {
      RECORD_TIMER_START(transfer);
      exchangeParticles(false, 0);
      RECORD_TIMER_STOP(transfer);

    RECORD_TIMER_START(transfer);
    receiveParticles<false>();
    RECORD_TIMER_STOP(transfer);

    RECORD_TIMER_START(evap);
    RECORD_TIMER_STOP(evap);

    if(m_nonBlockingComm) {
      RECORD_TIMER_START(couplingTerms);
      RECORD_TIMER_STOP(couplingTerms);

    if(this->m_adaptation && this->m_noSensors > 0) {
      checkRegridTrigger();

    RECORD_TIMER_STOP(partTS);

    if(!m_nonBlockingComm) {
      RECORD_TIMER_START(couplingTerms);
      RECORD_TIMER_STOP(couplingTerms);

    RECORD_TIMER_START(transfer);
    receiveSourceTerms();
    RECORD_TIMER_STOP(transfer);

    RECORD_TIMER_STOP(partTS);

    RECORD_TIMER_START(secBU);
    RECORD_TIMER_START(m_timers[Timers::Breakup]);
    if(m_activeSecondaryBUp) {
      m_sprayModel->secondaryBreakUp(m_timeStep);
    RECORD_TIMER_STOP(m_timers[Timers::Breakup]);
    RECORD_TIMER_STOP(secBU);

    RECORD_TIMER_START(transfer);
    if(m_collisions < 5 && m_collisions > 0) {
      exchangeParticles(true, 0);
    RECORD_TIMER_STOP(transfer);

    RECORD_TIMER_START(spawn);
    if(m_spawnParticles) {
    RECORD_TIMER_STOP(spawn);

    RECORD_TIMER_START(deleteP);
    removeInvalidParticles(false);
    RECORD_TIMER_STOP(deleteP);

    RECORD_TIMER_START(respawn);
    RECORD_TIMER_STOP(respawn);

    RECORD_TIMER_START(deleteP);
    updateFluidFraction();

    if(a_timeStepComputationInterval() > 0 && globalTimeStep % a_timeStepComputationInterval() == 0) {
      forceTimeStep(computeTimeStep());

    if(m_sprayModel != nullptr) {
      const MInt count = m_sprayModel->m_injDataSize;
      vector<MFloat> tmp(count + m_addInjDataCnt);
      for(MInt i = 0; i < count; i++) {
        tmp[i] = m_sprayModel->m_injData[i];
      tmp[count] = m_sumEvapMass;
      tmp[count + 1] = m_sprayModel->m_noRTsecBreakUp;
      tmp[count + 2] = m_sprayModel->m_noKHsecBreakUp;
      tmp[count + 3] = m_noSendParticles;

    RECORD_TIMER_STOP(deleteP);
    RECORD_TIMER_STOP(partTS);

    mTerm(1, AT_, "Invalid solution Step!");
NEW_TIMER_GROUP_STATIC(t_part, "particle timestep");
NEW_TIMER_STATIC(partTS, "particle timestep", t_part);
NEW_SUB_TIMER_STATIC(spawn, "spawn", partTS);
NEW_SUB_TIMER_STATIC(motion, "motion", partTS);
NEW_SUB_TIMER_STATIC(transfer, "transfer", partTS);
NEW_SUB_TIMER_STATIC(evap, "evaporation", partTS);
NEW_SUB_TIMER_STATIC(couplingTerms, "couplingTerms", partTS);
NEW_SUB_TIMER_STATIC(collision1, "wall-collision", partTS);
NEW_SUB_TIMER_STATIC(primBU, "primaryBreakUp", partTS);
NEW_SUB_TIMER_STATIC(secBU, "secondaryBreakUp", partTS);
NEW_SUB_TIMER_STATIC(respawn, "particle-respawn", partTS);
NEW_SUB_TIMER_STATIC(deleteP, "Remove-particles", partTS);

RECORD_TIMER_START(partTS);

ASSERT(!m_nonBlockingComm, "");

RECORD_TIMER_START(primBU);
if(m_activePrimaryBUp) {
RECORD_TIMER_STOP(primBU);

receiveVelocitySlopes();

RECORD_TIMER_START(motion);
RECORD_TIMER_STOP(motion);

RECORD_TIMER_START(collision1);
RECORD_TIMER_STOP(collision1);

RECORD_TIMER_START(transfer);
exchangeParticles(false, 0);
RECORD_TIMER_STOP(transfer);

RECORD_TIMER_START(evap);
RECORD_TIMER_STOP(evap);

RECORD_TIMER_START(couplingTerms);
RECORD_TIMER_STOP(couplingTerms);

RECORD_TIMER_START(secBU);
RECORD_TIMER_START(m_timers[Timers::Breakup]);
if(m_activeSecondaryBUp) {
  m_sprayModel->secondaryBreakUp(m_timeStep);
RECORD_TIMER_STOP(m_timers[Timers::Breakup]);
RECORD_TIMER_STOP(secBU);

RECORD_TIMER_START(transfer);
if(m_collisions < 5 && m_collisions > 0) {
  exchangeParticles(true, 0);
RECORD_TIMER_STOP(transfer);

RECORD_TIMER_START(spawn);
if(m_spawnParticles) {
RECORD_TIMER_STOP(spawn);

RECORD_TIMER_START(deleteP);
removeInvalidParticles(false);
RECORD_TIMER_STOP(deleteP);

RECORD_TIMER_START(respawn);
RECORD_TIMER_STOP(respawn);

updateFluidFraction();

if(this->m_adaptation && this->m_noSensors > 0) {
  checkRegridTrigger();

if(a_timeStepComputationInterval() > 0 && globalTimeStep % a_timeStepComputationInterval() == 0) {
  forceTimeStep(computeTimeStep());

if(m_sprayModel != nullptr) {
  const MInt count = m_sprayModel->m_injDataSize;
  vector<MFloat> tmp(count + m_addInjDataCnt);
  for(MInt i = 0; i < count; i++) {
    tmp[i] = m_sprayModel->m_injData[i];
  tmp[count] = m_sumEvapMass;
  tmp[count + 1] = m_sprayModel->m_noRTsecBreakUp;
  tmp[count + 2] = m_sprayModel->m_noKHsecBreakUp;
  tmp[count + 3] = m_noSendParticles;

RECORD_TIMER_STOP(partTS);
NEW_TIMER_GROUP_STATIC(t_post, "LPT postTimeStep");
NEW_TIMER_STATIC(postTS, "postTimeStep", t_post);

RECORD_TIMER_START(postTS);

if(m_sleepLPT > 0.0) {
  std::this_thread::sleep_for(std::chrono::milliseconds(m_sleepLPT));

removeInvalidParticles(true);
advanceParticles(0);

RECORD_TIMER_STOP(postTS);
MBool writeRestart = false;

countParticlesInCells();
if(!m_nonBlockingComm) {
  exchangeSourceTerms();

receiveSourceTerms();

receiveVelocitySlopes();

writeRestart = true;

if(m_restart && !m_restartFile) writeGridRestart = true;

return (force || writeRestart);
                               MInt* recalcIdTree) {

  writeCellSolutionFile(gridFileName, &recalcIdTree[0]);

  writeParticleRestartFile();

writeParticleRestartFile();

MBool writeCellFile = true;
if(grid().hasInactiveRanks()) writeCellFile = false;

stringstream gridFile;
stringstream gridFilePath;

gridFilePath << outputDir() << "grid_LPTDebug_" << globalTimeStep << ".Netcdf";

grid().raw().saveGrid((gridFilePath.str()).c_str(), recalcIds.begin());

writeCellSolutionFile((gridFile.str()).c_str(), &recalcIds[0]);
const MInt oldNum = a_noParticles();

ASSERT(!m_nonBlockingComm, "");

exchangeParticles(false, oldNum, false);

motionEquation(oldNum);

exchangeParticles(false, oldNum);

evaporation(oldNum);

RECORD_TIMER_START(m_timers[Timers::Injection]);
const MInt oldNum = a_noParticles();

m_sprayModel->injection(m_timeStep);

MBool allowNonLeaf = !m_sprayModel->m_broadcastInjected;
m_primaryExchange = true;
exchangeParticles(false, oldNum, allowNonLeaf);
if(!m_nonBlockingComm) {
  m_primaryExchange = false;
RECORD_TIMER_STOP(m_timers[Timers::Injection]);
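// Remove invalid particles: particles that were neither fully evaporated nor sent to another
// rank are reported to cerr for debugging before being erased.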
2674 for(
MInt i = a_noParticles() - 1; i >= 0; i--) {
2675 if(!alsoFullyEvaporated && m_partList[i].fullyEvaporated())
continue;
2676 if(m_partList[i].isInvalid()) {
2677 if(!m_partList[i].fullyEvaporated() && !m_partList[i].wasSend()) {
2678 if(m_partList[i].m_cellId < 0 || m_partList[i].m_cellId >= a_noCells() || !a_isHalo(m_partList[i].m_cellId)) {
2679 cerr <<
"Removing at " << m_partList[i].m_position[0] <<
" " << m_partList[i].m_position[1] <<
" "
2680 << m_partList[i].m_position[nDim - 1] <<
" " << m_partList[i].m_cellId <<
" "
2681 << m_partList[i].m_oldCellId <<
" " << m_partList[i].hadWallColl() <<
" " << m_partList[i].m_oldPos[0]
2682 <<
" " << m_partList[i].m_oldPos[1] <<
" " << m_partList[i].m_oldPos[nDim - 1] <<
" "
2683 << m_partList[i].m_partId << endl;
2684 if(m_partList[i].m_cellId > -1 && m_partList[i].m_cellId < a_noCells()) {
2685 cerr <<
"Cell-stats " << a_isHalo(m_partList[i].m_cellId) <<
" " << a_isBndryCell(m_partList[i].m_cellId)
2686 <<
" " << a_isValidCell(m_partList[i].m_cellId) << endl;
2687 if(m_partList[i].m_oldCellId > -1 && m_partList[i].m_oldCellId < a_noCells()) {
2688 cerr <<
"Old-cell-stats " << a_isBndryCell(m_partList[i].m_oldCellId) <<
" "
2689 << a_isValidCell(m_partList[i].m_oldCellId) << endl;
2691 m_partList[i].checkCellChange(
nullptr,
false);
2692 cerr <<
"After-update: " << m_partList[i].m_cellId <<
" " << m_partList[i].isInvalid() <<
" "
2693 << a_isValidCell(m_partList[i].m_cellId) << endl;
2697 m_partList.erase(m_partList.begin() + i);
2701 for(MInt i = a_noEllipsoidalParticles() - 1; i >= 0; i--) {
2702 if(m_partListEllipsoid[i].isInvalid()) {
2703 if(!m_partListEllipsoid[i].fullyEvaporated() && !m_partListEllipsoid[i].wasSend()) {
2704 if(m_partListEllipsoid[i].m_cellId < 0 || m_partListEllipsoid[i].m_cellId >= a_noCells()
2705 || !a_isHalo(m_partListEllipsoid[i].m_cellId)) {
2706 cerr << "Removing at " << m_partListEllipsoid[i].m_position[0] << " "
2707 << m_partListEllipsoid[i].m_position[1] << " " << m_partListEllipsoid[i].m_position[nDim - 1] << " "
2708 << m_partListEllipsoid[i].m_cellId << " " << m_partListEllipsoid[i].m_oldCellId << " "
2709 << m_partListEllipsoid[i].hadWallColl() << " " << m_partListEllipsoid[i].m_oldPos[0] << " "
2710 << m_partListEllipsoid[i].m_oldPos[1] << " " << m_partListEllipsoid[i].m_oldPos[nDim - 1] << " "
2711 << m_partListEllipsoid[i].m_partId << endl;
2712 if(m_partListEllipsoid[i].m_cellId > -1 && m_partListEllipsoid[i].m_cellId < a_noCells()) {
2713 cerr << "Cell-stats " << a_isHalo(m_partListEllipsoid[i].m_cellId) << " "
2714 << a_isBndryCell(m_partListEllipsoid[i].m_cellId) << " "
2715 << a_isValidCell(m_partListEllipsoid[i].m_cellId) << endl;
2716 if(m_partListEllipsoid[i].m_oldCellId > -1 && m_partListEllipsoid[i].m_oldCellId < a_noCells()) {
2717 cerr << "Old-cell-stats " << a_isBndryCell(m_partListEllipsoid[i].m_oldCellId) << " "
2718 << a_isValidCell(m_partListEllipsoid[i].m_oldCellId) << endl;
2720 m_partListEllipsoid[i].checkCellChange(nullptr, false);
2721 cerr << "After-update: " << m_partListEllipsoid[i].m_cellId << " " << m_partListEllipsoid[i].isInvalid()
2722 << " " << a_isValidCell(m_partListEllipsoid[i].m_cellId) << endl;
2726 m_partListEllipsoid.erase(m_partListEllipsoid.begin() + i);
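// Illustrative sketch (not part of the solver): the reverse index loops above erase
// invalid particles one element at a time. For a plain std::vector the same effect can
// be obtained with the erase-remove idiom, which compacts the container in a single
// pass; the Particle type and isInvalid() accessor below are simplified stand-ins.
#include <algorithm>
#include <vector>

struct Particle {
  bool invalid = false;
  bool isInvalid() const { return invalid; }
};

inline void removeInvalid(std::vector<Particle>& parts) {
  parts.erase(std::remove_if(parts.begin(), parts.end(),
                             [](const Particle& p) { return p.isInvalid(); }),
              parts.end());
}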
2745 for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
2746 a_noParticlesInCell(cellId) = 0;
2747 a_noEllipsoidsInCell(cellId) = 0;
2751 for(MInt i = 0; i < a_noParticles(); i++) {
2752 const MInt cellId = m_partList[i].m_cellId;
2753 if(m_partList[i].isInvalid()) {
2754 mTerm(1, AT_, "Counting invalid particle!");
2756 ASSERT(cellId > -1, "");
2757 ASSERT(!a_isHalo(cellId), "Particle in halo-cell!");
2758 ASSERT(c_isLeafCell(cellId), "Particle in non-leaf cell!");
2759 a_noParticlesInCell(cellId)++;
2762 for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
2763 const MInt cellId = m_partListEllipsoid[i].m_cellId;
2764 if(m_partListEllipsoid[i].isInvalid()) {
2765 mTerm(1, AT_, "Counting invalid ellipsoid!");
2767 ASSERT(cellId > -1, "");
2768 ASSERT(!a_isHalo(cellId), "Ellipsoid in halo-cell!");
2769 if(globalTimeStep > 0) ASSERT(c_isLeafCell(cellId), "Ellipsoid in non-leaf cell!");
2770 a_noEllipsoidsInCell(cellId)++;
2787 m_cellToPartMap.clear();
2789 for(MInt i = 0; i < a_noParticles(); i++) {
2790 m_cellToPartMap.insert({m_partList[i].m_cellId, &m_partList[i]});
2791 ASSERT(!a_isHalo(m_partList[i].m_cellId), "Particle in halo-cell!");
2794 m_cellToEllipsMap.clear();
2795 for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
2796 m_cellToEllipsMap.insert({m_partListEllipsoid[i].m_cellId, &m_partListEllipsoid[i]});
2797 ASSERT(!a_isHalo(m_partListEllipsoid[i].m_cellId), "Ellipsoid in halo-cell!");
2806 countParticlesInCells();
2809 vector<MInt> cellIds;
2812 static constexpr MInt cellPartLimit = 150;
2813 for(MInt cellId = 0; cellId < noInternalCells(); cellId++) {
2814 if(a_noParticlesInCell(cellId) < cellPartLimit) {
2817 cellIds.emplace_back(cellId);
2821 MInt totalMerged = 0;
2823 const MFloat dvLimit = 0.5;
2825 const MFloat diamLimit = 1E-5;
2827 for(auto const& cellId : cellIds) {
2829 auto particlesInCell = m_cellToPartMap.equal_range(cellId);
2832 MInt filterCount = 0;
2833 MInt mergedParticles = 0;
2834 for(auto i = particlesInCell.first; i != particlesInCell.second; i++) {
2835 auto& particle = i->second;
2837 sqrt(POW2(particle->m_accel.at(0)) + POW2(particle->m_accel.at(1)) + POW2(particle->m_accel.at(2)))
2840 if(magDv < dvLimit && partD < diamLimit) {
2841 if(validPartToMerge == nullptr) {
2842 validPartToMerge = particle;
2844 mergeParticles(validPartToMerge, particle);
2847 validPartToMerge = nullptr;
2854 if(filterCount > 0) {
2855 totalMerged += mergedParticles;
2864 for(MInt i = (MInt)m_partList.size() - 1; i >= 0; i--) {
2865 if(smallParticle(m_partList[i])) {
2867 cerr << "deleted small particle " << i << " at timestep " << m_timeStep << endl;
2870 m_partList.erase(m_partList.begin() + i);
2881 cerr << domainId() << ": total particles merged " << totalMerged << " deleted " << noDeleted << endl;
2891 if(smallParticle(*partA) || smallParticle(*partB)) {
2897 for(MInt dim = 0; dim < nDim; dim++) {
2898 valueA[dim] = (massA * valueA[dim] + massB * valueB[dim]) / (massA + massB);
2904 array<MFloat, nDim> dir{};
2907 for(MInt dim = 0; dim < nDim; ++dim) {
2908 dir.at(dim) = massA * valueA[dim] + massB * valueB[dim];
2912 for(MInt dim = 0; dim < nDim; dim++) {
2913 valueA[dim] = dir.at(dim) * resultingV;
2945 partA->m_dM = (massA * partA->m_dM + massB * partB->m_dM) / (massA + massB);
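// Illustrative sketch (not part of the solver): the merge above combines two parcels by
// mass-weighted averaging, v_merged = (m_A*v_A + m_B*v_B)/(m_A + m_B), applied
// component-wise to velocity-like quantities. Plain doubles replace MFloat here.
#include <array>

template <int nDim>
inline void mergeWeighted(std::array<double, nDim>& valueA, const std::array<double, nDim>& valueB,
                          const double massA, const double massB) {
  for(int d = 0; d < nDim; d++) {
    valueA[d] = (massA * valueA[d] + massB * valueB[d]) / (massA + massB);
  }
}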
2963 RECORD_TIMER_START(m_timers[Timers::Motion]);
2964 ASSERT(grid().checkNghbrIds(), "");
2967#pragma omp parallel default(none) shared(offset)
2971#pragma omp for nowait
2973 for(MInt i = offset; i < a_noParticles(); i++) {
2974 if(!m_partList[i].isInvalid()) {
2975 m_partList[i].motionEquation();
2979#pragma omp for nowait
2981 for(MInt i = offset; i < a_noEllipsoidalParticles(); i++) {
2982 if(!m_partListEllipsoid[i].isInvalid()) {
2983 m_partListEllipsoid[i].motionEquation();
2987 RECORD_TIMER_STOP(m_timers[Timers::Motion]);
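// Illustrative sketch (not part of the solver): the motion/energy/advance routines share
// the same pattern, an OpenMP parallel region with independent loops over the spherical
// and ellipsoidal particle lists, skipping invalid entries. A simplified stand-in with a
// single combined parallel-for:
#include <vector>

struct Droplet {
  bool invalid = false;
  double vel = 0.0, acc = 0.0;
  void step(const double dt) { vel += acc * dt; } // stand-in for motionEquation()
};

inline void integrateAll(std::vector<Droplet>& parts, const double dt) {
#pragma omp parallel for
  for(int i = 0; i < static_cast<int>(parts.size()); i++) {
    if(!parts[i].invalid) {
      parts[i].step(dt);
    }
  }
}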
2992 if(!m_heatCoupling && !m_evaporation) return;
2994 RECORD_TIMER_START(m_timers[Timers::Energy]);
2995 ASSERT(grid().checkNghbrIds(), "");
2998#pragma omp parallel default(none) shared(offset)
3002#pragma omp for nowait
3004 for(MInt i = offset; i < a_noParticles(); i++) {
3005 if(!m_partList[i].isInvalid()) {
3006 m_partList[i].energyEquation();
3010 RECORD_TIMER_STOP(m_timers[Timers::Energy]);
3015 MBool afterBalance = false;
3017 RECORD_TIMER_START(m_timers[Timers::SourceTerms]);
3020 afterBalance = true;
3023 ASSERT(grid().checkNghbrIds(), "");
3025 for(MInt i = offset; i < a_noParticles(); i++) {
3026 if(!m_partList[i].isInvalid() || m_partList[i].fullyEvaporated()) {
3027 m_partList[i].coupling();
3030 for(MInt i = offset; i < a_noEllipsoidalParticles(); i++) {
3031 if(!m_partListEllipsoid[i].isInvalid()) {
3032 m_partListEllipsoid[i].coupling();
3036 RECORD_TIMER_STOP(m_timers[Timers::SourceTerms]);
3042 ASSERT(grid().checkNghbrIds(), "");
3044#pragma omp parallel default(none) shared(offset)
3048#pragma omp for nowait
3050 for(MInt i = offset; i < a_noParticles(); i++) {
3051 if(!m_partList[i].isInvalid()) {
3052 m_partList[i].advanceParticle();
3056#pragma omp for nowait
3058 for(MInt i = offset; i < a_noEllipsoidalParticles(); i++) {
3059 if(!m_partListEllipsoid[i].isInvalid()) {
3060 m_partListEllipsoid[i].advanceParticle();
3071 if(!m_wallCollisions) return;
3073 RECORD_TIMER_START(m_timers[Timers::Wall]);
3076 for(MInt i = 0; i < a_noParticles(); i++) {
3077 const MInt cellId = m_partList[i].m_cellId;
3078 const MInt oldCellId = m_partList[i].m_oldCellId;
3080 if(cellId < 0) continue;
3085 if(a_isBndryCell(cellId) || a_isBndryCell(oldCellId)) {
3086 m_partList[i].particleWallCollision();
3087 m_partList[i].wallParticleCollision();
3092 for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
3093 const MInt cellId = m_partListEllipsoid[i].m_cellId;
3094 const MInt oldCellId = m_partListEllipsoid[i].m_oldCellId;
3095 if(cellId < 0) continue;
3096 if(a_isBndryCell(cellId) || a_isBndryCell(oldCellId)) {
3097 m_partListEllipsoid[i].particleWallCollision();
3098 m_partListEllipsoid[i].wallParticleCollision();
3102 RECORD_TIMER_STOP(m_timers[Timers::Wall]);
3116 list<partType> partRespawnList;
3119 auto deleteOffset = partition(m_partList.begin(), m_partList.end(), activeParticle<nDim>);
3120 auto offset = deleteOffset;
3121 while(offset != m_partList.end()) {
3122 if(offset->toBeRespawn()) {
3123 thisPart.partId = offset->m_partId;
3124 thisPart.cellId = offset->m_cellId;
3125 thisPart.diam = offset->m_diameter;
3126 thisPart.densRatio = offset->m_densityRatio;
3127 partRespawnList.push_back(thisPart);
3131 m_partList.erase(deleteOffset, m_partList.end());
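// Illustrative sketch (not part of the solver): std::partition moves all elements that
// satisfy the predicate to the front and returns the boundary iterator, so the tail
// [deleteOffset, end()) can first be inspected for respawn candidates and then erased in
// a single call, as done above. Simplified type with a hypothetical 'active' flag.
#include <algorithm>
#include <vector>

struct P { bool active = true; };

inline void dropInactive(std::vector<P>& parts) {
  auto boundary = std::partition(parts.begin(), parts.end(),
                                 [](const P& p) { return p.active; });
  // ... collect information from [boundary, parts.end()) if needed ...
  parts.erase(boundary, parts.end());
}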
3134 if(noDomains() > 1) {
3135 MInt const respawnSize = 3;
3136 MInt const respawnSendSize = respawnSize + 3;
3137 if(domainId() == m_respawnDomain) {
3139 arrayNoOfParts.p[m_respawnDomain] = (MInt)partRespawnList.size();
3140 MPI_Gather(MPI_IN_PLACE, 1, MPI_INT, &arrayNoOfParts.p[0], 1, MPI_INT, m_respawnDomain, mpiComm(), AT_, "INPLACE",
3145 MInt globalNoRespawnPart = 0;
3146 for(MInt i = 0; i < noDomains(); ++i) {
3147 globalNoRespawnPart += arrayNoOfParts.p[i];
3148 arrayNoOfParts.p[i] *= respawnSize;
3152 for(MInt i = 1; i <= noDomains(); ++i) {
3153 displs.p[i] = displs.p[i - 1] + arrayNoOfParts.p[i - 1];
3157 MInt counter = displs.p[m_respawnDomain];
3158 while(!partRespawnList.empty()) {
3159 thisPart = partRespawnList.front();
3160 recvbuf.p[counter++] = MFloat(thisPart.partId);
3161 recvbuf.p[counter++] = thisPart.diam;
3162 recvbuf.p[counter++] = thisPart.densRatio;
3163 partRespawnList.pop_front();
3165 MPI_Gatherv(MPI_IN_PLACE, arrayNoOfParts.p[m_respawnDomain], MPI_DOUBLE, &recvbuf.p[0], &arrayNoOfParts.p[0],
3166 &displs.p[0], MPI_DOUBLE, m_respawnDomain, mpiComm(), AT_, "INPLACE", "m_respawnDomain");
3171 for(MInt domain = 0; domain < m_noRespawnDomains; ++domain) {
3172 bufferSizes.p[domain] = 0;
3177 uniform_int_distribution<MInt> dist_cells(0, m_respawnGlobalDomainOffsets[m_noRespawnDomains] - 1);
3178 uniform_real_distribution<MFloat> dist_coord(0, 1);
3179 for(MInt i = 0; i < globalNoRespawnPart; ++i) {
3180 cellIds.p[i] = dist_cells(randomRespawn());
3181 randCoord1.p[i] = dist_coord(randomRespawn()) - 0.5;
3182 randCoord2.p[i] = dist_coord(randomRespawn()) - 0.5;
3184 for(MInt domain = 1; domain <= m_noRespawnDomains; ++domain) {
3185 if(cellIds.p[i] < m_respawnGlobalDomainOffsets[domain]) {
3186 bufferSizes.p[domain - 1] += 1;
3191 MInt ownRankInRespawnDomains = -1;
3193 for(MInt i = 0; i < m_noRespawnDomains; ++i) {
3194 receiveRequest[i] = MPI_REQUEST_NULL;
3195 if(m_respawnDomainRanks[i] == domainId()) {
3196 ownRankInRespawnDomains = i;
3201 &receiveRequest[i], AT_, "bufferSizes");
3203 MIntScratchSpace countsPerDomain(m_noRespawnDomains, AT_, "countsPerDomain");
3204 countsPerDomain.p[0] = 0;
3205 for(MInt domain = 1; domain < m_noRespawnDomains; ++domain) {
3206 countsPerDomain.p[domain] = countsPerDomain.p[domain - 1];
3207 if((domain - 1) != ownRankInRespawnDomains) {
3208 countsPerDomain.p[domain] += bufferSizes.p[domain - 1] * respawnSendSize;
3211 MFloatScratchSpace sendbuf(respawnSendSize * (globalNoRespawnPart - bufferSizes.p[ownRankInRespawnDomains]), AT_,
3213 for(MInt i = 0; i < globalNoRespawnPart; ++i) {
3214 MInt thisDomain = -1;
3215 for(MInt domain = 1; domain <= m_noRespawnDomains; ++domain) {
3216 if(cellIds.p[i] < m_respawnGlobalDomainOffsets[domain]) {
3217 thisDomain = domain - 1;
3221 if(thisDomain == ownRankInRespawnDomains) {
3224 MInt address = countsPerDomain.p[thisDomain];
3225 for(MInt inner = 0; inner < respawnSize; ++inner) {
3226 sendbuf.p[address + inner] = recvbuf.p[i * respawnSize + inner];
3228 sendbuf.p[address + respawnSize + 0] = MFloat(cellIds.p[i] - m_respawnGlobalDomainOffsets[thisDomain]);
3229 sendbuf.p[address + respawnSize + 1] = randCoord1.p[i];
3230 sendbuf.p[address + respawnSize + 2] = randCoord2.p[i];
3232 countsPerDomain.p[thisDomain] += respawnSendSize;
3235 countsPerDomain.p[0] = 0;
3236 for(MInt domain = 1; domain < m_noRespawnDomains; ++domain) {
3237 countsPerDomain.p[domain] = countsPerDomain.p[domain - 1];
3238 if((domain - 1) != ownRankInRespawnDomains) {
3239 countsPerDomain.p[domain] += bufferSizes.p[domain - 1] * respawnSendSize;
3242 MPI_Waitall(m_noRespawnDomains, &receiveRequest[0], MPI_STATUSES_IGNORE, AT_);
3243 for(MInt domain = 0; domain < m_noRespawnDomains; ++domain) {
3244 if((domain == ownRankInRespawnDomains) || (bufferSizes.p[domain] == 0)) {
3245 receiveRequest[domain] = MPI_REQUEST_NULL;
3247 MInt address = countsPerDomain.p[domain];
3248 MPI_Isend(&sendbuf.p[address], bufferSizes.p[domain] * respawnSendSize, MPI_DOUBLE,
3255 if(bufferSizes.p[ownRankInRespawnDomains] != 0) {
3256 for(MInt i = 0; i < globalNoRespawnPart; ++i) {
3257 if((cellIds.p[i] < m_respawnGlobalDomainOffsets[ownRankInRespawnDomains + 1])
3258 && (cellIds.p[i] >= m_respawnGlobalDomainOffsets[ownRankInRespawnDomains])) {
3263 counter = i * respawnSize;
3264 MInt id = cellIds.p[i] - m_respawnGlobalDomainOffsets[ownRankInRespawnDomains];
3266 MFloat cellLength = c_cellLengthAtCell(m_respawnCells.at((MUlong)id));
3268 x[m_respawn - 1] = m_respawnPlane;
3271 cellLength * randCoord1.p[i] + c_coordinate(m_respawnCells.at((MUlong)id), m_respawn % 3);
3272 x[(m_respawn + 1) % 3] =
3273 cellLength * randCoord2.p[i] + c_coordinate(m_respawnCells.at((MUlong)id), (m_respawn + 1) % 3);
3281 interpolateVariablesLS<0, nDim>(m_respawnCells[id], x, v);
3282 for(MInt j = 0; j < nDim; j++) {
3286 v[j] + m_terminalVelocity[thisParticle.m_diameter] * m_partList[0].s_Frm[j] / m_FrMag;
3288 v[j] + m_terminalVelocity[thisParticle.m_diameter] * m_partList[0].s_Frm[j] / m_FrMag;
3295 m_partList.push_back(thisParticle);
3300 MPI_Waitall(m_noRespawnDomains, &receiveRequest[0], MPI_STATUSES_IGNORE, AT_);
3302 MInt noPartsToRespawn = 0;
3303 MPI_Request receiveRequest = MPI_REQUEST_NULL;
3304 if(!m_respawnCells.empty()) {
3306 &receiveRequest, AT_, "noPartsToRespawn");
3309 auto localNoOfParts = (MInt)partRespawnList.size();
3310 MPI_Gather(&localNoOfParts, 1, MPI_INT, nullptr, 1, MPI_INT, m_respawnDomain, mpiComm(), AT_, "localNoOfParts",
3314 while(!partRespawnList.empty()) {
3315 thisPart = partRespawnList.front();
3316 sendbuf.p[counter++] = MFloat(thisPart.partId);
3317 sendbuf.p[counter++] = thisPart.diam;
3318 sendbuf.p[counter++] = thisPart.densRatio;
3319 partRespawnList.pop_front();
3322 MPI_Gatherv(&sendbuf.p[0], localNoOfParts * respawnSize, MPI_DOUBLE, nullptr, nullptr, nullptr, MPI_DOUBLE,
3323 m_respawnDomain, mpiComm(), AT_, "sendbuf", "nullptr");
3325 if(!m_respawnCells.empty()) {
3326 MPI_Wait(&receiveRequest, MPI_STATUS_IGNORE, AT_);
3328 if(noPartsToRespawn != 0) {
3330 MPI_Recv(&recvbuf.p[0], noPartsToRespawn * respawnSendSize, MPI_DOUBLE, m_respawnDomain,
3333 for(MInt part = 0; part < noPartsToRespawn; ++part) {
3340 address = part * respawnSendSize;
3341 id = MInt(recvbuf.p[address + respawnSize + 0]);
3342 MFloat cellLength = c_cellLengthAtCell(m_respawnCells.at((MUlong)id));
3344 x[m_respawn - 1] = m_respawnPlane;
3346 x[m_respawn % 3] = cellLength * recvbuf.p[address + respawnSize + 1]
3347 + c_coordinate(m_respawnCells.at((MUlong)id), m_respawn % 3);
3348 x[(m_respawn + 1) % 3] = cellLength * recvbuf.p[address + respawnSize + 2]
3349 + c_coordinate(m_respawnCells.at((MUlong)id), (m_respawn + 1) % 3);
3356 interpolateVariablesLS<0, nDim>(m_respawnCells[id], x, v);
3357 for(MInt j = 0; j < nDim; j++) {
3361 v[j] + m_terminalVelocity[thisParticle.m_diameter] * m_partList[0].s_Frm[j] / m_FrMag;
3363 v[j] + m_terminalVelocity[thisParticle.m_diameter] * m_partList[0].s_Frm[j] / m_FrMag;
3368 m_partList.push_back(thisParticle);
3375 mTerm(-1, AT_, "particleRespawn not implemented for ellipsoidal particle");
3396 MFloat particleDiametersDefaults[10] = {0.00001, 0.00002, 0.00003, 0.00004, 0.00005,
3397 0.00006, 0.00007, 0.00008, 0.00009, 0.00010};
3398 MInt noDiffParticles = 10;
3406 for(MInt i = 0; i < noDiffParticles; i++) {
3407 particleDiameters[i] =
3408 Context::getSolverProperty<MFloat>("particleDiameters", solverId(), AT_, &particleDiametersDefaults[i], i);
3411 m_terminalVelocity.clear();
3413 if((m_FrMag > 1.0e-12) || (m_dragModelType == 0)) {
3414 for(MInt i = 0; i < noDiffParticles; ++i) {
3415 const MFloat tmp = particleDiameters[i];
3416 m_terminalVelocity.insert(make_pair(tmp, 0.0));
3419 MInt const maxIter = 1000;
3420 MFloat epsilon = 1.0e-8, min = F0, max = F1;
3422 for(MInt i = 0; i < noDiffParticles; ++i) {
3429 for(n = 1; n <= maxIter; n++) {
3430 value = (fs * max - ft * min) / (fs - ft);
3432 if(fabs(fr) < epsilon * value) {
3453 m_terminalVelocity.insert(pair<MFloat, MFloat>(particleDiameters[i], value));
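// Illustrative sketch (not part of the solver): the terminal-velocity computation above
// iterates a false-position (regula falsi) update, value = (fs*max - ft*min)/(fs - ft),
// until the residual is small. A generic version of that root finder, assuming f changes
// sign on [lo, hi]:
#include <cmath>
#include <functional>

inline double regulaFalsi(const std::function<double(double)>& f, double lo, double hi,
                          const double eps = 1.0e-8, const int maxIter = 1000) {
  double flo = f(lo);
  double fhi = f(hi);
  double x = lo;
  for(int n = 0; n < maxIter; n++) {
    x = (flo * hi - fhi * lo) / (flo - fhi); // root of the secant through (lo,flo), (hi,fhi)
    const double fx = f(x);
    if(std::fabs(fx) < eps) break;
    if(fx * flo < 0.0) {
      hi = x;
      fhi = fx;
    } else {
      lo = x;
      flo = fx;
    }
  }
  return x;
}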
3465 exchangeParticles_<LPTEllipsoidal<nDim>>(collOnly, offset, allowNonLeaf);
3467 exchangeParticles_<LPTSpherical<nDim>>(collOnly, offset, allowNonLeaf);
3477template <class LPTParticle>
3481 if(noDomains() == 1) return;
3483 MBool foundCell = false;
3484 RECORD_TIMER_START(m_timers[Timers::Exchange]);
3486 vector<LPTParticle>& partList = a_particleList<LPTParticle>();
3487 MInt noParticles = a_noParticles<LPTParticle>();
3490 for(MInt id = offset; id < noParticles; id++) {
3491 auto& part = partList[id];
3492 if((collOnly && !part.hasCollided()) ||
3498 if(!collOnly && m_collisions > 0 && part.isWindow()) {
3502 foundCell = pushToQueue<LPTParticle>(m_pointsToHalo, id);
3503 } else if(!m_periodicBC && part.reqSend()) {
3507 if(a_isHalo(part.m_cellId)) {
3508 foundCell = pushToQueue<LPTParticle>(m_pointsToWindow, id);
3509 if(allowNonLeaf && !foundCell) {
3511 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3512 for(MInt i = 0; i < noHaloCells(d); i++) {
3513 if(part.m_cellId == grid().haloCell(d, i)) {
3516 thisSend.toCellId = i;
3517 m_queueToSend[d].push(thisSend);
3524 } else if(m_periodicBC && part.reqSend()) {
3525 std::array<MFloat, nDim> tmpShift{F0};
3526 if(grid().isPeriodic(part.m_cellId)) {
3527 for(MInt n = 0; n < nDim; n++) {
3528 if(grid().raw().periodicCartesianDir(n)) {
3529 MFloat cellCoordinate = c_coordinate(part.m_cellId, n);
3533 if(sign1 == 1 && sign2 == 1) {
3535 } else if(sign1 == -1 && sign2 == -1) {
3540 tmpShift[n] = m_globDomainLength[n] * sign;
3544 for(MInt n = 0; n < nDim; n++) {
3545 partList[id].m_position[n] += tmpShift[n];
3549 if(a_isHalo(part.m_cellId)) {
3550 foundCell = pushToQueue<LPTParticle>(m_pointsToWindow, id);
3551 if(allowNonLeaf && !foundCell) {
3553 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3554 for(MInt i = 0; i < noHaloCells(d); i++) {
3555 if(part.m_cellId == grid().haloCell(d, i)) {
3558 thisSend.toCellId = i;
3559 m_queueToSend[d].push(thisSend);
3570 cerr << part.m_cellId << endl;
3571 cerr << a_isHalo(part.m_cellId) << endl;
3572 m_log << "TRANSFER status &1 or &4: no corresponding cell found! Original cell " << part.m_cellId
3573 << " properties " << endl;
3580 if(!m_nonBlockingComm || collOnly) {
3581 sendAndReceiveParticles<LPTParticle>(allowNonLeaf);
3584 sendParticles<true, LPTParticle>();
3585 m_nonBlockingStage = 2;
3587 sendParticles<false, LPTParticle>();
3588 m_nonBlockingStage = 1;
3591 RECORD_TIMER_STOP(m_timers[Timers::Exchange]);
3602template <class LPTParticle>
3606 MPI_Status status{};
3609 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3610 m_sendSize[d] = m_queueToSend[d].size();
3611 if(m_sendSize[d] > m_exchangeBufferSize) {
3612 mTerm(1, AT_, "Increase particle exchange buffer size!");
3614 MPI_Isend(&m_sendSize[d], 1, MPI_INT, grid().neighborDomain(d), mpiTag("PARTICLE_COUNT"), mpiComm(),
3615 &m_mpi_reqSendSize[d], AT_, "noToSend");
3618 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3619 MPI_Recv(&m_recvSize[d], 1, MPI_INT, grid().neighborDomain(d), mpiTag("PARTICLE_COUNT"), mpiComm(), &status, AT_,
3623 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3624 MPI_Wait(&m_mpi_reqSendSize[d], &status, AT_);
3628 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3629 if(m_queueToSend[d].empty()) {
3631 m_mpi_reqSendFloat[d] = MPI_REQUEST_NULL;
3632 m_mpi_reqSendInt[d] = MPI_REQUEST_NULL;
3636 vector<LPTParticle*> particlesToSend;
3637 vector<MInt> cellIds;
3638 vector<LPTParticle>& partList = a_particleList<LPTParticle>();
3640 while(!m_queueToSend[d].empty()) {
3642 m_queueToSend[d].pop();
3645 particlesToSend.push_back(&partList[partPos]);
3646 cellIds.push_back(thisSend.toCellId);
3649 MInt noParticles = particlesToSend.size();
3651 packParticles(particlesToSend, m_intSendBuffer[d].get(), m_sendBuffer[d].get(), cellIds);
3653 for(auto* part : particlesToSend) {
3654 if(part->reqSend()) {
3655 part->wasSend() = true;
3656 part->reqSend() = false;
3657 part->isInvalid() = true;
3662 MPI_Isend(&m_sendBuffer[d][0], noParticles * elemPerP<LPTParticle>(), MPI_DOUBLE, grid().neighborDomain(d),
3663 mpiTag("PARTICLE_FLOAT"), mpiComm(), &m_mpi_reqSendFloat[d], AT_, "m_sendBuffer");
3665 MPI_Isend(&m_intSendBuffer[d][0], noParticles * intElemPerP<LPTParticle>(), MPI_INT, grid().neighborDomain(d),
3666 mpiTag("PARTICLE_INT"), mpiComm(), &m_mpi_reqSendInt[d], AT_, "m_intSendBuffer");
3670 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3671 if(m_recvSize[d] > 0) {
3672 MPI_Recv(&m_recvBuffer[d][0], mMin(bufSize<LPTParticle>(), m_recvSize[d] * elemPerP<LPTParticle>()), MPI_DOUBLE,
3673 grid().neighborDomain(d), mpiTag("PARTICLE_FLOAT"), mpiComm(), &status, AT_, "m_recvBuffer");
3674 MPI_Recv(&m_intRecvBuffer[d][0], mMin(intBufSize<LPTParticle>(), m_recvSize[d] * intElemPerP<LPTParticle>()),
3675 MPI_INT, grid().neighborDomain(d), mpiTag("PARTICLE_INT"), mpiComm(), &status, AT_, "m_intRecvBuffer");
3680 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3681 MPI_Wait(&m_mpi_reqSendFloat[d], &status, AT_);
3682 MPI_Wait(&m_mpi_reqSendInt[d], &status, AT_);
3686 for(MInt d = 0; d < grid().noNeighborDomains(); d++) {
3688 if(m_recvSize[d] == 0) continue;
3691 unpackParticles<LPTParticle, false>(m_recvSize[d], &m_intRecvBuffer[d][0], &m_recvBuffer[d][0], d, allowNonLeaf);
3693 unpackParticles<LPTParticle, true>(m_recvSize[d], &m_intRecvBuffer[d][0], &m_recvBuffer[d][0], d, allowNonLeaf);
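// Illustrative sketch (not part of the solver): the blocking exchange above first
// communicates the number of particles per neighbor (Isend/Recv of a single int) and
// only then transfers the packed payload buffers. The size handshake in isolation, using
// the plain MPI C API with a hypothetical tag; the solver's wrapper arguments are omitted.
#include <mpi.h>
#include <vector>

inline void exchangeSizes(const std::vector<int>& neighbors, const std::vector<int>& sendSize,
                          std::vector<int>& recvSize, MPI_Comm comm, const int tag = 100) {
  const int n = static_cast<int>(neighbors.size());
  std::vector<MPI_Request> req(n);
  for(int d = 0; d < n; d++) {
    MPI_Isend(const_cast<int*>(&sendSize[d]), 1, MPI_INT, neighbors[d], tag, comm, &req[d]);
  }
  for(int d = 0; d < n; d++) {
    MPI_Recv(&recvSize[d], 1, MPI_INT, neighbors[d], tag, comm, MPI_STATUS_IGNORE);
  }
  MPI_Waitall(n, req.data(), MPI_STATUSES_IGNORE);
}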
3704template <MBool allNeighbors, class LPTParticle>
3708 if(m_nonBlockingStage != 0) {
3709 mTerm(1, AT_, "Incorrect non-blocking stage!");
3712 RECORD_TIMER_START(m_timers[Timers::Exchange1]);
3715 if(m_openParticleInjSend) {
3716 MPI_Waitall(noNeighborDomains(), &m_mpi_reqSendFloat[0], MPI_STATUSES_IGNORE, AT_);
3717 MPI_Waitall(noNeighborDomains(), &m_mpi_reqSendInt[0], MPI_STATUSES_IGNORE, AT_);
3718 m_openParticleInjSend = false;
3721 if(m_openParticleSend) {
3722 MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_mpi_reqSendFloat[0], MPI_STATUSES_IGNORE, AT_);
3723 MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_mpi_reqSendInt[0], MPI_STATUSES_IGNORE, AT_);
3724 m_openParticleSend = false;
3727 const MInt noNeighborsSend = allNeighbors ? grid().noNeighborDomains() : grid().noLeafSendNeighborDomains();
3728 const MInt noNeighborsRecv = allNeighbors ? grid().noNeighborDomains() : grid().noLeafRecvNeighborDomains();
3731 for(MInt n = 0; n < noNeighborsSend; n++) {
3734 d = grid().leafSendNeighborDomain(n);
3736 if(m_queueToSend[d].empty()) {
3738 m_mpi_reqSendFloat[n] = MPI_REQUEST_NULL;
3739 m_intSendBuffer[d][0] = -intElemPerP<LPTParticle>();
3740 MPI_Isend(&m_intSendBuffer[d][0], 0, MPI_INT, grid().neighborDomain(d), mpiTag("PARTICLE_INT"), mpiComm(),
3741 &m_mpi_reqSendInt[n], AT_, "m_intSendBuffer");
3745 vector<LPTParticle*> particlesToSend;
3746 RECORD_TIMER_START(m_timers[Timers::Exchange3]);
3747 vector<MInt> cellIds;
3748 vector<LPTParticle>& partList = a_particleList<LPTParticle>();
3750 while(!m_queueToSend[d].empty()) {
3752 m_queueToSend[d].pop();
3755 particlesToSend.push_back(&partList[partPos]);
3756 cellIds.push_back(thisSend.toCellId);
3759 MInt noParticles = particlesToSend.size();
3760 packParticles(particlesToSend, &m_intSendBuffer[d][0], &m_sendBuffer[d][0], cellIds);
3762 for(auto& part : particlesToSend) {
3763 if(part->reqSend()) {
3764 part->wasSend() = true;
3765 part->reqSend() = false;
3766 part->isInvalid() = true;
3769 RECORD_TIMER_STOP(m_timers[Timers::Exchange3]);
3772 MPI_Isend(&m_sendBuffer[d][0], noParticles * elemPerP<LPTParticle>(), MPI_DOUBLE, grid().neighborDomain(d),
3773 mpiTag("PARTICLE_FLOAT"), mpiComm(), &m_mpi_reqSendFloat[n], AT_, "m_sendBuffer");
3775 MPI_Isend(&m_intSendBuffer[d][0], noParticles * intElemPerP<LPTParticle>(), MPI_INT, grid().neighborDomain(d),
3776 mpiTag("PARTICLE_INT"), mpiComm(), &m_mpi_reqSendInt[n], AT_, "m_intSendBuffer");
3780 m_openParticleInjSend = true;
3782 m_openParticleSend = true;
3788 const MBool loadWasRunning = this->isLoadTimerRunning();
3789 if(loadWasRunning) {
3790 this->stopLoadTimer(AT_);
3791 this->startIdleTimer(AT_);
3792 this->disableDlbTimers();
3795 for(MInt n = 0; n < noNeighborsRecv; n++) {
3798 d = grid().leafRecvNeighborDomain(n);
3800 m_recvSize[d] = -99;
3802 MPI_Iprobe(grid().neighborDomain(d), mpiTag("PARTICLE_INT"), mpiComm(), &flag, &m_mpi_statusProbe[n],
3805 MPI_Get_count(&m_mpi_statusProbe[n], MPI_INT, &m_recvSize[d], "m_intRecvBuffer");
3807 m_recvSize[d] = m_recvSize[d] / intElemPerP<LPTParticle>();
3809 MPI_Irecv(&m_intRecvBuffer[d][0], m_recvSize[d] * intElemPerP<LPTParticle>(), MPI_INT, grid().neighborDomain(d),
3810 mpiTag("PARTICLE_INT"), mpiComm(), &m_mpi_reqRecvInt[n], AT_, "m_intRecvBuffer");
3812 if(m_recvSize[d] > 0) {
3813 MPI_Irecv(&m_recvBuffer[d][0], m_recvSize[d] * elemPerP<LPTParticle>(), MPI_DOUBLE, grid().neighborDomain(d),
3814 mpiTag("PARTICLE_FLOAT"), mpiComm(), &m_mpi_reqRecvFloat[n], AT_, "m_recvBuffer");
3816 m_mpi_reqRecvFloat[n] = MPI_REQUEST_NULL;
3821 if(loadWasRunning) {
3822 this->reEnableDlbTimers();
3823 this->stopIdleTimer(AT_);
3824 this->startLoadTimer(AT_);
3826 RECORD_TIMER_STOP(m_timers[Timers::Exchange1]);
3835template <MBool allNeighbors>
3838 receiveParticles_<allNeighbors, LPTEllipsoidal<nDim>>();
3840 receiveParticles_<allNeighbors, LPTSpherical<nDim>>();
3850template <MBool allNeighbors, class LPTParticle>
3854 if(m_nonBlockingStage < 1) {
3855 mTerm(1, AT_, "Incorrect non-blocking stage!");
3857 RECORD_TIMER_START(m_timers[Timers::Exchange]);
3858 RECORD_TIMER_START(m_timers[Timers::Exchange1]);
3860 RECORD_TIMER_START(m_timers[Timers::Exchange4]);
3866 const MBool loadWasRunning = this->isLoadTimerRunning();
3867 if(loadWasRunning) {
3868 this->stopLoadTimer(AT_);
3869 this->startIdleTimer(AT_);
3870 this->disableDlbTimers();
3873 const MInt noNeighborsRecv = allNeighbors ? grid().noNeighborDomains() : grid().noLeafRecvNeighborDomains();
3875 MBool allReceived = false;
3876 while(!allReceived) {
3879 for(MInt n = 0; n < noNeighborsRecv; n++) {
3882 d = grid().leafRecvNeighborDomain(n);
3884 if(m_recvSize[d] == -99) {
3885 allReceived = false;
3891 for(MInt n = 0; n < noNeighborsRecv; n++) {
3894 d = grid().leafRecvNeighborDomain(n);
3897 MPI_Iprobe(grid().neighborDomain(d), mpiTag("PARTICLE_INT"), mpiComm(), &flag, &m_mpi_statusProbe[n],
3900 MPI_Get_count(&m_mpi_statusProbe[n], MPI_INT, &m_recvSize[d], "m_intRecvBuffer");
3901 m_recvSize[d] = m_recvSize[d] / intElemPerP<LPTParticle>();
3902 MPI_Irecv(&m_intRecvBuffer[d][0], m_recvSize[d] * intElemPerP<LPTParticle>(), MPI_INT, grid().neighborDomain(d),
3903 mpiTag("PARTICLE_INT"), mpiComm(), &m_mpi_reqRecvInt[n], AT_, "m_intRecvBuffer");
3905 if(m_recvSize[d] > 0) {
3906 MPI_Irecv(&m_recvBuffer[d][0], m_recvSize[d] * elemPerP<LPTParticle>(), MPI_DOUBLE, grid().neighborDomain(d),
3907 mpiTag("PARTICLE_FLOAT"), mpiComm(), &m_mpi_reqRecvFloat[n], AT_, "m_recvBuffer");
3909 m_mpi_reqRecvFloat[n] = MPI_REQUEST_NULL;
3915 if(loadWasRunning) {
3916 this->reEnableDlbTimers();
3917 this->stopIdleTimer(AT_);
3918 this->startLoadTimer(AT_);
3921 RECORD_TIMER_STOP(m_timers[Timers::Exchange4]);
3924 MPI_Waitall(noNeighborsRecv, &m_mpi_reqRecvFloat[0], MPI_STATUSES_IGNORE, AT_);
3925 MPI_Waitall(noNeighborsRecv, &m_mpi_reqRecvInt[0], MPI_STATUSES_IGNORE, AT_);
3928 RECORD_TIMER_START(m_timers[Timers::Exchange5]);
3930 MBool allowNonLeaf = false;
3931 if(m_nonBlockingStage == 2) allowNonLeaf = true;
3934 for(MInt n = 0; n < noNeighborsRecv; n++) {
3937 d = grid().leafRecvNeighborDomain(n);
3940 if(m_recvSize[d] == 0) continue;
3942 unpackParticles<LPTParticle, false>(m_recvSize[d], &m_intRecvBuffer[d][0], &m_recvBuffer[d][0], d, allowNonLeaf);
3944 unpackParticles<LPTParticle, true>(m_recvSize[d], &m_intRecvBuffer[d][0], &m_recvBuffer[d][0], d, allowNonLeaf);
3948 RECORD_TIMER_STOP(m_timers[Timers::Exchange5]);
3950 RECORD_TIMER_STOP(m_timers[Timers::Exchange1]);
3951 m_nonBlockingStage = 0;
3952 RECORD_TIMER_STOP(m_timers[Timers::Exchange]);
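// Illustrative sketch (not part of the solver): the non-blocking receive path above
// probes for a pending message, derives the particle count from the message length, and
// only then posts the matching receive. The probe/count step with the plain MPI C API:
#include <mpi.h>

// Returns the number of ints pending from 'source' with 'tag', or -1 if nothing has arrived yet.
inline int probeIntCount(const int source, const int tag, MPI_Comm comm) {
  int flag = 0;
  MPI_Status status;
  MPI_Iprobe(source, tag, comm, &flag, &status);
  if(!flag) return -1;
  int count = 0;
  MPI_Get_count(&status, MPI_INT, &count);
  return count;
}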
3964 if(noDomains() > 1) {
3966 MPI_Allgather(&noParticles, 1, MPI_INT, &offsets[0], 1, MPI_INT, mpiComm(), AT_, "noParticles", "offsets");
3969 const MInt totalOffset = accumulate(&offsets[0], &offsets[domainId()], 0);
3972 MLong maxPartId = accumulate(&offsets[0], &offsets[noDomains()], 0);
3975 if(domainId() == 0) {
3976 m_log << "Domain 0 has " << noParticles << " particles initialized, out of globally " << maxPartId << " particles"
3982 for(auto& part : m_partList) {
3983 part.m_partId += totalOffset;
3985 for(auto& ellip : m_partListEllipsoid) {
3986 ellip.m_partId += totalOffset;
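// Illustrative sketch (not part of the solver): each rank shifts its local particle ids
// by the number of particles on all lower ranks, i.e. by an exclusive prefix sum of the
// gathered per-rank counts, which is what the Allgather/accumulate combination above does.
#include <mpi.h>
#include <numeric>
#include <vector>

inline long exclusiveOffset(int localCount, MPI_Comm comm) {
  int rank = 0, size = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  std::vector<int> counts(size, 0);
  MPI_Allgather(&localCount, 1, MPI_INT, counts.data(), 1, MPI_INT, comm);
  return std::accumulate(counts.begin(), counts.begin() + rank, 0L);
}
// Alternatively, MPI_Exscan(&localCount, &offset, 1, MPI_INT, MPI_SUM, comm) yields the
// same exclusive prefix sum without gathering the full count array.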
4003 queue<partListIteratorConst<nDim>> ncmpiPartQueue;
4004 MInt ncmpiPartQueueSize = 0;
4006 auto i1 = m_partList.begin();
4007 while(i1 != m_partList.end()) {
4008 if(!(*i1).isInvalid() && ((*i1).m_position[0] > m_xCutOff)) {
4009 ++ncmpiPartQueueSize;
4010 ncmpiPartQueue.push(i1);
4017 MPI_Allgather(&ncmpiPartQueueSize, 1, MPI_INT, &ncmpiPartCount[0], 1, MPI_INT, mpiComm(), AT_, "ncmpiPartQueueSize",
4021 ParallelIo::size_type ncmpiPartCountMax = 0;
4022 for(MInt i = 0; i < noDomains(); ++i) {
4023 ncmpiPartCountMax += ncmpiPartCount[i];
4027 if(ncmpiPartCountMax != 0) {
4030 outputDir() + "partData_" + getIdentifier() + to_string(globalTimeStep) + ParallelIo::fileExt();
4033 ParallelIo parallelIo(ncmpiFileName, PIO_REPLACE, mpiComm());
4039 parallelIo.setAttribute(0.0,
"time");
4041 parallelIo.setAttribute(m_time,
"time");
4045 parallelIo.defineArray(PIO_INT,
"partCount", noDomains());
4049 parallelIo.defineArray(PIO_LONG,
"partId", ncmpiPartCountMax);
4053 parallelIo.defineArray(PIO_FLOAT,
"partPos", 3 * ncmpiPartCountMax);
4057 parallelIo.defineArray(PIO_FLOAT,
"partVel", 3 * ncmpiPartCountMax);
4061 parallelIo.defineArray(PIO_FLOAT,
"partDia", ncmpiPartCountMax);
4063 if(m_activePrimaryBUp || m_activeSecondaryBUp || m_wallCollisions) {
4064 parallelIo.defineArray(PIO_INT,
"partParceledNo", ncmpiPartCountMax);
4067 if(m_heatCoupling || m_evaporation) {
4068 parallelIo.defineArray(PIO_FLOAT,
"partTemp", ncmpiPartCountMax);
4071 if(m_domainIdOutput) {
4072 parallelIo.defineArray(PIO_INT,
"domainId", ncmpiPartCountMax);
4075 parallelIo.setOffset(1, domainId());
4076 parallelIo.writeArray(&ncmpiPartQueueSize,
"partCount");
4079 ParallelIo::size_type ncmpiStart = 0;
4080 ParallelIo::size_type ncmpiCount = ncmpiPartCount[domainId()];
4082 if(ncmpiPartCount[domainId()] > 0) {
4083 for(
MInt i = 0; i < domainId(); i++) {
4084 ncmpiStart += ncmpiPartCount[i];
4088 ASSERT(ncmpiStart + ncmpiPartCount[domainId()] <= ncmpiPartCountMax,
4089 "ERROR: Invalid number of particles " + to_string(ncmpiStart + ncmpiPartCount[domainId()]) +
" > "
4090 + to_string(ncmpiPartCountMax));
4092 ASSERT(ncmpiStart + ncmpiPartCount[domainId()] <= ncmpiPartCountMax,
4093 "ERROR: Invalid number of particles " + to_string(ncmpiStart + ncmpiPartCount[domainId()]) +
" > "
4094 + to_string(ncmpiPartCountMax));
4097 MIntScratchSpace ncmpiPartParceledNo(ncmpiPartCount[domainId()], AT_,
"ncmpiPartParceledNo");
4100 MFloatScratchSpace ncmpiPartCoords(3 * ncmpiPartCount[domainId()], AT_,
"ncmpiPartCoords");
4103 MFloatScratchSpace ncmpiPartVel(3 * ncmpiPartCount[domainId()], AT_,
"ncmpiPartVel");
4104 MInt ncmpiCountId = 0;
4107 while(!ncmpiPartQueue.empty()) {
4108 ncmpiPartId[ncmpiCountId] = (*(ncmpiPartQueue.front())).m_partId;
4109 ncmpiPartParceledNo[ncmpiCountId] = (*(ncmpiPartQueue.front())).m_noParticles;
4110 ncmpiDiameter[ncmpiCountId] = (*(ncmpiPartQueue.front())).m_diameter;
4111 ncmpiTemp[ncmpiCountId] = (*(ncmpiPartQueue.front())).m_temperature;
4112 ncmpiPartCoords[(3 * ncmpiCountId)] = (*(ncmpiPartQueue.front())).m_position[0];
4113 ncmpiPartCoords[(3 * ncmpiCountId) + 1] = (*(ncmpiPartQueue.front())).m_position[1];
4114 ncmpiPartCoords[(3 * ncmpiCountId) + 2] = (*(ncmpiPartQueue.front())).m_position[2];
4115 ncmpiPartVel[(3 * ncmpiCountId)] = (*(ncmpiPartQueue.front())).m_velocity[0];
4116 ncmpiPartVel[(3 * ncmpiCountId) + 1] = (*(ncmpiPartQueue.front())).m_velocity[1];
4117 ncmpiPartVel[(3 * ncmpiCountId) + 2] = (*(ncmpiPartQueue.front())).m_velocity[2];
4119 ncmpiPartQueue.pop();
4124 parallelIo.setOffset(ncmpiCount, ncmpiStart);
4125 parallelIo.writeArray(ncmpiPartId.
begin(),
"partId");
4126 parallelIo.writeArray(ncmpiDiameter.
begin(),
"partDia");
4128 if(m_activePrimaryBUp || m_activeSecondaryBUp || m_wallCollisions) {
4129 parallelIo.writeArray(ncmpiPartParceledNo.
begin(),
"partParceledNo");
4132 if(m_heatCoupling || m_evaporation) {
4133 parallelIo.writeArray(ncmpiTemp.
begin(),
"partTemp");
4135 if(m_domainIdOutput) {
4137 ncmpiDomain.
fill(domainId());
4138 parallelIo.writeArray(ncmpiDomain.
begin(),
"domainId");
4142 ParallelIo::size_type ncmpi3Start = 3 * ncmpiStart;
4143 parallelIo.setOffset(ncmpiCount, ncmpi3Start);
4144 parallelIo.writeArray(ncmpiPartCoords.
begin(),
"partPos");
4145 parallelIo.writeArray(ncmpiPartVel.
begin(),
"partVel");
4147 if(domainId() == 0) {
4148 cerr << "Writing particle solution file at time step " << globalTimeStep << endl;
4152 if(domainId() == 0) {
4153 m_log << "WARNING: Write called but no particles present!" << endl;
4154 cerr << "WARNING: Write called but no particles present!" << endl;
4160 queue<ellipsListIterator<nDim>> ncmpiEllipsoidQueue;
4161 ncmpiPartQueueSize = 0;
4163 auto i2 = m_partListEllipsoid.begin();
4164 while(i2 != m_partListEllipsoid.end()) {
4165 if(!(*i2).isInvalid() && ((*i2).m_position[0] > m_xCutOff)) {
4166 ++ncmpiPartQueueSize;
4167 ncmpiEllipsoidQueue.push(i2);
4173 MIntScratchSpace ncmpiEllipsoidCount(noDomains(), AT_,
"ncmpiEllipsoidCount");
4174 MPI_Allgather(&ncmpiPartQueueSize, 1, MPI_INT, &ncmpiEllipsoidCount[0], 1, MPI_INT, mpiComm(), AT_,
4175 "ncmpiPartQueueSize",
"ncmpiEllipsoidCount");
4178 ncmpiPartCountMax = 0;
4179 for(
MInt i = 0; i < noDomains(); ++i) {
4180 ncmpiPartCountMax += ncmpiEllipsoidCount[i];
4184 if(ncmpiPartCountMax != 0) {
4186 outputDir() +
"partEllipsoid_" + getIdentifier() + to_string(
globalTimeStep) + ParallelIo::fileExt();
4189 ParallelIo parallelIo(ncmpiFileName, PIO_REPLACE, mpiComm());
4194 parallelIo.setAttribute(m_time,
"time");
4196 parallelIo.defineArray(PIO_INT,
"partCount", noDomains());
4197 parallelIo.defineArray(PIO_LONG,
"partId", ncmpiPartCountMax);
4198 parallelIo.defineArray(PIO_FLOAT,
"partSemiMinorAxis", ncmpiPartCountMax);
4199 parallelIo.defineArray(PIO_FLOAT,
"partAspectRatio", ncmpiPartCountMax);
4200 parallelIo.defineArray(PIO_FLOAT,
"partDia", ncmpiPartCountMax);
4201 parallelIo.defineArray(PIO_FLOAT,
"partDensityRatio", ncmpiPartCountMax);
4202 parallelIo.defineArray(PIO_FLOAT,
"partPos", 3 * ncmpiPartCountMax);
4203 parallelIo.defineArray(PIO_FLOAT,
"partVel", 3 * ncmpiPartCountMax);
4204 parallelIo.defineArray(PIO_FLOAT,
"partAngVel", 3 * ncmpiPartCountMax);
4205 parallelIo.defineArray(PIO_FLOAT,
"partMajorAxis", 3 * ncmpiPartCountMax);
4206 parallelIo.defineArray(PIO_FLOAT,
"partQuat", 4 * ncmpiPartCountMax);
4209 ParallelIo::size_type ncmpiStart = domainId();
4210 ParallelIo::size_type ncmpiCount = 1;
4212 parallelIo.setOffset(ncmpiCount, ncmpiStart);
4215 parallelIo.writeArray(&ncmpiPartQueueSize,
"partCount");
4220 for(
MInt i = 0; i < domainId(); ++i) {
4221 ncmpiStart += ncmpiEllipsoidCount[i];
4223 ncmpiCount = ncmpiEllipsoidCount[domainId()];
4224 if(ncmpiStart >= ncmpiPartCountMax) {
4225 if(ncmpiCount == 0) {
4228 mTerm(1, AT_,
"Error in m_ellipsoids ncmpiStart >= ncmpiPartCountMax but ncmpiCount != 0");
4240 MFloatScratchSpace ncmpiPartMajorAxis(3 * ncmpiPartCountMax, AT_,
"ncmpiEllipsMajorAxis");
4242 MInt ncmpiCountId = 0;
4245 while(!ncmpiEllipsoidQueue.empty()) {
4247 ncmpiPartId[ncmpiCountId] = particle.
m_partId;
4252 std::array<MFloat, nDim> majorAxis{};
4254 for(
MInt n = 0; n < nDim; n++) {
4255 ncmpiPartCoords[(3 * ncmpiCountId) + n] = particle.
m_position[n];
4256 ncmpiPartVel[(3 * ncmpiCountId) + n] = particle.
m_velocity[n];
4257 ncmpiPartAngVel[(3 * ncmpiCountId) + n] = particle.
m_angularVel[n];
4258 ncmpiPartMajorAxis[(3 * ncmpiCountId) + n] = majorAxis[n];
4260 for(
MInt n = 0; n < 4; n++) {
4261 ncmpiPartQuat[(4 * ncmpiCountId) + n] = particle.
m_quaternion[n];
4264 ncmpiEllipsoidQueue.pop();
4269 parallelIo.setOffset(ncmpiCount, ncmpiStart);
4270 parallelIo.writeArray(&ncmpiPartId[0],
"partId");
4271 parallelIo.writeArray(&ncmpiSemiMinorAxis[0],
"partSemiMinorAxis");
4272 parallelIo.writeArray(&ncmpiAspectRatio[0],
"partAspectRatio");
4273 parallelIo.writeArray(&ncmpiEqDia[0],
"partDia");
4274 parallelIo.writeArray(&ncmpiDensityRatio[0],
"partDensityRatio");
4277 ParallelIo::size_type ncmpi3Start = 3 * ncmpiStart;
4278 parallelIo.setOffset(ncmpiCount, ncmpi3Start);
4279 parallelIo.writeArray(&ncmpiPartCoords[0],
"partPos");
4280 parallelIo.writeArray(&ncmpiPartVel[0],
"partVel");
4281 parallelIo.writeArray(&ncmpiPartAngVel[0],
"partAngVel");
4282 parallelIo.writeArray(&ncmpiPartMajorAxis[0],
"partMajorAxis");
4284 ParallelIo::size_type ncmpi4Start = 4 * ncmpiStart;
4287 parallelIo.setOffset(ncmpiCount, ncmpi4Start);
4288 parallelIo.writeArray(&ncmpiPartQuat[0],
"partQuat");
4291 while(!ncmpiEllipsoidQueue.empty()) {
4292 ncmpiEllipsoidQueue.pop();
4308 grid().updatePartitionCellOffsets();
4311 m_log <<
"Writing LPT restart file..." << endl;
4315 MInt noParticles = 0;
4316 for(
MInt id = 0;
id < a_noParticles();
id++) {
4317 if(m_partList[
id].isInvalid()) {
4318 cerr <<
"Invalid particle when writing restart-file!" << endl;
4324 const MInt noLocalPartitionCells =
4325 grid().localPartitionCellOffsetsRestart(1) - grid().localPartitionCellOffsetsRestart(0);
4326 const MInt noGlobalPartitionCells = grid().localPartitionCellOffsetsRestart(2);
4328 MIntScratchSpace noParticlesPerLocalPartitionCell(noLocalPartitionCells, AT_,
"noParticlesPerLocalPartitionCell");
4329 noParticlesPerLocalPartitionCell.
fill(0);
4331 MPI_Allgather(&noParticles, 1, MPI_INT, &ncmpiPartCount[0], 1, MPI_INT, mpiComm(), AT_,
"noParticles",
4335 ParallelIo::size_type globalNoParticles = 0;
4336 for(
MInt i = 0; i < noDomains(); ++i) {
4337 globalNoParticles += ncmpiPartCount[i];
4340 if(domainId() == 0) {
4341 cerr <<
"Write Particle Restart at Time step " <<
globalTimeStep << endl;
4342 cerr <<
"for number of particles: " << globalNoParticles <<
" and " << noGlobalPartitionCells <<
" partition cells!"
4346 m_log <<
"Time step " <<
globalTimeStep <<
" -- this proc. has " << noParticles <<
" particles of "
4347 << globalNoParticles << endl;
4350 ParallelIo::size_type ncmpiStart = grid().localPartitionCellOffsetsRestart(0);
4351 ParallelIo::size_type ncmpiCount = noLocalPartitionCells;
4355 const MLong globalId2 = c_globalId(j.m_cellId);
4356 if(globalId1 != globalId2) {
4357 return (c_globalId(i.m_cellId) < c_globalId(j.m_cellId));
4363 if(a_noParticles() > 0) {
4364 own_sort(m_partList, sortByGId);
4366 MInt localPartitionCellCounter = 0;
4367 for(auto i1 = m_partList.begin(); i1 != m_partList.end(); i1++) {
4368 MLong particleGlobalCellId = c_globalId(i1->m_cellId);
4369 if(i1->isInvalid()) continue;
4370 if(a_isHalo(i1->m_cellId)) {
4371 cerr << "Particle in halo-cell!" << endl;
4372 cerr << i1->hadWallColl() << " " << i1->isInvalid() << " " << i1->reqSend() << " " << i1->m_partId << endl;
4373 MInt cellId2 = grid().findContainingLeafCell(&i1->m_position[0]);
4374 MInt cellId3 = grid().findContainingLeafCell(&i1->m_position[0], i1->m_cellId, true);
4375 cerr << i1->m_cellId << " " << cellId2 << " " << cellId3 << endl;
4377 if((localPartitionCellCounter + 1 < noLocalPartitionCells)) {
4378 ASSERT(grid().localPartitionCellGlobalIdsRestart(localPartitionCellCounter + 1) > -1, "");
4379 while(particleGlobalCellId >= grid().localPartitionCellGlobalIdsRestart(localPartitionCellCounter + 1)) {
4380 localPartitionCellCounter++;
4381 if(localPartitionCellCounter + 1 >= noLocalPartitionCells) {
4386 noParticlesPerLocalPartitionCell[localPartitionCellCounter] += 1;
4391 outputDir() + "restartPart_" + getIdentifier() + to_string(globalTimeStep) + ParallelIo::fileExt();
4394 ParallelIo parallelIo(ncmpiFileName, PIO_REPLACE, mpiComm());
4398 if(m_activePrimaryBUp || m_activeSecondaryBUp) {
4399 MInt globalInjStep = m_sprayModel->m_injStep;
4400 MPI_Allreduce(MPI_IN_PLACE, &globalInjStep, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "globalInjStep");
4401 parallelIo.setAttribute(globalInjStep, "injStep");
4403 MFloat globalTimeSOI = m_sprayModel->timeSinceSOI();
4404 MPI_Allreduce(MPI_IN_PLACE, &globalTimeSOI, 1, MPI_DOUBLE, MPI_MAX, mpiComm(), AT_, "INPLACE", "globalTimeSOI");
4405 parallelIo.setAttribute(globalTimeSOI, "timeSinceSOI");
4407 parallelIo.setAttribute(m_time, "time");
4410 MInt count = m_PRNGSpawnCount;
4411 MFloat particleResiduum = 0;
4413 if(m_activePrimaryBUp || m_activeSecondaryBUp) {
4414 count = m_sprayModel->m_PRNGPrimBreakUpCount;
4415 particleResiduum = std::numeric_limits<MFloat>::max();
4418 if(m_spawnParticles) {
4419 particleResiduum = std::numeric_limits<MFloat>::max();
4422 if(domainId() != m_spawnDomainId) {
4423 ASSERT(count == 0, "");
4429 if(m_spawnParticles) {
4430 PRNG.seed(m_spawnSeed);
4431 } else if(m_sprayModel) {
4432 PRNG.seed(m_sprayModel->m_spraySeed);
4434 const MInt maxCount = 1000000000;
4435 for(MInt i = 0; i < maxCount; i++) {
4436 if(!m_activePrimaryBUp && !m_activeSecondaryBUp && PRNG == m_PRNGSpawn) {
4438 } else if((m_activePrimaryBUp || m_activeSecondaryBUp) && PRNG == m_sprayModel->m_PRNGPrimBreakUp) {
4445 ASSERT(m_PRNGSpawnCount == j
4446 || ((m_activePrimaryBUp || m_activeSecondaryBUp) && j == m_sprayModel->m_PRNGPrimBreakUpCount),
4449 cerr << "PRNG-state could not be checked" << endl;
4453 if(m_spawnCellId > -1) {
4454 particleResiduum = m_particleResiduum;
4457 MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "count");
4458 MPI_Allreduce(MPI_IN_PLACE, &particleResiduum, 1, MPI_DOUBLE, MPI_MIN, mpiComm(), AT_, "INPLACE", "particleResiduum");
4460 parallelIo.setAttribute(count, "spawnCount");
4461 parallelIo.setAttribute(particleResiduum, "particleResiduum");
4463 parallelIo.defineArray(PIO_INT,
"partCount", noGlobalPartitionCells);
4465 if(globalNoParticles > 0) {
4466 parallelIo.defineArray(PIO_LONG,
"partId", globalNoParticles);
4467 parallelIo.defineArray(PIO_FLOAT,
"partDia", globalNoParticles);
4468 parallelIo.defineArray(PIO_INT,
"partStatus", globalNoParticles);
4469 parallelIo.defineArray(PIO_FLOAT,
"partPos", 3 * globalNoParticles);
4470 parallelIo.defineArray(PIO_FLOAT,
"partVel", 3 * globalNoParticles);
4471 parallelIo.defineArray(PIO_FLOAT,
"partAccel", 3 * globalNoParticles);
4472 parallelIo.defineArray(PIO_FLOAT,
"oldPos", 3 * globalNoParticles);
4473 parallelIo.defineArray(PIO_FLOAT,
"oldVel", 3 * globalNoParticles);
4474 parallelIo.defineArray(PIO_FLOAT,
"oldAccel", 3 * globalNoParticles);
4475 parallelIo.defineArray(PIO_INT,
"partParceledNo", globalNoParticles);
4476 parallelIo.defineArray(PIO_FLOAT,
"creationTime", globalNoParticles);
4477 parallelIo.defineArray(PIO_FLOAT,
"oldFluidVelocity", 3 * globalNoParticles);
4478 parallelIo.defineArray(PIO_FLOAT,
"oldFluidDensity", globalNoParticles);
4479 if(m_activePrimaryBUp || m_activeSecondaryBUp) {
4480 parallelIo.defineArray(PIO_FLOAT,
"breakUpTime", globalNoParticles);
4482 if(m_heatCoupling || m_evaporation) {
4483 parallelIo.defineArray(PIO_FLOAT,
"partTemp", globalNoParticles);
4484 parallelIo.defineArray(PIO_FLOAT,
"partDM", globalNoParticles);
4486 if(m_activeSecondaryBUp) {
4487 parallelIo.defineArray(PIO_FLOAT,
"shedD", globalNoParticles);
4492 parallelIo.setOffset(ncmpiCount, ncmpiStart);
4493 parallelIo.writeArray(&noParticlesPerLocalPartitionCell[0],
"partCount");
4497 for(
MInt i = 0; i < domainId(); ++i) {
4498 ncmpiStart += ncmpiPartCount[i];
4500 ncmpiCount = ncmpiPartCount[domainId()];
4501 if(ncmpiStart >= globalNoParticles) {
4502 if(ncmpiCount == 0) {
4505 mTerm(1, AT_,
"Error ncmpiStart >= globalNoParticles but ncmpiCount != 0");
4508 ASSERT(ncmpiCount == a_noParticles(),
"");
4510 if(globalNoParticles > 0) {
4521 MIntScratchSpace ncmpiPartParceledNo(ncmpiCount, AT_,
"ncmpiPartParceledNo");
4527 MFloatScratchSpace ncmpiOldFluidVelocity(3 * ncmpiCount, AT_,
"ncmpiOldFluidVelocity");
4532 for(
MInt i = 0; i < a_noParticles(); i++) {
4533 if(m_partList[i].isInvalid())
continue;
4534 ncmpiPartId[
id] = m_partList[i].m_partId;
4535 ncmpiDiameter[
id] = m_partList[i].m_diameter;
4536 ncmpiDensityRatio[
id] = m_partList[i].m_densityRatio;
4537 ncmpiPartStatus[
id] = (m_partList[i].firstStep() ? 1 : 0);
4538 ncmpiPartCoords[(3 *
id)] = m_partList[i].m_position[0];
4539 ncmpiPartCoords[(3 *
id) + 1] = m_partList[i].m_position[1];
4540 ncmpiPartCoords[(3 *
id) + 2] = m_partList[i].m_position[2];
4541 ncmpiPartVel[(3 *
id)] = m_partList[i].m_velocity[0];
4542 ncmpiPartVel[(3 *
id) + 1] = m_partList[i].m_velocity[1];
4543 ncmpiPartVel[(3 *
id) + 2] = m_partList[i].m_velocity[2];
4544 ncmpiPartAccel[(3 *
id)] = m_partList[i].m_accel[0];
4545 ncmpiPartAccel[(3 *
id) + 1] = m_partList[i].m_accel[1];
4546 ncmpiPartAccel[(3 *
id) + 2] = m_partList[i].m_accel[2];
4547 ncmpiOldCoords[(3 *
id)] = m_partList[i].m_oldPos[0];
4548 ncmpiOldCoords[(3 *
id) + 1] = m_partList[i].m_oldPos[1];
4549 ncmpiOldCoords[(3 *
id) + 2] = m_partList[i].m_oldPos[2];
4550 ncmpiOldVel[(3 *
id)] = m_partList[i].m_oldVel[0];
4551 ncmpiOldVel[(3 *
id) + 1] = m_partList[i].m_oldVel[1];
4552 ncmpiOldVel[(3 *
id) + 2] = m_partList[i].m_oldVel[2];
4553 ncmpiOldAccel[(3 *
id)] = m_partList[i].m_oldAccel[0];
4554 ncmpiOldAccel[(3 *
id) + 1] = m_partList[i].m_oldAccel[1];
4555 ncmpiOldAccel[(3 *
id) + 2] = m_partList[i].m_oldAccel[2];
4556 ncmpiPartParceledNo[
id] = m_partList[i].m_noParticles;
4557 ncmpiTemp[
id] = m_partList[i].m_temperature;
4558 ncmpiDM[
id] = m_partList[i].m_dM;
4560 ncmpiBT[
id] = m_partList[i].m_breakUpTime;
4561 if(m_activeSecondaryBUp) {
4562 ncmpishedD[
id] = m_partList[i].m_shedDiam;
4565 ncmpiOldFluidDensity[
id] = m_partList[i].m_oldFluidDensity;
4566 ncmpiOldFluidVelocity[(3 *
id)] = m_partList[i].m_oldFluidVel[0];
4567 ncmpiOldFluidVelocity[(3 *
id) + 1] = m_partList[i].m_oldFluidVel[1];
4568 ncmpiOldFluidVelocity[(3 *
id) + 2] = m_partList[i].m_oldFluidVel[2];
4573 parallelIo.setOffset(ncmpiCount, ncmpiStart);
4575 parallelIo.writeArray(ncmpiPartId.
begin(),
"partId");
4576 parallelIo.writeArray(ncmpiDiameter.
begin(),
"partDia");
4577 parallelIo.writeArray(ncmpiPartStatus.
begin(),
"partStatus");
4578 parallelIo.writeArray(ncmpiPartParceledNo.
begin(),
"partParceledNo");
4579 parallelIo.writeArray(ncmpiCT.
begin(),
"creationTime");
4581 if(m_activePrimaryBUp || m_activeSecondaryBUp) {
4582 parallelIo.writeArray(ncmpiBT.
begin(),
"breakUpTime");
4585 if(m_heatCoupling || m_evaporation) {
4586 parallelIo.writeArray(ncmpiTemp.
begin(),
"partTemp");
4587 parallelIo.writeArray(ncmpiDM.
begin(),
"partDM");
4590 if(m_activeSecondaryBUp) {
4591 parallelIo.writeArray(ncmpishedD.
begin(),
"shedD");
4594 parallelIo.writeArray(ncmpiOldFluidDensity.
begin(),
"oldFluidDensity");
4597 ParallelIo::size_type ncmpi3Start = 3 * ncmpiStart;
4598 parallelIo.setOffset(ncmpiCount, ncmpi3Start);
4599 parallelIo.writeArray(ncmpiPartCoords.
begin(),
"partPos");
4600 parallelIo.writeArray(ncmpiPartVel.
begin(),
"partVel");
4601 parallelIo.writeArray(ncmpiPartAccel.
begin(),
"partAccel");
4602 parallelIo.writeArray(ncmpiOldCoords.
begin(),
"oldPos");
4603 parallelIo.writeArray(ncmpiOldVel.
begin(),
"oldVel");
4604 parallelIo.writeArray(ncmpiOldAccel.
begin(),
"oldAccel");
4605 parallelIo.writeArray(ncmpiOldFluidVelocity.
begin(),
"oldFluidVelocity");
4611 MInt noEllipsoids = 0;
4612 for(
MInt id = 0;
id < a_noEllipsoidalParticles();
id++) {
4613 if(m_partListEllipsoid[
id].isInvalid()) {
4614 cerr <<
"Invalid particle when writing restart-file!" << endl;
4620 MIntScratchSpace noEllipsoidsPerLocalPartitionCell(noLocalPartitionCells, AT_,
"noEllipsoidsPerLocalPartitionCell");
4621 noEllipsoidsPerLocalPartitionCell.
fill(0);
4622 MIntScratchSpace ncmpiEllipsoidCount(noDomains(), AT_,
"ncmpiEllipsoidCount");
4623 MPI_Allgather(&noEllipsoids, 1, MPI_INT, &ncmpiEllipsoidCount[0], 1, MPI_INT, mpiComm(), AT_,
"noEllipsoids",
4624 "ncmpiEllipsoidCount");
4627 ParallelIo::size_type globalNoEllipsoids = 0;
4628 for(
MInt i = 0; i < noDomains(); ++i) {
4629 globalNoEllipsoids += ncmpiEllipsoidCount[i];
4632 m_log <<
"Time step " <<
globalTimeStep <<
" -- this proc. has " << noEllipsoids <<
" ellipsoids of "
4633 << globalNoEllipsoids << endl;
4634 if(domainId() == 0) {
4635 cerr <<
"Write Ellipsoid Restart at Time step " <<
globalTimeStep << endl;
4636 cerr <<
"for number of ellipsoids: " << globalNoEllipsoids <<
" and " << noGlobalPartitionCells
4637 <<
" partition cells!" << endl;
4640 ncmpiStart = grid().localPartitionCellOffsetsRestart(0);
4641 ncmpiCount = noLocalPartitionCells;
4644 if(a_noEllipsoidalParticles() > 0) {
4645 own_sort(m_partListEllipsoid, sortByGId);
4647 MInt localPartitionCellCounter = 0;
4648 for(
auto i1 = m_partListEllipsoid.begin(); i1 != m_partListEllipsoid.end(); i1++) {
4649 MLong particleGlobalCellId = c_globalId(i1->m_cellId);
4650 if(i1->isInvalid())
continue;
4651 if(a_isHalo(i1->m_cellId)) {
4652 cerr <<
"Particle in halo-cell!" << endl;
4653 cerr << i1->hadWallColl() <<
" " << i1->isInvalid() <<
" " << i1->reqSend() <<
" " << i1->m_partId << endl;
4654 MInt cellId2 = grid().findContainingLeafCell(&i1->m_position[0]);
4655 MInt cellId3 = grid().findContainingLeafCell(&i1->m_position[0], i1->m_cellId,
true);
4656 cerr << i1->m_cellId <<
" " << cellId2 <<
" " << cellId3 << endl;
4658 if((localPartitionCellCounter + 1 < noLocalPartitionCells)) {
4659 ASSERT(grid().localPartitionCellGlobalIdsRestart(localPartitionCellCounter + 1) > -1,
"");
4660 while(particleGlobalCellId >= grid().localPartitionCellGlobalIdsRestart(localPartitionCellCounter + 1)) {
4661 localPartitionCellCounter++;
4662 if(localPartitionCellCounter + 1 >= noLocalPartitionCells) {
4667 noEllipsoidsPerLocalPartitionCell[localPartitionCellCounter] += 1;
4672 outputDir() +
"restartPartEllipsoid_" + getIdentifier() + to_string(
globalTimeStep) + ParallelIo::fileExt();
4674 ParallelIo parallelIoE(ncmpiFileName, PIO_REPLACE, mpiComm());
4679 parallelIoE.defineArray(PIO_INT,
"partCount", noGlobalPartitionCells);
4682 if(globalNoEllipsoids > 0) {
4683 parallelIoE.defineArray(PIO_LONG,
"partId", globalNoEllipsoids);
4684 parallelIoE.defineArray(PIO_FLOAT,
"partSemiMinorAxis", globalNoEllipsoids);
4685 parallelIoE.defineArray(PIO_FLOAT,
"partDia", globalNoEllipsoids);
4686 parallelIoE.defineArray(PIO_FLOAT,
"partAspectRatio", globalNoEllipsoids);
4687 parallelIoE.defineArray(PIO_FLOAT,
"partDensityRatio", globalNoEllipsoids);
4688 parallelIoE.defineArray(PIO_INT,
"partStatus", globalNoEllipsoids);
4689 parallelIoE.defineArray(PIO_FLOAT,
"partPos", 3 * globalNoEllipsoids);
4690 parallelIoE.defineArray(PIO_FLOAT,
"partVel", 3 * globalNoEllipsoids);
4691 parallelIoE.defineArray(PIO_FLOAT,
"partAccel", 3 * globalNoEllipsoids);
4692 parallelIoE.defineArray(PIO_FLOAT,
"partAngVel", 3 * globalNoEllipsoids);
4693 parallelIoE.defineArray(PIO_FLOAT,
"partAngAccel", 3 * globalNoEllipsoids);
4694 parallelIoE.defineArray(PIO_FLOAT,
"partOldPos", 3 * globalNoEllipsoids);
4695 parallelIoE.defineArray(PIO_FLOAT,
"partOldVel", 3 * globalNoEllipsoids);
4696 parallelIoE.defineArray(PIO_FLOAT,
"partOldAccel", 3 * globalNoEllipsoids);
4697 parallelIoE.defineArray(PIO_FLOAT,
"partOldAngVel", 3 * globalNoEllipsoids);
4698 parallelIoE.defineArray(PIO_FLOAT,
"partOldAngAccel", 3 * globalNoEllipsoids);
4699 parallelIoE.defineArray(PIO_FLOAT,
"partQuat", 4 * globalNoEllipsoids);
4700 parallelIoE.defineArray(PIO_FLOAT,
"partOldQuat", 4 * globalNoEllipsoids);
4701 parallelIoE.defineArray(PIO_FLOAT,
"creationTime", globalNoEllipsoids);
4702 parallelIoE.defineArray(PIO_FLOAT,
"oldFluidVelocity", 3 * globalNoEllipsoids);
4703 parallelIoE.defineArray(PIO_FLOAT,
"oldFluidDensity", globalNoEllipsoids);
4704 if(m_heatCoupling) parallelIoE.defineArray(PIO_FLOAT,
"partTemp", globalNoEllipsoids);
4708 parallelIoE.setOffset(ncmpiCount, ncmpiStart);
4709 parallelIoE.writeArray(&noEllipsoidsPerLocalPartitionCell[0],
"partCount");
4713 for(
MInt i = 0; i < domainId(); ++i) {
4714 ncmpiStart += ncmpiEllipsoidCount[i];
4716 ncmpiCount = ncmpiEllipsoidCount[domainId()];
4717 if(ncmpiStart >= globalNoEllipsoids) {
4718 if(ncmpiCount == 0) {
4721 mTerm(1, AT_,
"m_ellipsoids ncmpiStart >= globalNoEllipsoids but ncmpiCount != 0");
4724 ASSERT(ncmpiCount == a_noEllipsoidalParticles(),
"");
4741 MFloatScratchSpace ncmpiPartOldAngAccel(3 * ncmpiCount, AT_,
"ncmpiPartOldAngAccel");
4747 MFloatScratchSpace ncmpiOldFluidVelocity(3 * ncmpiCount, AT_,
"ncmpiOldFluidVelocity");
4749 for(
MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
4750 if(m_partListEllipsoid[i].isInvalid())
continue;
4751 ncmpiPartId[i] = m_partListEllipsoid[i].m_partId;
4752 ncmpiSemiMinorAxis[i] = m_partListEllipsoid[i].m_semiMinorAxis;
4753 ncmpiEquivalentDia[i] = m_partListEllipsoid[i].equivalentDiameter();
4754 ncmpiAspectRatio[i] = m_partListEllipsoid[i].m_aspectRatio;
4755 ncmpiDensityRatio[i] = m_partListEllipsoid[i].m_densityRatio;
4756 ncmpiPartStatus[i] = (m_partListEllipsoid[i].firstStep() ? 1 : 0);
4757 for(
MInt n = 0; n < nDim; n++) {
4758 ncmpiPartCoords[(3 * i) + n] = m_partListEllipsoid[i].m_position[n];
4759 ncmpiPartVel[(3 * i) + n] = m_partListEllipsoid[i].m_velocity[n];
4760 ncmpiPartAccel[(3 * i) + n] = m_partListEllipsoid[i].m_accel[n];
4761 ncmpiPartAngVel[(3 * i) + n] = m_partListEllipsoid[i].m_angularVel[n];
4762 ncmpiPartAngAccel[(3 * i) + n] = m_partListEllipsoid[i].m_angularAccel[n];
4763 ncmpiPartOldCoords[(3 * i) + n] = m_partListEllipsoid[i].m_oldPos[n];
4764 ncmpiPartOldVel[(3 * i) + n] = m_partListEllipsoid[i].m_oldVel[n];
4765 ncmpiPartOldAccel[(3 * i) + n] = m_partListEllipsoid[i].m_oldAccel[n];
4766 ncmpiPartOldAngVel[(3 * i) + n] = m_partListEllipsoid[i].m_oldAngularVel[n];
4767 ncmpiPartOldAngAccel[(3 * i) + n] = m_partListEllipsoid[i].m_oldAngularAccel[n];
4768 ncmpiOldFluidVelocity[(3 * i) + n] = m_partListEllipsoid[i].m_oldFluidVel[n];
4770 for(
MInt n = 0; n < 4; n++) {
4771 ncmpiPartQuat[(4 * i) + n] = m_partListEllipsoid[i].m_quaternion[n];
4772 ncmpiPartOldQuat[(4 * i) + n] = m_partListEllipsoid[i].m_oldQuaternion[n];
4774 ncmpiOldFluidDensity[i] = m_partListEllipsoid[i].m_oldFluidDensity;
4775 ncmpiTemp[i] = m_partListEllipsoid[i].m_temperature;
4776 ncmpiCT[i] = m_partListEllipsoid[i].m_creationTime;
4779 parallelIoE.setOffset(ncmpiCount, ncmpiStart);
4780 parallelIoE.writeArray(ncmpiPartId.
begin(),
"partId");
4781 parallelIoE.writeArray(ncmpiSemiMinorAxis.
begin(),
"partSemiMinorAxis");
4782 parallelIoE.writeArray(ncmpiEquivalentDia.
begin(),
"partDia");
4783 parallelIoE.writeArray(ncmpiAspectRatio.
begin(),
"partAspectRatio");
4784 parallelIoE.writeArray(ncmpiDensityRatio.
begin(),
"partDensityRatio");
4785 parallelIoE.writeArray(ncmpiPartStatus.
begin(),
"partStatus");
4786 parallelIoE.writeArray(ncmpiCT.
begin(),
"creationTime");
4787 parallelIoE.writeArray(ncmpiOldFluidDensity.
begin(),
"oldFluidDensity");
4788 if(m_heatCoupling) parallelIoE.writeArray(ncmpiTemp.
begin(),
"partTemp");
4791 ParallelIo::size_type ncmpi3Start = 3 * ncmpiStart;
4792 parallelIoE.setOffset(ncmpiCount, ncmpi3Start);
4793 parallelIoE.writeArray(ncmpiPartCoords.
begin(),
"partPos");
4794 parallelIoE.writeArray(ncmpiPartVel.
begin(),
"partVel");
4795 parallelIoE.writeArray(ncmpiPartAccel.
begin(),
"partAccel");
4796 parallelIoE.writeArray(ncmpiPartAngVel.
begin(),
"partAngVel");
4797 parallelIoE.writeArray(ncmpiPartAngAccel.
begin(),
"partAngAccel");
4798 parallelIoE.writeArray(ncmpiPartOldCoords.
begin(),
"partOldPos");
4799 parallelIoE.writeArray(ncmpiPartOldVel.
begin(),
"partOldVel");
4800 parallelIoE.writeArray(ncmpiPartOldAccel.
begin(),
"partOldAccel");
4801 parallelIoE.writeArray(ncmpiPartOldAngVel.
begin(),
"partOldAngVel");
4802 parallelIoE.writeArray(ncmpiPartOldAngAccel.
begin(),
"partOldAngAccel");
4804 ParallelIo::size_type ncmpi4Start = 4 * ncmpiStart;
4807 parallelIoE.setOffset(ncmpiCount, ncmpi4Start);
4808 parallelIoE.writeArray(ncmpiPartQuat.
begin(),
"partQuat");
4809 parallelIoE.writeArray(ncmpiPartOldQuat.
begin(),
"partOldQuat");
4826 grid().updatePartitionCellOffsets();
4832 MString particleRestartFilename =
4833 restartDir() + "restartPart_" + getIdentifier() + to_string(globalTimeStep) + ParallelIo::fileExt();
4838 struct stat buffer {};
4839 if(stat(particleRestartFilename.c_str(), &buffer) != 0) {
4840 if(domainId() == 0) {
4841 cerr << "WARNING: starting with restart but no particle file present!" << endl;
4842 m_log << "WARNING: starting with restart but no particle file present!" << endl;
4844 m_restartFile = false;
4849 MFloat loadedTimeStep = 0;
4850 ParallelIo parallelIo(particleRestartFilename, PIO_READ, mpiComm());
4852 parallelIo.getAttribute(&loadedTimeStep, "timestep");
4854 stringstream errorMessage;
4855 errorMessage << "Error! restartTimeStep = " << globalTimeStep << " differs from timestep in restartPart"
4856 << ParallelIo::fileExt() << ", which is " << loadedTimeStep << endl;
4857 mTerm(1, AT_, errorMessage.str());
4860 parallelIo.getAttribute(&m_time, "time");
4862 if(m_activePrimaryBUp || m_activeSecondaryBUp) {
4863 parallelIo.getAttribute(&m_sprayModel->m_injStep, "injStep");
4864 parallelIo.getAttribute(&m_sprayModel->timeSinceSOI(), "timeSinceSOI");
4869 const MInt noGlobalPartitionCells = grid().localPartitionCellOffsetsRestart(2);
4870 const MInt localPartitionCellBeginning = grid().localPartitionCellOffsetsRestart(0);
4871 const MInt localPartitionCellEnd = grid().localPartitionCellOffsetsRestart(1);
4873 MIntScratchSpace noParticlesPerPartitionCell(noGlobalPartitionCells, AT_, "noParticlesPerPartitionCell");
4875 if(domainId() == 0) {
4876 cerr << "noGlobalPartitionCells " << noGlobalPartitionCells << endl;
4880 parallelIo.setOffset(noGlobalPartitionCells, 0);
4881 parallelIo.readArray(&noParticlesPerPartitionCell[0], "partCount");
4884 MInt numberOfParts = 0;
4886#pragma omp parallel for reduction(+ : numberOfParts)
4888 for(MInt i = 0; i < noGlobalPartitionCells; ++i) {
4889 numberOfParts += noParticlesPerPartitionCell[i];
4891 if(domainId() == 0) {
4892 cerr << "Global number of particles loaded from restart file " << numberOfParts << endl;
4894 m_log << "Global number of particles loaded from restart file " << numberOfParts << endl;
4897 m_PRNGSpawn.seed(m_spawnSeed);
4898 m_particleResiduum = 0;
4899 if(domainId() == m_spawnDomainId) {
4900 parallelIo.getAttribute(&m_PRNGSpawnCount,
"spawnCount");
4901 m_PRNGSpawn.discard(m_PRNGSpawnCount);
4903 m_log <<
"PRNG state after restart " << randomSpawn(0) << endl;
4905 parallelIo.getAttribute(&m_particleResiduum,
"particleResiduum");
4908 ParallelIo::size_type beginPartId = 0;
4910 for(
MInt i = 0; i < localPartitionCellBeginning; ++i) {
4911 beginPartId += noParticlesPerPartitionCell[i];
4914 MInt localNoPart = noParticlesPerPartitionCell[localPartitionCellBeginning];
4915 for(
MInt i = (localPartitionCellBeginning + 1); i < localPartitionCellEnd; ++i) {
4916 localNoPart += noParticlesPerPartitionCell[i];
4919 MInt checkPartCount = 0;
4920 MPI_Allreduce(&localNoPart, &checkPartCount, 1, MPI_INT, MPI_SUM, mpiComm(), AT_,
"INPLACE",
"localNoPart");
4922 if(domainId() == 0 && checkPartCount != numberOfParts) {
4927 ASSERT(localNoPart >= 0,
"ERROR: Invalid number of particles to be loaded during restart");
4929 if(localNoPart == 0) {
4936 if(numberOfParts > 0) {
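  // Read this rank's slice [beginPartId, beginPartId + localNoPart) of the
  // spherical-particle arrays and rebuild m_partList entry by entry.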
  MIntScratchSpace ncmpiPartParceledNo(localNoPart, AT_, "ncmpiPartParceledNo");
  MFloatScratchSpace ncmpiOldFluidVelocity(3 * localNoPart, AT_, "ncmpiOldFluidVelocity");

  parallelIo.setOffset(localNoPart, beginPartId);
  parallelIo.readArray(ncmpiPartId.begin(), "partId");
  parallelIo.readArray(ncmpiDiameter.begin(), "partDia");
  parallelIo.readArray(ncmpiPartStatus.begin(), "partStatus");
  parallelIo.readArray(ncmpiPartParceledNo.begin(), "partParceledNo");
  parallelIo.readArray(ncmpiCT.begin(), "creationTime");
  if(m_activePrimaryBUp || m_activeSecondaryBUp) {
    parallelIo.readArray(ncmpiBT.begin(), "breakUpTime");
  }
  if(m_activeSecondaryBUp) {
    parallelIo.readArray(ncmpiShedD.begin(), "shedD");
  }
  if(m_heatCoupling || m_evaporation) {
    parallelIo.readArray(ncmpiTemp.begin(), "partTemp");
    parallelIo.readArray(ncmpiDM.begin(), "partDM");
  }
  parallelIo.readArray(ncmpiOldFluidDensity.begin(), "oldFluidDensity");

  ParallelIo::size_type ncmpi3Start = 3 * beginPartId;
  ParallelIo::size_type ncmpi3Count = 3 * localNoPart;
  parallelIo.setOffset(ncmpi3Count, ncmpi3Start);
  parallelIo.readArray(ncmpiPartCoords.begin(), "partPos");
  parallelIo.readArray(ncmpiPartVel.begin(), "partVel");
  parallelIo.readArray(ncmpiPartAccel.begin(), "partAccel");
  parallelIo.readArray(ncmpiOldFluidVelocity.begin(), "oldFluidVelocity");

  for(MLong i = 0; i < localNoPart; ++i) {
    thisParticle.m_position[0] = ncmpiPartCoords[(3 * i)];
    thisParticle.m_position[1] = ncmpiPartCoords[(3 * i) + 1];
    thisParticle.m_position[2] = ncmpiPartCoords[(3 * i) + 2];

    const MInt cellId = grid().findContainingLeafCell(&thisParticle.m_position[0]);
    if(cellId == -1) continue;
    if(a_isHalo(cellId)) {
      cerr << "Particle in halo-cell!" << endl;
    }

    ASSERT(thisParticle.m_cellId >= 0, "Invalid cellId! " + to_string(thisParticle.m_cellId));
    ASSERT(c_isLeafCell(thisParticle.m_cellId), "No leaf cell... " + std::to_string(thisParticle.m_cellId));

    thisParticle.m_partId = ncmpiPartId[i];
    thisParticle.m_dM = ncmpiDM[i];
    thisParticle.firstStep() = (ncmpiPartStatus[i] > 0);
    thisParticle.m_velocity[0] = ncmpiPartVel[(3 * i)];
    thisParticle.m_velocity[1] = ncmpiPartVel[(3 * i) + 1];
    thisParticle.m_velocity[2] = ncmpiPartVel[(3 * i) + 2];
    thisParticle.m_accel[0] = ncmpiPartAccel[(3 * i)];
    thisParticle.m_accel[1] = ncmpiPartAccel[(3 * i) + 1];
    thisParticle.m_accel[2] = ncmpiPartAccel[(3 * i) + 2];

    for(MInt j = 0; j < nDim; j++) {
    }
    for(MInt j = 0; j < nDim; j++) {
      thisParticle.m_oldFluidVel[j] = ncmpiOldFluidVelocity[(3 * i) + j];
    }

    if(m_activeSecondaryBUp) {
    }

    m_partList.push_back(thisParticle);
  }
}

MLong noPart = a_noParticles();

if(domainId() == 0) {
  cerr << "Loaded " << numberOfParts << " particles of " << sumPart << endl;
}
if(numberOfParts != sumPart) {
  TERMM(-1, "invalid number of particles loaded");
}
particleRestartFilename =
    restartDir() + "restartPartEllipsoid_" + getIdentifier() + to_string(globalTimeStep) + ParallelIo::fileExt();

ParallelIo parallelIo2(particleRestartFilename, PIO_READ, mpiComm());

MInt loadedTimeStep2 = 0;
parallelIo2.getAttribute(&loadedTimeStep2, "timestep");

stringstream errorMessage;
errorMessage << endl
             << " differs from timestep in restartPartEllipsoid" << ParallelIo::fileExt() << ", which is "
             << loadedTimeStep2 << endl;
mTerm(1, AT_, errorMessage.str());

const MInt noGlobalPartitionCells = grid().localPartitionCellOffsetsRestart(2);
const MInt localPartitionCellBeginning = grid().localPartitionCellOffsetsRestart(0);
const MInt localPartitionCellEnd = grid().localPartitionCellOffsetsRestart(1);

MIntScratchSpace noEllipsoidsPerPartitionCell(noGlobalPartitionCells, AT_, "noParticlesPerPartitionCell");

if(domainId() == 0) {
  cerr << "noGlobalPartitionCells " << noGlobalPartitionCells << endl;
}

parallelIo2.setOffset(noGlobalPartitionCells, 0);
parallelIo2.readArray(&noEllipsoidsPerPartitionCell[0], "partCount");

MInt numberOfEllips = 0;
#pragma omp parallel for reduction(+ : numberOfEllips)
for(MInt i = 0; i < noGlobalPartitionCells; ++i) {
  numberOfEllips += noEllipsoidsPerPartitionCell[i];
}

if(domainId() == 0) {
  cerr << "Global number of ellipsoids loaded from restart file " << numberOfEllips << endl;
  m_log << "Global number of ellipsoids loaded from restart file " << numberOfEllips << endl;
}

ParallelIo::size_type beginPartId = 0;
for(MInt i = 0; i < localPartitionCellBeginning; ++i) {
  beginPartId += noEllipsoidsPerPartitionCell[i];
}

MInt localNoPart = noEllipsoidsPerPartitionCell[localPartitionCellBeginning];
for(MInt i = (localPartitionCellBeginning + 1); i < localPartitionCellEnd; ++i) {
  localNoPart += noEllipsoidsPerPartitionCell[i];
}

MInt checkPartCount = 0;
MPI_Allreduce(&localNoPart, &checkPartCount, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "localNoPart");

if(domainId() == 0 && checkPartCount != numberOfEllips) {
}

ASSERT(localNoPart >= 0, "ERROR: Invalid number of ellipsoids to be loaded during restart");

if(localNoPart == 0) {
}

if(numberOfEllips > 0) {
  MFloatScratchSpace ncmpiPartOldAngAccel(3 * localNoPart, AT_, "ncmpiPartOldAngAccel");
  MFloatScratchSpace ncmpiPartOldQuat(4 * localNoPart, AT_, "ncmpiPartOldQuaternions");
  MFloatScratchSpace ncmpiOldFluidVelocity(3 * localNoPart, AT_, "ncmpiOldFluidVelocity");

  parallelIo2.setOffset(localNoPart, beginPartId);
  parallelIo2.readArray(ncmpiPartId.begin(), "partId");
  parallelIo2.readArray(ncmpiSemiMinorAxis.begin(), "partSemiMinorAxis");
  parallelIo2.readArray(ncmpiAspectRatio.begin(), "partAspectRatio");
  parallelIo2.readArray(ncmpiDensityRatio.begin(), "partDensityRatio");
  parallelIo2.readArray(ncmpiPartStatus.begin(), "partStatus");
  parallelIo2.readArray(ncmpiCT.begin(), "creationTime");
  if(m_heatCoupling) parallelIo2.readArray(ncmpiTemp.begin(), "partTemp");
  parallelIo2.readArray(ncmpiOldFluidDensity.begin(), "oldFluidDensity");

  ParallelIo::size_type localNoPart3 = 3 * localNoPart;
  ParallelIo::size_type beginPartId3 = 3 * beginPartId;
  parallelIo2.setOffset(localNoPart3, beginPartId3);
  parallelIo2.readArray(ncmpiPartCoords.begin(), "partPos");
  parallelIo2.readArray(ncmpiPartVel.begin(), "partVel");
  parallelIo2.readArray(ncmpiPartAccel.begin(), "partAccel");
  parallelIo2.readArray(ncmpiPartAngVel.begin(), "partAngVel");
  parallelIo2.readArray(ncmpiPartAngAccel.begin(), "partAngAccel");
  parallelIo2.readArray(ncmpiPartOldCoords.begin(), "partOldPos");
  parallelIo2.readArray(ncmpiPartOldVel.begin(), "partOldVel");
  parallelIo2.readArray(ncmpiPartOldAccel.begin(), "partOldAccel");
  parallelIo2.readArray(ncmpiPartOldAngVel.begin(), "partOldAngVel");
  parallelIo2.readArray(ncmpiPartOldAngAccel.begin(), "partOldAngAccel");

  ParallelIo::size_type localNoPart4 = 4 * localNoPart;
  ParallelIo::size_type beginPartId4 = 4 * beginPartId;
  parallelIo2.setOffset(localNoPart4, beginPartId4);
  parallelIo2.readArray(ncmpiPartQuat.begin(), "partQuat");
  parallelIo2.readArray(ncmpiPartOldQuat.begin(), "partOldQuat");

  for(MLong i = 0; i < localNoPart; ++i) {
    for(MInt n = 0; n < nDim; n++)
      thisParticleEllipsoid.m_position[n] = ncmpiPartCoords[(3 * i) + n];

    const MInt cellId = grid().findContainingLeafCell(&thisParticleEllipsoid.m_position[0]);
    if(cellId == -1) continue;
    if(a_isHalo(cellId)) {
      cerr << "Particle in halo-cell!" << endl;
    }

    thisParticleEllipsoid.m_cellId = cellId;
    ASSERT(thisParticleEllipsoid.m_cellId >= 0, "Invalid cellId! " + to_string(thisParticleEllipsoid.m_cellId));
    ASSERT(c_isLeafCell(thisParticleEllipsoid.m_cellId),
           "No leaf cell... " + std::to_string(thisParticleEllipsoid.m_cellId));
    thisParticleEllipsoid.m_partId = ncmpiPartId[i];
    thisParticleEllipsoid.firstStep() = (ncmpiPartStatus[i] > 0);

    for(MInt n = 0; n < nDim; n++) {
      thisParticleEllipsoid.m_velocity[n] = ncmpiPartVel[(3 * i) + n];
      thisParticleEllipsoid.m_accel[n] = ncmpiPartAccel[(3 * i) + n];
      thisParticleEllipsoid.m_angularVel[n] = ncmpiPartAngVel[(3 * i) + n];
      thisParticleEllipsoid.m_angularAccel[n] = ncmpiPartAngAccel[(3 * i) + n];
      thisParticleEllipsoid.m_oldPos[n] = ncmpiPartOldCoords[(3 * i) + n];
      thisParticleEllipsoid.m_oldVel[n] = ncmpiPartOldVel[(3 * i) + n];
      thisParticleEllipsoid.m_oldAccel[n] = ncmpiPartOldAccel[(3 * i) + n];
      thisParticleEllipsoid.m_oldAngularVel[n] = ncmpiPartOldAngVel[(3 * i) + n];
      thisParticleEllipsoid.m_oldFluidVel[n] = ncmpiOldFluidVelocity[(3 * i) + n];
    }

    for(MInt n = 0; n < 4; n++) {
      thisParticleEllipsoid.m_quaternion[n] = ncmpiPartQuat[(4 * i) + n];
      thisParticleEllipsoid.m_oldQuaternion[n] = ncmpiPartOldQuat[(4 * i) + n];
    }

    m_partListEllipsoid.push_back(thisParticleEllipsoid);
  }
}

MLong sumEllips = 0;
MLong noEllips = a_noEllipsoidalParticles();

if(domainId() == 0) {
  cerr << "Loaded " << numberOfEllips << " particles of " << sumEllips << endl;
}
if(numberOfEllips != sumEllips) {
  TERMM(-1, "invalid number of ellipsoids loaded");
}

MInt theSize = a_noParticles();
theSize += a_noEllipsoidalParticles();

m_addedParticle = 0;
MLong noPart = a_noParticles();
MLong noEllipsoids = a_noEllipsoidalParticles();
noPart += noEllipsoids;

if(domainId() == m_spawnDomainId) {
  m_addedParticle = noPart;
}
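// Cell solution output: the block below collects per-cell LPT data
// (particle counts, coupling source terms, interpolated fluid state) and
// writes them to a "solutionLPT_*" file.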
MInt noInternalCellIds;
std::vector<MInt> recalcIdsSolver(0);
std::vector<MInt> reOrderedCells(0);
this->calcRecalcCellIdsSolver(recalcIds, noCells, noInternalCellIds, recalcIdsSolver, reOrderedCells);
MInt* pointerRecalcIds = (recalcIds == nullptr) ? nullptr : recalcIdsSolver.data();

if(grid().newMinLevel() > 0) {
  cerr0 << "Skipping LPT solution file when min-level changes are applied!" << endl;
}

MBool debugOutput = false;

const MInt noIdParams = 0;
const MInt noDbParams = 0;
const MInt noIdVars = 1 + m_ellipsoids;
MInt noDbVars = 1 + PV.noVars() + m_evaporation + m_massCoupling + m_heatCoupling + (nDim + 1) * m_momentumCoupling;
noDbVars += (nDim * nDim);

if(this->m_adaptation && this->m_noSensors > 0) {
  noDbVars = noDbVars + 1;
}
if(m_wallCollisions) {
  noDbVars = noDbVars + 1;
}

vector<MString> dbVariablesName;
vector<MString> idVariablesName;
vector<MString> dbParametersName;
vector<MString> idParametersName;
vector<MString> name;

name.push_back("noParticlesInCell");
for(MInt cell = 0; cell < noCells; cell++) {
  tmp[cell] = a_noParticlesInCell(cell);
}
this->collectVariables(tmp.begin(), idVariables, name, idVariablesName, 1, noCells);

name.push_back("noEllipsoidsInCell");
for(MInt cell = 0; cell < noCells; cell++) {
  tmp[cell] = a_noEllipsoidsInCell(cell);
}
this->collectVariables(tmp.begin(), idVariables, name, idVariablesName, 1, noCells);

if(m_massCoupling) {
  name.push_back("massFlux");
  for(MInt cell = 0; cell < noCells; cell++) {
    tmpW[cell] = a_massFlux(cell);
  }
  this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
}

if(m_momentumCoupling) {
  for(MInt i = 0; i < nDim; i++) {
    string tmps = "momentumFlux" + to_string(i);
    name.push_back(tmps);
    for(MInt cell = 0; cell < noCells; cell++) {
      tmpW[cell] = a_momentumFlux(cell, i);
    }
    this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
  }

  name.push_back("workFlux");
  for(MInt cell = 0; cell < noCells; cell++) {
    tmpW[cell] = a_workFlux(cell);
  }
  this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
}

if(m_heatCoupling) {
  name.push_back("heatFlux");
  for(MInt cell = 0; cell < noCells; cell++) {
    tmpW[cell] = a_heatFlux(cell);
  }
  this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
}

name.push_back("volumeFraction");
for(MInt cell = 0; cell < noCells; cell++) {
  tmpW[cell] = a_volumeFraction(cell);
}
this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);

for(MInt i = 0; i < nDim; i++) {
  string tmps = "fluidVelocity_" + to_string(i);
  name.push_back(tmps);
  for(MInt cell = 0; cell < noCells; cell++) {
    tmpW[cell] = a_fluidVelocity(cell, i);
  }
  this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
}

name.push_back("fluidDensity");
for(MInt cell = 0; cell < noCells; cell++) {
  tmpW[cell] = a_fluidDensity(cell);
}
this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);

name.push_back("fluidPressure");
for(MInt cell = 0; cell < noCells; cell++) {
  tmpW[cell] = a_fluidPressure(cell);
}
this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);

name.push_back("fluidTemperture");
for(MInt cell = 0; cell < noCells; cell++) {
  tmpW[cell] = a_fluidTemperature(cell);
}
this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);

name.push_back("fluidSpecies");
for(MInt cell = 0; cell < noCells; cell++) {
  tmpW[cell] = a_fluidSpecies(cell);
}
this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);

for(MInt i = 0; i < nDim; i++) {
  for(MInt j = 0; j < nDim; j++) {
    string tmps = "fluidVelocitySlopes_" + to_string(i) + "_" + to_string(j);
    name.push_back(tmps);
    for(MInt cell = 0; cell < noCells; cell++) {
      tmpW[cell] = a_velocitySlope(cell, i, j);
    }
    this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
  }
}

if(debugOutput && this->m_adaptation && this->m_noSensors > 0) {
  name.push_back("regridTrigger");
  for(MInt cell = 0; cell < noCells; cell++) {
    tmpW[cell] = a_regridTrigger(cell);
  }
  this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
}

if(debugOutput && m_wallCollisions) {
  name.push_back("boundaryCellId");
  for(MInt cell = 0; cell < noCells; cell++) {
    tmpW[cell] = a_bndryCellId(cell);
    if(!a_isValidCell(cell)) {
    }
  }
  this->collectVariables(tmpW.begin(), dbVariables, name, dbVariablesName, 1, noCells);
}

stringstream fileName;
fileName << outputDir() << "solutionLPT_" << getIdentifier(true) << globalTimeStep << ParallelIo::fileExt();

this->saveGridFlowVars((fileName.str()).c_str(), gridFileName.c_str(), noCells, noInternalCellIds, dbVariables,
                       dbVariablesName, 0, idVariables, idVariablesName, 0, dbParameters, dbParametersName,
                       idParameters, idParametersName, pointerRecalcIds, -1);
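// addParticle() for spherical particles: the initial position is chosen from
// the given addMode (randomly inside the cell, at the cell center / spawn
// point, or at an explicitly passed position).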
                        const MInt parcelSize) {
  const MFloat epsilon = MFloatEps;
  const MFloat cellLength = 0.5 * c_cellLengthAtCell(cellId) - epsilon;

  thisParticle.m_dM = 0;

  for(MInt j = 0; j < nDim; j++) {
    c_coordinate(cellId, j) + epsilon - random * (-cellLength + 2 * cellLength * rand() / (RAND_MAX + 1.0));
    thisParticle.m_accel[j] = 0.0;
  }
} else if(addMode == 1 || addMode == 2) {
  for(MInt j = 0; j < nDim; j++) {
    thisParticle.m_oldPos[j] = c_coordinate(cellId, j);
    thisParticle.m_oldVel[j] = velocity[j];
    thisParticle.m_accel[j] = 0.0;
  }
} else if(addMode == 3) {
  for(MInt j = 0; j < nDim; j++) {
    thisParticle.m_position[j] = position[j] + MFloatEps;
    thisParticle.m_oldPos[j] = c_coordinate(cellId, j);
    thisParticle.m_oldVel[j] = velocity[j];
    thisParticle.m_accel[j] = 0.0;
  }
} else {
  mTerm(1, AT_, "Unknown particle add-Mode!");
}

for(MInt i = 0; i < nDim; i++) {
}
} else if(addMode == 2) {
  ASSERT(m_activePrimaryBUp, "");
  thisParticle.checkCellChange(m_spawnCoord, !m_sprayModel->m_broadcastInjected);
  uniform_real_distribution<MFloat> randMov(0, m_timeStep);
  time = randMov(m_sprayModel->randomPrimBreakUp(1));
  if(time > m_timeStep) {
  }
} else if(addMode == 3) {
  ASSERT(m_activeSecondaryBUp, "");
  for(MInt i = 0; i < nDim; i++) {
  }
} else {
  mTerm(1, AT_, "Invalid creation Time");
}

thisParticle.m_partId = static_cast<MLong>(domainId() * 1E9 + m_addedParticle);
m_partList.push_back(thisParticle);

return a_noParticles() - 1;
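// Ellipsoidal variant of addParticle(): in addition to the position handling
// above, a uniformly distributed random initial orientation can be sampled as
// a unit quaternion (the sqrt/sin/cos combination below) and normalized.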
                           const MFloat particleDensityRatio, const MInt random, const MInt addMode,
  const MFloat epsilon = MFloatEps;
  const MFloat cellLength = 0.5 * c_cellLengthAtCell(cellId) - epsilon;

  for(MInt j = 0; j < nDim; j++) {
    c_coordinate(cellId, j) + epsilon - random * (-cellLength + 2 * cellLength * rand() / (RAND_MAX + 1.0));
  }
} else if(addMode == 1 || addMode == 2) {
  for(MInt j = 0; j < nDim; j++) {
    thisParticle.m_oldPos[j] = c_coordinate(cellId, j);
    thisParticle.m_oldVel[j] = velocity[j];
  }
} else if(addMode == 3) {
  for(MInt j = 0; j < nDim; j++) {
    thisParticle.m_position[j] = position[j] + MFloatEps;
    thisParticle.m_oldPos[j] = c_coordinate(cellId, j);
    thisParticle.m_oldVel[j] = velocity[j];
  }
} else {
  mTerm(1, AT_, "Unknown particle add-Mode!");
}

if(m_ellipsoidRandomOrientation != 0) {
  mt19937_64 randomOrientationGen(m_ellipsoidRandomOrientationSeed);
  uniform_real_distribution<MFloat> dist(F0, F1);
  thisParticle.m_quaternion[0] = sqrt(1 - alpha) * sin(2 * PI * beta);
  thisParticle.m_quaternion[1] = sqrt(1 - alpha) * cos(2 * PI * beta);
  thisParticle.m_quaternion[2] = sqrt(alpha) * sin(2 * PI * gamma);
  thisParticle.m_quaternion[3] = sqrt(alpha) * cos(2 * PI * gamma);
  normFactor += k * k;
  normFactor = sqrt(normFactor);
  for(MInt i = 0; i < 4; i++) {
  }
}

for(MInt i = 0; i < nDim; i++) {
}
} else if(addMode == 2) {
  ASSERT(m_activePrimaryBUp, "");
  thisParticle.checkCellChange(m_spawnCoord, !m_sprayModel->m_broadcastInjected);
  uniform_real_distribution<MFloat> randMov(0, m_timeStep);
  time = randMov(m_sprayModel->randomPrimBreakUp(1));
  if(time > m_timeStep) {
  }
} else if(addMode == 3) {
  ASSERT(m_activeSecondaryBUp, "");
  for(MInt i = 0; i < nDim; i++) {
  }
} else {
  mTerm(1, AT_, "Invalid creation Time");
}

thisParticle.m_partId = static_cast<MLong>(domainId() * 1E9 + m_addedParticle);
m_partListEllipsoid.push_back(thisParticle);

return a_noParticles() + a_noEllipsoidalParticles() - 1;
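// Particle spawning: the number of particles added per time step follows from
// the spawn rate times the LPT time step plus the residuum carried over from
// previous steps; newly added particles are then broadcast to the other ranks.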
if(m_spawnCellId < 0) {
  if(noDomains() > 1) {
    m_particleResiduum = 0.0;
  }
}

ASSERT(domainId() == m_spawnDomainId, "");

const MInt prevNo = m_partList.empty() ? 0 : m_partList.size() - 1;
const auto noNewParticles = (MInt)(m_spawnParticlesCount * m_timeStep + m_particleResiduum);
m_particleResiduum += m_spawnParticlesCount * m_timeStep - noNewParticles;

if(noNewParticles >= 1) {
  for(MInt i = 0; i < noNewParticles; i++) {
    randomVectorInCone(&spawnParticlesInitVelo[0], &m_spawnDir[0], m_spawnVelocity, m_spawnParticlesConeAngle,
                       m_spawnEmittDist, randomSpawn(0), m_spawnDistSigmaCoeff, 0);
    addParticle(m_spawnCellId, m_spawnDiameter, m_material->densityRatio(), 0, 1, &spawnParticlesInitVelo[0],
                &m_spawnCoord[0], 1);
  }

  broadcastInjected(prevNo);

  cerr << "added " << noNewParticles << " new particles with diameter " << m_spawnDiameter << " to cell "
       << m_spawnCellId << endl;
}

cerr << "//////////////////////////////////////////////////////////////////////////////////////////////////" << endl;
cerr << "/// Particle INITIALISATION INFORMATION ///" << endl;
cerr << "///--------------------------------------------------------------------------------------------///" << endl;
cerr << "spawning is active : " << boolalpha << m_spawnParticles << endl;
cerr << "spray model is active : " << boolalpha << (MBool)(m_activePrimaryBUp || m_activeSecondaryBUp) << endl;
cerr << "momentum coupling is active : " << boolalpha << m_momentumCoupling << endl;
if(m_momentumCoupling || m_heatCoupling) {
  cerr << " * redistribution is active : " << boolalpha << m_couplingRedist << endl;
  cerr << " - redistribution area is set to : " << m_noRedistLayer << endl;
}
cerr << "drag modelling is active : " << boolalpha << (m_dragModelType > 0) << endl;
if(m_dragModelType > 0) {
  cerr << " * drag model used is : " << m_dragModelType << endl;
}
cerr << "lift modelling is active : " << boolalpha << (m_liftModelType > 0) << endl;
if(m_liftModelType > 0) {
  cerr << " * lift model used is : " << m_liftModelType << endl;
}
cerr << "torque modelling is active : " << boolalpha << (m_torqueModelType > 0) << endl;
if(m_torqueModelType > 0) {
  cerr << " * torque model used is : " << m_torqueModelType << endl;
}
cerr << "//////////////////////////////////////////////////////////////////////////////////////////////////" << endl;
MInt partReceive = 0;
MPI_Bcast(&partReceive, 1, MPI_INT, m_spawnDomainId, mpiComm(), AT_, "part");

MInt noParticlesB = -static_cast<MInt>(m_partList.size());

if(partReceive > 0) {
            mpiComm(), AT_, "buffer");
  unpackParticles<LPTSpherical<nDim>, true>(partReceive, m_intRecvBuffer[0].get(), m_recvBuffer[0].get());
}

noParticlesB += m_partList.size();
MPI_Allreduce(MPI_IN_PLACE, &noParticlesB, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noParticlesB");
                   vector<MInt> cellIds) {
  if(intBuffer != nullptr) {
    for(MInt id = 0; id < (MInt)particlesToSend.size(); id++) {
      intBuffer[z++] = (MInt)(particlesToSend[id]->m_partId >> 32);
      intBuffer[z++] = MInt(particlesToSend[id]->m_partId);
      intBuffer[z++] = cellIds[id];
      intBuffer[z++] = particlesToSend[id]->m_noParticles;
      intBuffer[z++] = ((particlesToSend[id]->firstStep() ? 1 : 0) + (particlesToSend[id]->reqSend() ? 2 : 0));
      intBuffer[z++] = (MInt)(particlesToSend[id]->hadWallColl());
      mTerm(1, AT_, "Buffersize mismatching!");
    }
  }

  if(floatBuffer != nullptr) {
    for(MInt id = 0; id < (MInt)particlesToSend.size(); id++) {
      floatBuffer[z++] = particlesToSend[id]->m_diameter;
      floatBuffer[z++] = particlesToSend[id]->m_densityRatio;
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_position.at(j);
      }
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_velocity.at(j);
      }
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_accel.at(j);
      }
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_oldPos.at(j);
      }
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_oldVel.at(j);
      }
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_oldAccel.at(j);
      }
      floatBuffer[z++] = particlesToSend[id]->m_creationTime;
      floatBuffer[z++] = particlesToSend[id]->m_breakUpTime;
      floatBuffer[z++] = particlesToSend[id]->m_shedDiam;
      floatBuffer[z++] = particlesToSend[id]->m_temperature;
      floatBuffer[z++] = particlesToSend[id]->m_dM;
      floatBuffer[z++] = particlesToSend[id]->m_heatFlux;
      floatBuffer[z++] = particlesToSend[id]->m_fluidVelMag;
      floatBuffer[z++] = particlesToSend[id]->m_oldFluidDensity;
      for(MInt j = 0; j < nDim; j++) {
        floatBuffer[z++] = particlesToSend[id]->m_oldFluidVel[j];
      }
      mTerm(1, AT_, "Buffersize mismatching!");

      if(intBuffer == nullptr) {
        MInt partId1 = (MInt)(particlesToSend[id]->m_partId >> 32);
        MInt partId2 = MInt(particlesToSend[id]->m_partId);
        floatBuffer[z++] = (MFloat)partId1;
        floatBuffer[z++] = (MFloat)partId2;
        floatBuffer[z++] = particlesToSend[id]->m_noParticles;
      }
    }
  }

  if(intBuffer != nullptr && floatBuffer != nullptr) {
    m_noSendParticles++;
  }

                   vector<MInt> cellIds) {
  mTerm(1, AT_, "Only allowed for ellipsoids!");

  if(intBuffer != nullptr) {
    for(MInt id = 0; id < (MInt)particlesToSend.size(); id++) {
      intBuffer[z++] = (MInt)(particlesToSend[id]->m_partId >> 32);
      intBuffer[z++] = MInt(particlesToSend[id]->m_partId);
      intBuffer[z++] = cellIds[id];
      intBuffer[z++] = ((particlesToSend[id]->firstStep() ? 1 : 0) + (particlesToSend[id]->reqSend() ? 2 : 0));
      intBuffer[z++] = (MInt)(particlesToSend[id]->hadWallColl());
      mTerm(1, AT_, "Buffersize mismatching!");
    }
  }

  if(floatBuffer != nullptr) {
    for(MInt id = 0; id < (MInt)particlesToSend.size(); id++) {
      floatBuffer[z++] = particlesToSend[id]->m_semiMinorAxis;
      floatBuffer[z++] = particlesToSend[id]->m_aspectRatio;
      floatBuffer[z++] = particlesToSend[id]->m_densityRatio;
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_position.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_velocity.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_accel.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_angularVel.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_angularAccel.at(j);
      for(MInt j = 0; j < 4; j++)
        floatBuffer[z++] = particlesToSend[id]->m_quaternion.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldPos.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldVel.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldAccel.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldAngularVel.at(j);
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldAngularAccel.at(j);
      for(MInt j = 0; j < 4; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldQuaternion.at(j);
      floatBuffer[z++] = particlesToSend[id]->m_creationTime;
      floatBuffer[z++] = particlesToSend[id]->m_temperature;
      floatBuffer[z++] = particlesToSend[id]->m_heatFlux;
      floatBuffer[z++] = particlesToSend[id]->m_fluidVelMag;
      floatBuffer[z++] = particlesToSend[id]->m_oldFluidDensity;
      for(MInt j = 0; j < nDim; j++)
        floatBuffer[z++] = particlesToSend[id]->m_oldFluidVel[j];
      mTerm(1, AT_, "Buffersize mismatching!");

      if(intBuffer == nullptr) {
        MInt partId1 = (MInt)(particlesToSend[id]->m_partId >> 32);
        MInt partId2 = MInt(particlesToSend[id]->m_partId);
        floatBuffer[z++] = (MFloat)partId1;
        floatBuffer[z++] = (MFloat)partId2;
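// unpackParticles()/unpackParticle(): reverse operation of packParticles().
// Each received particle is deserialized and assigned to a containing leaf
// cell (searched via the grid if t_search is set).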
template <class LPTParticle, MBool t_search>
                  const MBool allowNonLeaf) {
  for(MInt id = 0; id < num; id++) {
    LPTParticle thisParticle;
    unpackParticle(thisParticle, intBuffer, z, floatBuffer, h, i, id);

    thisParticle.initProperties();
    thisParticle.firstStep() = ((i % 2) > 0);

    const MInt cellId = grid().findContainingLeafCell(&thisParticle.m_position[0]);

    if(cellId >= 0 && !a_isHalo(cellId)) {
      thisParticle.m_cellId = cellId;
    }

    thisParticle.m_cellId = haloCellId(domain, thisParticle.m_cellId);
    thisParticle.wasSend() = true;
    thisParticle.toBeDeleted() = false;
    thisParticle.isInvalid() = true;
    ASSERT(m_collisions > 0, "");

    if(thisParticle.m_cellId < 0) {
      mTerm(1, AT_, "ERROR: Particle has no valid cell!");
    }

    thisParticle.m_cellId = windowCellId(domain, thisParticle.m_cellId);
    thisParticle.updateProperties(false);

    if(allowNonLeaf && !c_isLeafCell(thisParticle.m_cellId)) {
      thisParticle.m_cellId = grid().findContainingLeafCell(&thisParticle.m_position[0], thisParticle.m_cellId);
      thisParticle.m_oldCellId = thisParticle.m_cellId;
      if(!thisParticle.firstStep()) {
        mTerm(1, AT_, "Non-leaf comm. of not injected cell requested!");
      }
      if(!c_isLeafCell(thisParticle.m_cellId)) {
        mTerm(1, AT_, "ERROR: Particle has no valid leaf-cell!");
      }
    }

    if(thisParticle.m_cellId < 0) {
      mTerm(1, AT_, "ERROR: Particle has no valid cell!");
    }

    if(thisParticle.m_oldCellId < 0) {
      thisParticle.m_oldCellId = grid().findContainingLeafCell(&thisParticle.m_oldPos[0], thisParticle.m_cellId);
    }

    std::vector<LPTParticle>& particleList = a_particleList<LPTParticle>();
    particleList.push_back(thisParticle);
  }

  if(intBuffer != nullptr) {
    MInt a = intBuffer[z++];
    MInt b = intBuffer[z++];
    thisParticle.m_cellId = intBuffer[z++];
    step = intBuffer[z++];
    mTerm(1, AT_, "Buffersize mismatching!");
  }

  for(MInt l = 0; l < nDim; l++) {
    thisParticle.m_position.at(l) = floatBuffer[h++];
  }
  for(MInt l = 0; l < nDim; l++) {
    thisParticle.m_velocity.at(l) = floatBuffer[h++];
  }
  for(MInt l = 0; l < nDim; l++) {
    thisParticle.m_accel.at(l) = floatBuffer[h++];
  }
  for(MInt l = 0; l < nDim; l++) {
    thisParticle.m_oldPos.at(l) = floatBuffer[h++];
  }
  for(MInt l = 0; l < nDim; l++) {
    thisParticle.m_oldVel.at(l) = floatBuffer[h++];
  }
  for(MInt l = 0; l < nDim; l++) {
    thisParticle.m_oldAccel.at(l) = floatBuffer[h++];
  }
  thisParticle.m_dM = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++) {
  }
  mTerm(1, AT_, "Buffersize mismatching!");

  if(intBuffer == nullptr) {
  }

  if(intBuffer != nullptr) {
    MInt a = intBuffer[z++];
    MInt b = intBuffer[z++];
    thisParticle.m_cellId = intBuffer[z++];
    step = intBuffer[z++];
    mTerm(1, AT_, "Buffersize mismatching!");
  }

  for(MInt l = 0; l < nDim; l++)
    thisParticle.m_position.at(l) = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++)
    thisParticle.m_velocity.at(l) = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++)
    thisParticle.m_accel.at(l) = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++)
  for(MInt l = 0; l < nDim; l++)
  for(MInt l = 0; l < 4; l++)
  for(MInt l = 0; l < nDim; l++)
    thisParticle.m_oldPos.at(l) = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++)
    thisParticle.m_oldVel.at(l) = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++)
    thisParticle.m_oldAccel.at(l) = floatBuffer[h++];
  for(MInt l = 0; l < nDim; l++)
  for(MInt l = 0; l < nDim; l++)
  for(MInt l = 0; l < 4; l++)
  for(MInt l = 0; l < nDim; l++)
  mTerm(1, AT_, "Buffersize mismatching!");

  if(intBuffer == nullptr) {
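// Send side of the injection broadcast: particles marked for deletion or
// respawn on the spawn rank are packed and broadcast to all domains.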
if(noDomains() > 1) {
  vector<LPTSpherical<nDim>*> particlesToSend;
  vector<MInt> cellIds;
  vector<LPTSpherical<nDim>>& partList = a_particleList<LPTSpherical<nDim>>();

  for(MInt i = prevNumPart; i < a_noParticles(); i++) {
    if(partList[i].toBeDeleted() || partList[i].toBeRespawn()) {
      particlesToSend.push_back(&partList[i]);
      cellIds.push_back(-1);
      cerr << domainId() << ": " << partList[i].m_partId << " is to be sent to all domains during injection." << endl;
    }
  }

  MInt numPartSend = static_cast<MInt>(particlesToSend.size());
  MPI_Bcast(&numPartSend, 1, MPI_INT, m_spawnDomainId, mpiComm(), AT_, "intSendBuffer");

  cerr << domainId() << ": " << " is sending " << numPartSend << " particles" << endl;

  ASSERT(m_exchangeBufferSize
             "ERROR: Exchange buffer too small");

  if(numPartSend > 0) {
    packParticles(particlesToSend, m_intSendBuffer[0].get(), m_sendBuffer[0].get(), cellIds);

               m_spawnDomainId, mpiComm(), AT_, "intSendBuffer");
               m_spawnDomainId, mpiComm(), AT_, "intSendBuffer");

    for(auto part : particlesToSend) {
      part->wasSend() = true;
      part->reqSend() = false;
      part->isInvalid() = true;
    }
  }

  MInt particlesReceived = 0;
  MPI_Allreduce(MPI_IN_PLACE, &particlesReceived, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE",
                "particlesReceived");
  if(particlesReceived != numPartSend) {
    cerr << "total particles received: " << particlesReceived << endl;
    TERMM(-1, "ERROR: Inconsistent number of particles");
  }
}
for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
  a_volumeFraction(cellId) = 0.0;
}

for(MInt i = 0; i < a_noParticles(); i++) {
  const MInt cellId = m_partList[i].m_cellId;
  a_volumeFraction(cellId) +=
      4.0 / 3.0 * PI * POW3(0.5 * m_partList[i].m_diameter) * m_partList[i].m_noParticles / c_cellVolume(cellId);
}

for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
  const MInt cellId = m_partListEllipsoid[i].m_cellId;
  a_volumeFraction(cellId) += m_partListEllipsoid[i].particleVolume() / c_cellVolume(cellId);
}

for(MInt bndryCellId = 0; bndryCellId < m_bndryCells->size(); bndryCellId++) {
  const MInt lptCellId = m_bndryCells->a[bndryCellId].m_cellId;
  a_volumeFraction(lptCellId) *= c_cellVolume(lptCellId) / m_bndryCells->a[bndryCellId].m_volume;
}
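// Rebuild the window/halo bookkeeping: flag window and halo cells and store,
// per neighbor domain, the mapping from cell id to its exchange-buffer offset.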
const MInt noNghbrDomains = grid().noNeighborDomains();

for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
  a_isHalo(cellId) = false;
  a_isWindow(cellId) = false;
}

m_pointsToHalo.clear();
m_pointsToHalo.resize(noNghbrDomains);

for(MInt i = 0; i < noNghbrDomains; i++) {
  for(MInt j = 0; j < noWindowCells(i); j++) {
    const MInt windowCell = windowCellId(i, j);
    a_isWindow(windowCell) = true;
    if(!c_isLeafCell(windowCell)) continue;
    const MInt windowCellOffset = j;
    m_pointsToHalo[i].insert(make_pair(windowCell, windowCellOffset));
  }
}

m_pointsToWindow.clear();
m_pointsToWindow.resize(noNghbrDomains);

for(MInt i = 0; i < noNghbrDomains; i++) {
  for(MInt j = 0; j < noHaloCells(i); j++) {
    const MInt haloCell = haloCellId(i, j);
    a_isHalo(haloCell) = true;
    if(!c_isLeafCell(haloCell)) continue;
    const MInt HaloCellOffset = j;
    m_pointsToWindow[i].insert(make_pair(haloCell, HaloCellOffset));
  }
}
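// Create an LPT boundary cell for the given cell from the transferred
// boundary data (cut volume, center offset and surface normal/velocity/
// coordinates).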
ASSERT(a_bndryCellId(cellId) < 0, "ERROR, cell already has a bndryCell!");

const MInt bndryCellId = m_bndryCells->size();
m_bndryCells->append();
a_bndryCellId(cellId) = bndryCellId;

m_bndryCells->a[bndryCellId].m_cellId = cellId;
m_bndryCells->a[bndryCellId].m_volume = bndryData[0];
for(MInt n = 0; n < nDim; n++) {
  m_bndryCells->a[bndryCellId].m_coordinates[n] = c_coordinate(cellId, n) + bndryData[1 + n];
}

const MInt noSurf = 1;
m_bndryCells->a[bndryCellId].m_noSrfcs = noSurf;
for(MInt s = 0; s < noSurf; s++) {
  for(MInt n = 0; n < nDim; n++) {
    m_bndryCells->a[bndryCellId].m_srfcs[s]->m_normal[n] = surfaceN[noSurf * s + n];
    m_bndryCells->a[bndryCellId].m_srfcs[s]->m_velocity[n] = surfaceV[noSurf * s + n];
    m_bndryCells->a[bndryCellId].m_srfcs[s]->m_coordinates[n] = surfaceC(noSurf * s + n, 0);
    for(MInt i = 0; i < nDim; i++) {
      m_bndryCells->a[bndryCellId].m_srfcs[s]->m_planeCoordinates[i][n] = surfaceC(noSurf * s + n, i + 1);
    }
  }
}

m_collisionModel->writeCollData();
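// Debug-build consistency checks: validate particle cell assignment, position
// containment, diameter, parcel count and the stored old fluid state.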
#if defined LPT_DEBUG || !defined NDEBUG
static const MInt noOfRedistLayers = m_couplingRedist ? ((m_noRedistLayer > 0) ? m_noRedistLayer : 2) : 0;

if(!((noOfRedistLayers == 0 && !m_couplingRedist) || m_couplingRedist)) {
  mTerm(1, AT_, "Invalid configuration noOfRedistLayers > 0, but coupling redistribution is not active!");
}

for(MInt part = 0; part < a_noParticles(); part++) {
  if(m_partList[part].isInvalid()) {
    if(!m_partList[part].fullyEvaporated()) continue;
    if(m_partList[part].m_noParticles < 1) {
      cerr << "Fully evap. has neg. parcel count! " << m_partList[part].m_noParticles << " " << m_partList[part].m_dM
           << endl;
    }
    if(m_partList[part].m_dM > 0.000001) {
      cerr << "Fully evap. has large mass-transfer " << m_partList[part].m_dM << " " << m_partList[part].m_partId
           << endl;
    }
  }

  const MInt cellId = m_partList[part].m_cellId;
  if(cellId < 0 || cellId > noInternalCells()) {
    cerr << "Invalid cellId " << cellId << " " << m_partList[part].hasCollided() << " " << a_isHalo(cellId) << " "
         << c_isLeafCell(cellId) << " " << m_partList[part].isWindow() << " " << m_partList[part].reqSend() << " "
         << m_partList[part].wasSend() << " " << m_partList[part].hadWallColl() << " " << part << " "
         << m_partList[part].m_partId << endl;
    mTerm(1, AT_, "Invalid cellId!");
  }
  if(m_partList[part].isInvalid()) {
    mTerm(1, AT_, "Invalid particle at TS beginning!");
  }
  if(a_isHalo(cellId) || !c_isLeafCell(cellId) || c_noChildren(cellId) > 0) {
    mTerm(1, AT_, "Particle in halo or non-leaf cell!");
  }

  const MFloat halfCellLength = c_cellLengthAtLevel(c_level(cellId) + 1);
  std::array<MFloat, 3> origCellC = {};
  for(MInt i = 0; i < nDim; i++) {
    origCellC[i] = c_coordinate(cellId, i);
  }

  if(fabs(origCellC[0] - m_partList[part].m_position[0]) > halfCellLength
     || fabs(origCellC[1] - m_partList[part].m_position[1]) > halfCellLength
     || fabs(origCellC[2] - m_partList[part].m_position[2]) > halfCellLength) {
    cerr << "ERROR: particle " << m_partList[part].m_partId << " is in an incorrect cell "
         << sqrt(POW2(origCellC[0] - m_partList[part].m_position[0])
                 + POW2(origCellC[1] - m_partList[part].m_position[1])
                 + POW2(origCellC[2] - m_partList[part].m_position[2]))
         << " > " << halfCellLength << " and particle location: " << m_partList[part].m_position[0] << " "
         << m_partList[part].m_position[1] << " " << m_partList[part].m_position[2] << " "
         << m_partList[part].hadWallColl() << " " << a_level(cellId) << " " << origCellC[0] << " " << origCellC[1]
         << " " << origCellC[2] << endl;
    m_partList[part].isInvalid() = true;
  }

  if(m_partList[part].m_diameter < m_sizeLimit || isnan(m_partList[part].m_diameter)) {
    cerr << "Invalid particle diameter : " << m_partList[part].m_diameter << " " << m_sizeLimit << " "
         << m_partList[part].firstStep() << " " << m_partList[part].hadWallColl() << " "
         << m_partList[part].m_breakUpTime << " " << m_partList[part].m_partId << endl;
  }
  if(m_partList[part].m_noParticles < 0) {
    cerr << m_partList[part].m_noParticles << endl;
    mTerm(1, AT_, "Invalid parcel count!");
  }
  if(m_partList[part].m_oldCellId < 0 && !m_periodicBC) {
    mTerm(1, AT_, "Invalid old CellId!");
  }
  if(m_partList[part].m_oldFluidDensity < MFloatEps) {
    cerr << a_fluidDensity(cellId) << " " << a_isHalo(cellId) << " " << a_level(cellId) << " " << part << " "
         << a_noParticles() << " " << m_partList[part].m_oldFluidDensity << endl;
    mTerm(1, AT_, "Invalid old fluid density!");
  }
  for(MInt i = 0; i < nDim; i++) {
    if(isnan(m_partList[part].m_oldFluidVel[i])) {
      mTerm(1, AT_, "Invalid old fluid velocity!");
    }
  }
  if(m_partList[part].m_temperature < 0) {
    mTerm(1, AT_, "Invalid particle temperature!");
  }
  if(m_partList[part].m_shedDiam < 0) {
    cerr << "Negative shed-diameter " << m_partList[part].m_partId << endl;
  }
  if(m_partList[part].m_noParticles < 1) {
    cerr << "Negative parcel-count " << m_partList[part].m_partId << endl;
  }
  if(m_partList[part].m_dM > 0.000001) {
    cerr << "Large mass-transfer " << m_partList[part].m_dM << " " << m_partList[part].m_partId << endl;
  }
}
for(MInt part = 0; part < a_noEllipsoidalParticles(); part++) {
  const MInt cellId = m_partListEllipsoid[part].m_cellId;

  if(cellId < 0 || cellId > noInternalCells()) {
    cerr << "Invalid cellId " << cellId << " " << m_partListEllipsoid[part].hasCollided() << " " << a_isHalo(cellId)
         << " " << c_isLeafCell(cellId) << " " << m_partListEllipsoid[part].isWindow() << " "
         << m_partListEllipsoid[part].reqSend() << " " << m_partListEllipsoid[part].wasSend() << " "
         << m_partListEllipsoid[part].hadWallColl() << " " << part << " " << m_partListEllipsoid[part].m_partId
         << endl;
    mTerm(1, AT_, "Invalid cellId!");
  }
  if(m_partListEllipsoid[part].isInvalid()) {
    mTerm(1, AT_, "Invalid ellipsoid at TS beginning!");
  }
  if(a_isHalo(cellId) || !c_isLeafCell(cellId) || c_noChildren(cellId) > 0) {
    mTerm(1, AT_, "Ellipsoid in halo or non-leaf cell!");
  }

  const MFloat halfCellLength = c_cellLengthAtLevel(c_level(cellId) + 1);
  std::array<MFloat, 3> origCellC = {};
  for(MInt i = 0; i < nDim; i++) {
    origCellC[i] = c_coordinate(cellId, i);
  }

  if(fabs(origCellC[0] - m_partListEllipsoid[part].m_position[0]) > halfCellLength
     || fabs(origCellC[1] - m_partListEllipsoid[part].m_position[1]) > halfCellLength
     || fabs(origCellC[2] - m_partListEllipsoid[part].m_position[2]) > halfCellLength) {
    cerr << "ERROR: ellipsoid " << m_partListEllipsoid[part].m_partId << " is in an incorrect cell "
         << sqrt(POW2(origCellC[0] - m_partListEllipsoid[part].m_position[0])
                 + POW2(origCellC[1] - m_partListEllipsoid[part].m_position[1])
                 + POW2(origCellC[2] - m_partListEllipsoid[part].m_position[2]))
         << " > " << halfCellLength << " and ellipsoid location: " << m_partListEllipsoid[part].m_position[0] << " "
         << m_partListEllipsoid[part].m_position[1] << " " << m_partListEllipsoid[part].m_position[2] << " "
         << m_partListEllipsoid[part].hadWallColl() << " " << a_level(cellId) << " " << origCellC[0] << " "
         << origCellC[1] << " " << origCellC[2] << endl;
    m_partListEllipsoid[part].isInvalid() = true;
  }

  if(m_partListEllipsoid[part].equivalentDiameter() < m_sizeLimit
     || isnan(m_partListEllipsoid[part].equivalentDiameter())) {
    cerr << "Invalid ellipsoid diameter : " << m_partListEllipsoid[part].equivalentDiameter() << " " << m_sizeLimit
         << " " << m_partListEllipsoid[part].firstStep() << " " << m_partListEllipsoid[part].hadWallColl() << " "
         << m_partListEllipsoid[part].m_partId << endl;
  }
  if(m_partListEllipsoid[part].m_oldCellId < 0 && !m_periodicBC) {
    mTerm(1, AT_, "Invalid old CellId!");
  }
  if(m_partListEllipsoid[part].m_oldFluidDensity < 0) {
    cerr << a_fluidDensity(cellId) << " " << a_isHalo(cellId) << " " << a_level(cellId) << " " << part << " "
         << a_noEllipsoidalParticles() << endl;
    mTerm(1, AT_, "Invalid old fluid density!");
  }
  for(MInt i = 0; i < nDim; i++) {
    if(isnan(m_partListEllipsoid[part].m_oldFluidVel[i])) {
      mTerm(1, AT_, "Invalid old fluid velocity!");
    }
  }
  if(m_partListEllipsoid[part].m_temperature < 0) {
    mTerm(1, AT_, "Invalid particle temperature!");
  }
}

for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
  if(!c_isLeafCell(cellId)) continue;
  if(a_fluidDensity(cellId) < MFloatEps) {
    mTerm(1, AT_, "Invalid density in cell!");
  }
  if(isnan(a_massFlux(cellId))) {
    mTerm(1, AT_, "Invalid mass-flux in cell!");
  }
}
receiveVelocitySlopes();

MInt vectorSize = 0;
if(m_activePrimaryBUp && grid().hasInactiveRanks()) {
  if(!m_injData.empty()) {
    vectorSize = m_injData.size();
  }
  MPI_Allreduce(MPI_IN_PLACE, &vectorSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD, AT_, "INPLACE", "vectorSize");

  if(vectorSize > 0 && !isActive()) {
    const MInt count = m_sprayModel->m_injDataSize + m_addInjDataCnt;
    MInt dummyTimeStep = -1;
    for(MInt i = 0; i < vectorSize; i++) {
      vector<MFloat> tmp(count);
      for(MInt j = 0; j < count; j++) {
      }
      m_injData.insert(make_pair(dummyTimeStep, tmp));
    }
  } else if(isActive() && domainId() == m_spawnDomainId) {
    if(vectorSize != (signed)m_injData.size()) {
      cerr << "Injector size differs! " << vectorSize << " " << m_injData.size() << endl;
    }
  }
}

cerr0 << "LPT-time step is : " << m_timeStep << endl;

removeInvalidParticles(true);

countParticlesInCells();

if(m_wallCollisions) {
  m_bndryCells->resetSize(0);
  m_noOuterBndryCells = 0;
}

MPI_Allreduce(MPI_IN_PLACE, &m_particleResiduum, 1, MPI_DOUBLE, MPI_MAX, mpiComm(), AT_, "INPLACE",
              "m_particleResiduum");
MPI_Allreduce(MPI_IN_PLACE, &m_PRNGSpawnCount, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "m_PRNGSpawnCount");
MPI_Allreduce(MPI_IN_PLACE, &m_spawnDomainId, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "m_spawnDomainId");

if(m_activePrimaryBUp || m_activeSecondaryBUp) {
  MInt count = m_sprayModel->m_PRNGPrimBreakUpCount;
  MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "m_PRNGPrimBreakUpCount");
  m_sprayModel->m_PRNGPrimBreakUpCount = count;

  if(m_spawnCellId > -1) {
    timeSinceSOI = m_sprayModel->timeSinceSOI();
  }
  MPI_Allreduce(MPI_IN_PLACE, &timeSinceSOI, 1, MPI_DOUBLE, MPI_MAX, mpiComm(), AT_, "INPLACE", "timeSinceSOI");
  m_sprayModel->timeSinceSOI() = timeSinceSOI;
}

if(m_activePrimaryBUp) {
  MInt vectorSizeBU = vectorSize;
  vectorSize = m_injData.size();
  const MInt count = m_sprayModel->m_injDataSize + m_addInjDataCnt;
  MPI_Allreduce(MPI_IN_PLACE, &vectorSize, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "vectorSize");
  const MInt totalSize = vectorSize * (count + 1);

  if(vectorSize > 0) {
    if(vectorSize != vectorSizeBU && grid().hasInactiveRanks()) {
      cerr << domainId() << "Vector size differs " << vectorSizeBU << " " << vectorSize << endl;
    }

    if(domainId() == m_spawnDomainId) {
      ASSERT((signed)m_injData.size() == vectorSize, "");
      for(auto it = m_injData.begin(); it != m_injData.end(); it++) {
        injData(i++) = timeStep;
        for(MInt j = 0; j < count; j++) {
          const MFloat data = (it->second)[j];
          injData(i++) = data;
        }
      }
    }

    for(auto it = m_injData.begin(); it != m_injData.end(); it++) {
      i = i + m_sprayModel->m_injDataSize + 1;
      const MFloat sumEvap = (it->second)[m_sprayModel->m_injDataSize];
      const MInt numRT = (it->second)[m_sprayModel->m_injDataSize + 1];
      const MInt numKH = (it->second)[m_sprayModel->m_injDataSize + 2];
      const MInt numSend = (it->second)[m_sprayModel->m_injDataSize + 3];
      injData(i++) = sumEvap;
      injData(i++) = numRT;
      injData(i++) = numKH;
      injData(i++) = numSend;
    }

    MPI_Allreduce(MPI_IN_PLACE, &injData[0], totalSize, MPI_DOUBLE, MPI_SUM, mpiComm(), AT_, "INPLACE", "injData");

    for(MInt i = 0; i < totalSize; i = i + (count + 1)) {
      const MInt timeStep = (MInt)injData(i);
      vector<MFloat> tmp(count);
      for(MInt j = 0; j < count; j++) {
        tmp[j] = injData[i + j + 1];
      }
      m_injData.insert(make_pair(timeStep, tmp));
    }
  }
}

MInt noParticles = a_noParticles();
MPI_Allreduce(MPI_IN_PLACE, &noParticles, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noParticles");
if(domainId() == 0) {
  cerr << noParticles << " #particles before balancing!" << endl;
}

MInt noEllipsoids = a_noEllipsoidalParticles();
MPI_Allreduce(MPI_IN_PLACE, &noEllipsoids, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noEllipsoids");
cerr0 << noEllipsoids << " #ellipsoids before balancing!" << endl;
m_queueToSend.clear();
m_sendBuffer.clear();
m_recvBuffer.clear();
m_intSendBuffer.clear();
m_intRecvBuffer.clear();
m_mpi_reqSendFloat.clear();
m_mpi_reqSendInt.clear();
m_mpi_reqSendSize.clear();
m_mpi_statusProbe.clear();

m_partListEllipsoid.clear();

if(m_wallCollisions) {
  m_bndryCells->resetSize(0);
}

m_cellToNghbrHood.clear();
m_cellToNghbrHoodInterpolation.clear();

m_pointsToHalo.clear();
m_pointsToWindow.clear();

m_cellToPartMap.clear();
m_cellToEllipsMap.clear();

countParticlesInCells();

m_loadBalancingReinitStage = 0;

if(!grid().isActive()) {
  updateDomainInfo(-1, -1, MPI_COMM_NULL, AT_);
} else {
  updateDomainInfo(grid().domainId(), grid().noDomains(), grid().mpiComm(), AT_);
}

ASSERT(m_cells.size() == 0, "");
ASSERT(a_noParticles() == 0, "");
ASSERT(a_noEllipsoidalParticles() == 0, "");

if(!grid().isActive()) {
  m_cells.reset(grid().maxNoCells());
  this->setHaloCellsOnInactiveRanks();
}

for(MInt cellId = 0; cellId < grid().noInternalCells(); cellId++) {
  if(grid().domainOffset(domainId()) + (MLong)cellId != c_globalId(cellId)) {
    TERMM(1, "Global id mismatch.");
  }
}

updateExchangeCells();
grid().updateLeafCellExchange();

this->checkNoHaloLayers();

m_loadBalancingReinitStage = 1;

ASSERT(m_cells.size() == c_noCells() && c_noCells() == a_noCells(), "");
const MInt cellId = grid().tree().grid2solver(gridCellId);
if(cellId < 0 || cellId >= noInternalCells()) {
}

if(dataId == 0) return 2;

if(a_noParticlesInCell(cellId) == 0 && a_noEllipsoidsInCell(cellId) == 0) return 0;

dataSize = (elemPerP<LPTSpherical<nDim>>() + 3) * a_noParticlesInCell(cellId)
           + (elemPerP<LPTEllipsoidal<nDim>>() + 2) * a_noEllipsoidsInCell(cellId);

dataSize = 3 * a_noParticlesInCell(cellId) + 2 * a_noEllipsoidsInCell(cellId);

TERMM(1, "Unknown data id.");

if(dataId != 1) mTerm(1, AT_, "Only Type 1 should be FLOAT");

MInt localBufferId = 0;
for(MInt i = 0; i < oldNoCells; i++) {
  const MInt gridCellId = bufferIdToCellId[i];
  if(gridCellId < 0) continue;

  const MInt cellId = grid().tree().grid2solver(gridCellId);
  if(cellId < 0 || cellId >= noInternalCells()) {
  }

  if(a_noParticlesInCell(cellId) > 0) {
    auto particlesInCell = m_cellToPartMap.equal_range(cellId);
    vector<LPTSpherical<nDim>*> particlesToSend;
    vector<MInt> cellIds;
    for(auto it = particlesInCell.first; it != particlesInCell.second; it++) {
      auto& particle = it->second;
      particlesToSend.push_back(particle);
    }
    if((MInt)particlesToSend.size() != a_noParticlesInCell(cellId)) {
      cerr << particlesToSend.size() << " " << a_noParticlesInCell(cellId) << " miss-count!" << endl;
      mTerm(1, AT_, "Particle count not matching!");
    }
    packParticles(particlesToSend, nullptr, &data[localBufferId], cellIds);
    localBufferId += ((elemPerP<LPTSpherical<nDim>>() + 3) * particlesToSend.size());
  }

  if(a_noEllipsoidsInCell(cellId) > 0) {
    auto ellipsoidsInCell = m_cellToEllipsMap.equal_range(cellId);
    vector<LPTEllipsoidal<nDim>*> ellipsoidsToSend;
    vector<MInt> cellIds;
    for(auto it = ellipsoidsInCell.first; it != ellipsoidsInCell.second; it++) {
      auto& particle = it->second;
      ellipsoidsToSend.push_back(particle);
    }
    if((MInt)ellipsoidsToSend.size() != a_noEllipsoidsInCell(cellId)) {
      cerr << ellipsoidsToSend.size() << " " << a_noEllipsoidsInCell(cellId) << " miss-count!" << endl;
      mTerm(1, AT_, "Ellipsoids count not matching!");
    }
    packParticles(ellipsoidsToSend, nullptr, &data[localBufferId], cellIds);
    localBufferId += ((elemPerP<LPTEllipsoidal<nDim>>() + 2) * ellipsoidsToSend.size());
  }
}

if(dataId != 2 && dataId != 0) mTerm(1, AT_, "Type 0 or 2 should be INT");

MInt localBufferId = 0;
for(MInt i = 0; i < oldNoCells; i++) {
  const MInt gridCellId = bufferIdToCellId[i];
  if(gridCellId < 0) continue;

  const MInt cellId = grid().tree().grid2solver(gridCellId);
  if(cellId < 0 || cellId >= noInternalCells()) {
  }

  data[localBufferId++] = a_noParticlesInCell(cellId);
  data[localBufferId++] = a_noEllipsoidsInCell(cellId);

  if(a_noParticlesInCell(cellId) > 0) {
    auto particlesInCell = m_cellToPartMap.equal_range(cellId);
    vector<LPTSpherical<nDim>*> particlesToSend;
    for(auto it = particlesInCell.first; it != particlesInCell.second; it++) {
      auto& particle = it->second;
      data[localBufferId] = (MInt)(particle->m_partId >> 32);
      data[localBufferId + 1] = MInt(particle->m_partId);
      data[localBufferId + 2] = particle->m_noParticles;
      if(data[localBufferId + 2] < 0) {
        cerr << "Setting strange exchange buffer!" << endl;
      }
    }
  }

  if(a_noEllipsoidsInCell(cellId) > 0) {
    auto ellipsoidsInCell = m_cellToEllipsMap.equal_range(cellId);
    vector<LPTEllipsoidal<nDim>*> ellipsoidsToSend;
    for(auto it = ellipsoidsInCell.first; it != ellipsoidsInCell.second; it++) {
      auto& particle = it->second;
      data[localBufferId] = (MInt)(particle->m_partId >> 32);
      data[localBufferId + 1] = MInt(particle->m_partId);
    }
  }
}

if(dataId != 1) mTerm(1, AT_, "Only Type 1 should be FLOAT");

if(m_loadBalancingReinitStage == 0) {
  MInt noParticlesOnRank = 0;
  MInt noEllipsoidsOnRank = 0;
  for(MInt cellId = 0; cellId < noInternalCells(); cellId++) {
    noParticlesOnRank += a_noParticlesInCell(cellId);
    noEllipsoidsOnRank += a_noEllipsoidsInCell(cellId);
  }

  unpackParticles<LPTSpherical<nDim>, true>(noParticlesOnRank, nullptr, &data[0]);
  if(a_noParticles() != noParticlesOnRank) {
    cerr << domainId() << " has incorrect count " << a_noSphericalParticles() << " " << noParticlesOnRank << endl;
    mTerm(1, AT_, "Particle gone missing!");
  }

  unpackParticles<LPTEllipsoidal<nDim>, true>(noEllipsoidsOnRank, nullptr,
                                              &data[(elemPerP<LPTSpherical<nDim>>() + 3) * noParticlesOnRank]);
  if(a_noEllipsoidalParticles() != noEllipsoidsOnRank) {
    cerr << domainId() << " has incorrect count " << a_noParticles<LPTEllipsoidal<nDim>>() << " "
         << noEllipsoidsOnRank << endl;
    mTerm(1, AT_, "Ellipsoid gone missing!");
  }
}

if(dataId != 2 && dataId != 0) mTerm(1, AT_, "Type 0 or 2 should be INT");

if(m_loadBalancingReinitStage == 0) {
  for(MInt cellId = 0; cellId < noInternalCells(); cellId++) {
    a_noParticlesInCell(cellId) = data[counter++];
    a_noEllipsoidsInCell(cellId) = data[counter++];
  }
} else if(dataId == 2) {
}

globalIntVars.push_back(m_PRNGSpawnCount);

if(m_activePrimaryBUp || m_activeSecondaryBUp) {
  globalIntVars.push_back(m_sprayModel->m_PRNGPrimBreakUpCount);
}

globalFloatVars.push_back(m_particleResiduum);
globalFloatVars.push_back(m_time);
globalFloatVars.push_back(m_timeStep);

if(m_activePrimaryBUp) {
  globalFloatVars.push_back(m_sprayModel->timeSinceSOI());
}

if(m_activePrimaryBUp && !m_injData.empty()) {
  for(auto it = m_injData.begin(); it != m_injData.end(); it++) {
    globalFloatVars.push_back(timeStep);
    for(MInt i = 0; i < m_sprayModel->m_injDataSize + m_addInjDataCnt; i++) {
      const MFloat data = (it->second)[i];
      globalFloatVars.push_back(data);
    }
  }
  globalIntVars.push_back(m_injData.size());
} else if(m_activePrimaryBUp) {
  globalIntVars.push_back(0);
}

m_PRNGSpawnCount = globalIntVars[0];

m_particleResiduum = globalFloatVars[0];
m_time = globalFloatVars[1];
m_timeStep = globalFloatVars[2];

if(m_activePrimaryBUp || m_activeSecondaryBUp) {
  m_sprayModel->m_PRNGPrimBreakUpCount = globalIntVars[1];
}
if(m_activePrimaryBUp) {
  m_sprayModel->timeSinceSOI() = globalFloatVars[3];
}

if(m_activePrimaryBUp) {
  const MInt vectorSize = globalIntVars[2];
  const MInt count = m_sprayModel->m_injDataSize + m_addInjDataCnt;
  for(MInt i = 0; i < vectorSize * (count + 1); i = i + 1 + count) {
    const MInt timeStep = (MInt)globalFloatVars[i + m_addInjDataCnt];
    vector<MFloat> tmp(count);
    for(MInt j = 0; j < count; j++) {
      tmp[j] = globalFloatVars[i + j + 1 + m_addInjDataCnt];
    }
    m_injData.insert(make_pair(timeStep, tmp));
  }
}
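// Post-balance finalization: re-seed and fast-forward the spray PRNGs,
// restore injector data on ranks without the spawn cell, recompute fluid
// fractions and source terms, print particle/cell statistics, and rebuild the
// regrid-trigger band around particle-containing cells.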
  if(!isActive()) return;

  grid().findCartesianNghbIds();

  if(m_activePrimaryBUp || m_activeSecondaryBUp) {
    m_sprayModel->m_PRNGPrimBreakUp.seed(m_sprayModel->m_spraySeed);
  }
  // ...
  if(domainId() != m_spawnDomainId) {
    m_particleResiduum = 0;
    if(m_activePrimaryBUp || m_activeSecondaryBUp) {
      m_sprayModel->m_PRNGPrimBreakUpCount = 0;
    }
  }
  // ...
  m_sprayModel->m_PRNGPrimBreakUp.discard(m_sprayModel->m_PRNGPrimBreakUpCount);

  m_cellToNghbrHood.clear();
  m_cellToNghbrHoodInterpolation.clear();

  updateFluidFraction();

  if(m_spawnCellId == -1 && m_sprayModel != nullptr) {
    const MInt vectorSize = m_injData.size();
    const MInt count = m_sprayModel->m_injDataSize + m_addInjDataCnt;
    vector<MFloat> tmp(count);
    for(MInt i = 0; i < count; i++) {
      // ...
    }
    for(MInt i = 0; i < vectorSize; i++) {
      // ...
      m_injData.insert(make_pair(stepId, tmp));
    }
  }

  MInt noParticles = a_noParticles();
  MPI_Allreduce(MPI_IN_PLACE, &noParticles, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noParticles");
  if(domainId() == 0) {
    cerr << noParticles << " #particles after balancing!" << endl;
  }
  MInt noEllipsoids = a_noEllipsoidalParticles();
  MPI_Allreduce(MPI_IN_PLACE, &noEllipsoids, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noEllipsoids");
  cerr0 << noEllipsoids << " #ellipsoids after balancing!" << endl;
  if(m_spawnCellId > -1) {
    a_noParticlesInCell(m_spawnCellId) += 1;
    if(m_ellipsoids) a_noEllipsoidsInCell(m_spawnCellId) += 1;
  }

  for(MInt i = 0; i < a_noParticles(); i++) {
    m_partList[i].resetWeights();
  }
  for(MInt i = 0; i < a_noEllipsoidalParticles(); i++) {
    m_partListEllipsoid[i].resetWeights();
  }

  resetSourceTerms(0);

  if(m_nonBlockingComm) {
    // ...
    receiveSourceTerms();
  }
  exchangeSourceTerms();

  MInt noCells = a_noCells();
  MInt minCells = noCells;
  MInt maxCells = noCells;
  MInt noPart = a_noParticles();
  MInt minParts = noPart;
  MInt maxParts = noPart;

  MPI_Allreduce(MPI_IN_PLACE, &noCells, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noCells");
  MPI_Allreduce(MPI_IN_PLACE, &noPart, 1, MPI_INT, MPI_SUM, mpiComm(), AT_, "INPLACE", "noPart");
  MPI_Allreduce(MPI_IN_PLACE, &maxCells, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxCells");
  MPI_Allreduce(MPI_IN_PLACE, &minCells, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minCells");
  MPI_Allreduce(MPI_IN_PLACE, &maxParts, 1, MPI_INT, MPI_MAX, mpiComm(), AT_, "INPLACE", "maxParts");
  MPI_Allreduce(MPI_IN_PLACE, &minParts, 1, MPI_INT, MPI_MIN, mpiComm(), AT_, "INPLACE", "minParts");

  if(domainId() == 0) {
    cerr << "LPT cell-statistics : " << minCells << " - " << maxCells << " avg " << noCells / noDomains() << endl;
    cerr << "LPT particle-statistics : " << minParts << " - " << maxParts << " avg " << noPart / noDomains() << endl;
  }
  if(!this->m_adaptation || this->m_noSensors == 0) return;

  vector<MInt> lastLayer;
  for(MInt cellId = 0; cellId < a_noCells(); cellId++) {
    a_regridTrigger(cellId) = false;
    if(a_noParticlesInCell(cellId) > 0 || a_noEllipsoidsInCell(cellId) > 0 || cellId == m_spawnCellId) {
      inShadowLayer(cellId) = true;
      lastLayer.emplace_back(cellId);
    }
  }

  MInt layerCount = 0;
  while(layerCount < m_bandWidth[maxRefinementLevel() - 1]) {
    for(MInt c = 0; c < (signed)lastLayer.size(); c++) {
      const MInt cellId = lastLayer[c];
      for(MInt d = 0; d < m_noDirs; d++) {
        if(c_hasNeighbor(cellId, d) == 0) {
          if(c_parentId(cellId) > -1 && c_hasNeighbor(c_parentId(cellId), d) && !a_isHalo(cellId)) {
            if(a_isValidCell(cellId)) {
              m_forceAdaptation = true;
            }
          }
          // ...
        } else {
          const MInt nghbrId = c_neighborId(cellId, d);
          if(!inShadowLayer[nghbrId]) {
            tmp.emplace_back(nghbrId);
            if(layerCount > m_bandWidth[maxRefinementLevel() - 1] - m_innerBound) {
              a_regridTrigger(nghbrId) = true;
            }
            inShadowLayer[nghbrId] = true;
          }
        }
      }
    }

    for(MInt i = 0; i < grid().noNeighborDomains(); i++) {
      for(MInt j = 0; j < noWindowCells(i); j++) {
        tmp_data(windowCellId(i, j)) = inShadowLayer(windowCellId(i, j));
      }
    }
    this->exchangeData(&tmp_data(0), 1);
    for(MInt i = 0; i < grid().noNeighborDomains(); i++) {
      for(MInt j = 0; j < noHaloCells(i); j++) {
        const MInt haloCell = haloCellId(i, j);
        if(!inShadowLayer[haloCell]) {
          inShadowLayer[haloCell] = tmp_data(haloCell);
          if(inShadowLayer[haloCell]) {
            tmp.emplace_back(haloCell);
            if(layerCount > m_bandWidth[maxRefinementLevel() - 1] - m_innerBound) {
              a_regridTrigger(haloCell) = true;
            }
          }
        }
      }
    }

    for(MInt c = 0; c < (signed)tmp.size(); c++) {
      lastLayer.emplace_back(tmp[c]);
    }
    // ...
  }

  MPI_Allreduce(MPI_IN_PLACE, &m_forceAdaptation, 1, MPI_C_BOOL, MPI_LOR, mpiComm(), AT_, "MPI_IN_PLACE", "status");
  if(domainId() == 0 && m_forceAdaptation) {
    cerr << "LPT-Solver is forcing a mesh-adaptation at time step " << globalTimeStep << endl;
  }
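// Self-contained illustrative sketch (hypothetical stand-alone grid, not the
// solver's data structures): growing a band of marked cells around seed cells
// by repeatedly marking face neighbors, layer by layer, which is the idea
// behind the shadow-layer loop above. Grid size and band width are arbitrary.
#include <iostream>
#include <vector>

int main() {
  const int nx = 16, ny = 16, bandWidth = 3;
  std::vector<char> marked(nx * ny, 0);
  std::vector<int> lastLayer;

  // Seed: cells that contain particles (here: a single cell in the middle).
  const int seed = (ny / 2) * nx + nx / 2;
  marked[seed] = 1;
  lastLayer.push_back(seed);

  const int dx[4] = {1, -1, 0, 0};
  const int dy[4] = {0, 0, 1, -1};

  for(int layer = 0; layer < bandWidth; layer++) {
    std::vector<int> next;
    for(int cell : lastLayer) {
      const int cx = cell % nx, cy = cell / nx;
      for(int d = 0; d < 4; d++) {
        const int x = cx + dx[d], y = cy + dy[d];
        if(x < 0 || x >= nx || y < 0 || y >= ny) continue;  // no neighbor in this direction
        const int nghbr = y * nx + x;
        if(!marked[nghbr]) {
          marked[nghbr] = 1;
          next.push_back(nghbr);
        }
      }
    }
    lastLayer.swap(next);
  }

  int count = 0;
  for(char m : marked) count += m;
  std::cout << "cells in shadow layer: " << count << "\n";  // 25 for bandWidth 3
}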
  if(!this->m_adaptation || this->m_noSensors < 1) return;

  for(MInt id = 0; id < a_noParticles(); id++) {
    if(m_partList[id].isInvalid()) continue;
    const MInt cellId = m_partList[id].m_cellId;
    if(a_isHalo(cellId)) continue;
    if(a_regridTrigger(cellId)) {
      m_forceAdaptation = true;
    }
  }
  for(MInt id = 0; id < a_noEllipsoidalParticles(); id++) {
    if(m_partListEllipsoid[id].isInvalid()) continue;
    const MInt cellId = m_partListEllipsoid[id].m_cellId;
    if(a_isHalo(cellId)) continue;
    if(a_regridTrigger(cellId)) {
      m_forceAdaptation = true;
    }
  }

  if(!m_nonBlockingComm) {
    MPI_Allreduce(MPI_IN_PLACE, &m_forceAdaptation, 1, MPI_C_BOOL, MPI_LOR, mpiComm(), AT_, "MPI_IN_PLACE", "status");
    if(domainId() == 0 && m_forceAdaptation) {
      cerr << "LPT-Solver is forcing a mesh-adaptation at time step " << globalTimeStep << endl;
    }
  } else {
    MPI_Iallreduce(MPI_IN_PLACE, &m_forceAdaptation, 1, MPI_C_BOOL, MPI_LOR, mpiComm(),
                   &m_checkAdaptationRecvRequest[0], AT_, "MPI_IN_PLACE", "status");
    m_openRegridSend = true;
  }
  if(!m_nonBlockingComm) {
    // ...
  }
  if(m_openRegridSend) {
    RECORD_TIMER_START(m_timers[Timers::Exchange]);
    RECORD_TIMER_START(m_timers[Timers::Exchange2]);
    MPI_Wait(&m_checkAdaptationRecvRequest[0], MPI_STATUS_IGNORE, AT_);
    m_openRegridSend = false;
    RECORD_TIMER_STOP(m_timers[Timers::Exchange2]);
    RECORD_TIMER_STOP(m_timers[Timers::Exchange]);
  }
  m_timeStepUpdated = true;
  MFloat timeStep = std::numeric_limits<MFloat>::max();

  for(MInt id = 0; id < a_noParticles(); id++) {
    if(m_partList[id].isInvalid()) continue;
    const MFloat dx = c_cellLengthAtCell(m_partList[id].m_cellId) / m_partList[id].s_lengthFactor;
    for(MInt d = 0; d < nDim; ++d) {
      const MFloat u_d = fabs(m_partList[id].m_velocity[d]);
      // ...
      if(abs(m_cfl + 1.0) < MFloatEps && abs(m_Ma + 1.0) > MFloatEps) {
        dt_dir = m_Ma / SQRT3 * dx / u_d;
      } else if(abs(m_cfl + 1.0) > MFloatEps)
        dt_dir = m_cfl * dx / u_d;
      timeStep = mMin(timeStep, dt_dir);
    }
  }

  for(MInt id = 0; id < a_noEllipsoidalParticles(); id++) {
    if(m_partListEllipsoid[id].isInvalid()) continue;
    const MFloat dx = c_cellLengthAtCell(m_partListEllipsoid[id].m_cellId) / m_partListEllipsoid[id].s_lengthFactor;
    for(MInt d = 0; d < nDim; ++d) {
      const MFloat u_d = fabs(m_partListEllipsoid[id].m_velocity[d]);
      // ...
      if(abs(m_cfl + 1.0) < MFloatEps && abs(m_Ma + 1.0) > MFloatEps) {
        dt_dir = m_Ma / SQRT3 * dx / u_d;
      } else if(abs(m_cfl + 1.0) > MFloatEps)
        dt_dir = m_cfl * dx / u_d;
      timeStep = mMin(timeStep, dt_dir);
    }
  }

  if(m_activePrimaryBUp && m_spawnCellId > -1) {
    const MFloat dx = c_cellLengthAtCell(maxRefinementLevel()) / m_partList[0].s_lengthFactor;
    const MFloat u_d = fabs(m_sprayModel->injectionSpeed());
    MFloat dt_dir = m_cfl * dx / u_d;
    timeStep = mMin(timeStep, dt_dir);
  }

  if(m_spawnParticles && m_spawnCellId > -1) {
    const MFloat dx = c_cellLengthAtCell(m_spawnCellId) / m_partList[0].s_lengthFactor;
    const MFloat u_d = fabs(m_spawnVelocity);
    MFloat dt_dir = m_cfl * dx / u_d;
    timeStep = mMin(timeStep, dt_dir);
  }

  MBool useFixed = false;
  if(a_noParticles() > 0) useFixed = true;
  if(a_noEllipsoidalParticles() > 0) useFixed = true;
  if(m_spawnCellId > -1 && m_sprayModel != nullptr && m_sprayModel->soonInjection(m_time + timeStep * 100)) {
    // ...
    timeStep = Context::getSolverProperty<MFloat>("fixedTimeStep", m_solverId, AT_);
    if(domainId() == m_spawnDomainId) {
      cerr << "Applying LPT fixed timeStep of " << timeStep << endl;
    }
  }

  MPI_Allreduce(MPI_IN_PLACE, &timeStep, 1, MPI_DOUBLE, MPI_MIN, mpiComm(), AT_, "MPI_IN_PLACE", "timeStep");
  if(domainId() == 0) {
    cerr << "LPT-timestep could be " << timeStep << " is " << m_timeStep << endl;
  }
  if(m_nonBlockingComm) return;
  if(noDomains() > 1) {
    if(m_massCoupling) {
      // ...
    }
    if(m_momentumCoupling) {
      // ... ( &a_momentumFlux(0, 0), nDim);
    }
    if(m_heatCoupling) {
      // ...
    }
  }
  resetSourceTerms(noInternalCells());
  if(!m_nonBlockingComm) {
    mTerm(1, AT_, "Calling non-Blocking exchange for blocking setup!");
  }

  for(MInt n = 0; n < grid().noLeafSendNeighborDomains(); n++) {
    const MInt d = grid().leafSendNeighborDomain(n);
    MPI_Irecv(&m_sourceRecv[d][0], m_noSourceTerms * grid().noLeafWindowCells(d), MPI_DOUBLE, grid().neighborDomain(d),
              mpiTag("SOURCE_TERMS"), mpiComm(), &m_sourceRecvRequest[n], AT_, "m_sourceRecv");
  }

  if(m_openSourceSend) {
    MPI_Waitall(grid().noLeafRecvNeighborDomains(), &m_sourceSendRequest[0], MPI_STATUSES_IGNORE, AT_);
    m_openSourceSend = false;
  }

  for(MInt n = 0; n < grid().noLeafRecvNeighborDomains(); n++) {
    const MInt d = grid().leafRecvNeighborDomain(n);
    // ...
    for(MInt j = 0; j < grid().noLeafHaloCells(d); j++) {
      const MInt cellId = grid().leafHaloCell(d, j);
      if(m_massCoupling) {
        m_sourceSend[d][buffId++] = a_massFlux(cellId);
      }
      if(m_momentumCoupling) {
        for(MInt i = 0; i < nDim; i++) {
          m_sourceSend[d][buffId++] = a_momentumFlux(cellId, i);
        }
        m_sourceSend[d][buffId++] = a_workFlux(cellId);
      }
      if(m_heatCoupling) {
        m_sourceSend[d][buffId++] = a_heatFlux(cellId);
      }
    }
    MPI_Isend(&m_sourceSend[d][0], m_noSourceTerms * grid().noLeafHaloCells(d), MPI_DOUBLE, grid().neighborDomain(d),
              mpiTag("SOURCE_TERMS"), mpiComm(), &m_sourceSendRequest[n], AT_, "m_sourceSend");
  }
  m_openSourceSend = true;

  resetSourceTerms(noInternalCells());
  if(!m_nonBlockingComm) return;

  RECORD_TIMER_START(m_timers[Timers::Exchange]);
  MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_sourceRecvRequest[0], MPI_STATUSES_IGNORE, AT_);

  RECORD_TIMER_START(m_timers[Timers::Exchange5]);
  for(MInt n = 0; n < grid().noLeafSendNeighborDomains(); n++) {
    const MInt d = grid().leafSendNeighborDomain(n);
    // ...
    for(MInt j = 0; j < grid().noLeafWindowCells(d); j++) {
      const MInt cellId = grid().leafWindowCell(d, j);
      if(m_massCoupling) {
        a_massFlux(cellId) += m_sourceRecv[d][buffId++];
      }
      if(m_momentumCoupling) {
        for(MInt i = 0; i < nDim; i++) {
          a_momentumFlux(cellId, i) += m_sourceRecv[d][buffId++];
        }
        a_workFlux(cellId) += m_sourceRecv[d][buffId++];
      }
      if(m_heatCoupling) {
        a_heatFlux(cellId) += m_sourceRecv[d][buffId++];
      }
    }
  }
  RECORD_TIMER_STOP(m_timers[Timers::Exchange5]);
  RECORD_TIMER_STOP(m_timers[Timers::Exchange]);
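// Self-contained sketch of the non-blocking exchange pattern used by the
// source-term send/receive pair above: post the receive first, pack and send,
// then wait and accumulate into the local field. Plain MPI on a ring topology
// with a hypothetical buffer; the solver's buffers and tags differ.
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank = 0, size = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  const int n = 4;  // number of "source terms" exchanged per neighbor
  const int left = (rank - 1 + size) % size;
  const int right = (rank + 1) % size;

  std::vector<double> recvBuf(n), sendBuf(n, 1.0 + rank), field(n, 0.0);
  MPI_Request recvReq, sendReq;

  // 1) post receive, 2) send packed data, 3) wait, 4) add into the field
  MPI_Irecv(recvBuf.data(), n, MPI_DOUBLE, left, 0, MPI_COMM_WORLD, &recvReq);
  MPI_Isend(sendBuf.data(), n, MPI_DOUBLE, right, 0, MPI_COMM_WORLD, &sendReq);
  MPI_Wait(&recvReq, MPI_STATUS_IGNORE);
  for(int i = 0; i < n; i++) field[i] += recvBuf[i];
  MPI_Wait(&sendReq, MPI_STATUS_IGNORE);

  if(rank == 0) std::cout << "field[0] = " << field[0] << "\n";
  MPI_Finalize();
}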
  if(!m_nonBlockingComm) {
    // ...
  }
  m_receiveFlowField = true;

  for(MInt n = 0; n < grid().noLeafRecvNeighborDomains(); n++) {
    const MInt d = grid().leafRecvNeighborDomain(n);
    MPI_Irecv(&m_flowRecv[d][0], (PV.noVars() + m_evaporation) * grid().noLeafHaloCells(d), MPI_DOUBLE,
              grid().neighborDomain(d), mpiTag("FLOW_FIELD"), mpiComm(), &m_flowRecvRequest[n], AT_, "m_flowRecv");
  }

  if(m_openFlowSend) {
    MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_flowSendRequest[0], MPI_STATUSES_IGNORE, AT_);
    m_openFlowSend = false;
  }

  for(MInt n = 0; n < grid().noLeafSendNeighborDomains(); n++) {
    const MInt d = grid().leafSendNeighborDomain(n);
    // ...
    for(MInt j = 0; j < grid().noLeafWindowCells(d); j++) {
      const MInt cellId = grid().leafWindowCell(d, j);
      std::copy_n(&a_fluidVariable(cellId, 0), PV.noVars(), &m_flowSend[d][buffId]);
      buffId += PV.noVars();
      // ...
      std::copy_n(&a_fluidSpecies(cellId), 1, &m_flowSend[d][buffId]);
    }
    MPI_Isend(&m_flowSend[d][0], (PV.noVars() + m_evaporation) * grid().noLeafWindowCells(d), MPI_DOUBLE,
              grid().neighborDomain(d), mpiTag("FLOW_FIELD"), mpiComm(), &m_flowSendRequest[n], AT_, "m_flowSend");
  }
  m_openFlowSend = true;
  if(!m_nonBlockingComm) return;
  if(!m_receiveFlowField) return;

  RECORD_TIMER_START(m_timers[Timers::Exchange]);
  MPI_Waitall(grid().noLeafRecvNeighborDomains(), &m_flowRecvRequest[0], MPI_STATUSES_IGNORE, AT_);

  RECORD_TIMER_START(m_timers[Timers::Exchange5]);
  for(MInt n = 0; n < grid().noLeafRecvNeighborDomains(); n++) {
    const MInt d = grid().leafRecvNeighborDomain(n);
    // ...
    for(MInt j = 0; j < grid().noLeafHaloCells(d); j++) {
      const MInt cellId = grid().leafHaloCell(d, j);
      std::copy_n(&m_flowRecv[d][buffId], PV.noVars(), &a_fluidVariable(cellId, 0));
      buffId += PV.noVars();
      // ...
      std::copy_n(&m_flowRecv[d][buffId], 1, &a_fluidSpecies(cellId));
    }
  }
  RECORD_TIMER_STOP(m_timers[Timers::Exchange5]);
  RECORD_TIMER_STOP(m_timers[Timers::Exchange]);
  m_receiveFlowField = false;
  if(!m_nonBlockingComm) {
    // ...
  }
  if(m_openParticleInjSend) {
    MPI_Waitall(noNeighborDomains(), &m_mpi_reqSendFloat[0], MPI_STATUSES_IGNORE, AT_);
    MPI_Waitall(noNeighborDomains(), &m_mpi_reqSendInt[0], MPI_STATUSES_IGNORE, AT_);
    m_openParticleInjSend = false;
  }
  if(m_openParticleSend) {
    MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_mpi_reqSendFloat[0], MPI_STATUSES_IGNORE, AT_);
    MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_mpi_reqSendInt[0], MPI_STATUSES_IGNORE, AT_);
    m_openParticleSend = false;
  }
  if(m_openSourceSend) {
    MPI_Waitall(grid().noLeafRecvNeighborDomains(), &m_sourceSendRequest[0], MPI_STATUSES_IGNORE, AT_);
    m_openSourceSend = false;
  }
  if(m_openFlowSend) {
    MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_flowSendRequest[0], MPI_STATUSES_IGNORE, AT_);
    m_openFlowSend = false;
  }
  if(m_openRegridSend) {
    MPI_Wait(&m_checkAdaptationRecvRequest[0], MPI_STATUS_IGNORE, AT_);
    m_openRegridSend = false;
  }
  if(m_openSlopesSend) {
    MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_slopesSendRequest[0], MPI_STATUSES_IGNORE, AT_);
    m_openSlopesSend = false;
  }
  if(!m_ellipsoids) return;
  if(!m_nonBlockingComm) return;

  m_receiveVelocitySlopes = true;

  for(MInt n = 0; n < grid().noLeafRecvNeighborDomains(); n++) {
    const MInt d = grid().leafRecvNeighborDomain(n);
    MPI_Irecv(&m_slopesRecv[d][0], (nDim * nDim) * grid().noLeafHaloCells(d), MPI_DOUBLE, grid().neighborDomain(d),
              mpiTag("VELOCITY_SLOPES"), mpiComm(), &m_slopesRecvRequest[n], AT_, "m_slopesRecv");
  }

  if(m_openSlopesSend) {
    MPI_Waitall(grid().noLeafSendNeighborDomains(), &m_slopesSendRequest[0], MPI_STATUSES_IGNORE, AT_);
    m_openSlopesSend = false;
  }

  for(MInt n = 0; n < grid().noLeafSendNeighborDomains(); n++) {
    const MInt d = grid().leafSendNeighborDomain(n);
    // ...
    for(MInt j = 0; j < grid().noLeafWindowCells(d); j++) {
      const MInt cellId = grid().leafWindowCell(d, j);
      std::copy_n(&a_velocitySlope(cellId, 0, 0), nDim * nDim, &m_slopesSend[d][buffId]);
      buffId += nDim * nDim;
    }
    MPI_Isend(&m_slopesSend[d][0], (nDim * nDim) * grid().noLeafWindowCells(d), MPI_DOUBLE, grid().neighborDomain(d),
              mpiTag("VELOCITY_SLOPES"), mpiComm(), &m_slopesSendRequest[n], AT_, "m_slopesSend");
  }
  m_openSlopesSend = true;
  if(!m_ellipsoids) return;
  if(!m_nonBlockingComm) return;
  if(!m_receiveVelocitySlopes) return;

  RECORD_TIMER_START(m_timers[Timers::Exchange]);
  MPI_Waitall(grid().noLeafRecvNeighborDomains(), &m_slopesRecvRequest[0], MPI_STATUSES_IGNORE, AT_);

  RECORD_TIMER_START(m_timers[Timers::Exchange5]);
  for(MInt n = 0; n < grid().noLeafRecvNeighborDomains(); n++) {
    const MInt d = grid().leafRecvNeighborDomain(n);
    // ...
    for(MInt j = 0; j < grid().noLeafHaloCells(d); j++) {
      const MInt cellId = grid().leafHaloCell(d, j);
      std::copy_n(&m_slopesRecv[d][buffId], nDim * nDim, &a_velocitySlope(cellId, 0, 0));
      buffId += nDim * nDim;
    }
  }
  RECORD_TIMER_STOP(m_timers[Timers::Exchange5]);
  RECORD_TIMER_STOP(m_timers[Timers::Exchange]);

  m_receiveVelocitySlopes = false;
  if(m_sprayModel == nullptr) return;

  static const MInt cadStart = m_sprayModel->m_injectionCrankAngle;
  static const MInt cadEnd = m_sprayModel->m_injectionCrankAngle + 90;
  static const MInt cadInterval = 1;
  const MInt cadMaxIter = (cadEnd - cadStart) / cadInterval;
  // ...
  if(cad > cadEnd) return;
  if(cad < (cadStart - cadInterval)) return;

  for(MInt i = 0; i < cadMaxIter; i++) {
    const MFloat cadTarget = cadStart + i * cadInterval;
    if(cad < cadTarget && cad_next > cadTarget) {
      if(fabs(cad - cadTarget) <= fabs(cad_next - cadTarget)) {
        if(domainId() == 0) {
          cerr << "Saving particle output at crankAngle " << cad << endl;
        }
        // ...
      }
    } else if(cad > cadTarget && cad_prev < cadTarget) {
      if(fabs(cad - cadTarget) < fabs(cad_prev - cadTarget)) {
        if(domainId() == 0) {
          cerr << "Saving particle output at crankAngle " << cad << endl;
        }
        // ...
      }
    }
  }
  std::fill(m_timers.begin(), m_timers.end(), -1);

  NEW_TIMER_GROUP_NOCREATE(m_timerGroup, "LPT (solverId = " + to_string(m_solverId) + ")");
  NEW_TIMER_NOCREATE(m_timers[Timers::SolverType], "total object lifetime", m_timerGroup);
  RECORD_TIMER_START(m_timers[Timers::SolverType]);

  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::TimeInt], "Time-integration", m_timers[Timers::SolverType]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::PreTime], "PreTimeStep", m_timers[Timers::SolverType]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Motion], "Motion", m_timers[Timers::TimeInt]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Energy], "Energy", m_timers[Timers::TimeInt]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Wall], "Wall", m_timers[Timers::TimeInt]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::SourceTerms], "Sources", m_timers[Timers::TimeInt]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Injection], "injection", m_timers[Timers::TimeInt]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Breakup], "breakup", m_timers[Timers::TimeInt]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Exchange], "exchange", m_timers[Timers::SolverType]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Exchange1], "mpi", m_timers[Timers::Exchange]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Exchange2], "prepare", m_timers[Timers::Exchange]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Exchange3], "pack", m_timers[Timers::Exchange]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Exchange4], "receive", m_timers[Timers::Exchange]);
  NEW_SUB_TIMER_NOCREATE(m_timers[Timers::Exchange5], "unpack", m_timers[Timers::Exchange]);
  MFloat vol = a_bndryCellId(cellId) < 0 ? c_cellVolume(cellId) : m_bndryCells->a[a_bndryCellId(cellId)].m_volume;
  if(c_noChildren(cellId) > 0) {
    for(MInt d = 0; d < dataBlockSize; d++) {
      data[dataBlockSize * cellId + d] = F0;
    }
    for(MInt child = 0; child < c_noChildren(cellId); child++) {
      MInt childId = c_childId(cellId, child);
      // ...
      if(c_noChildren(childId) == 0 && !a_isValidCell(childId)) {
        // ...
      }
      MFloat volc = reduceData(childId, data, dataBlockSize);
      for(MInt d = 0; d < dataBlockSize; d++) {
        data[dataBlockSize * cellId + d] += volc * data[dataBlockSize * childId + d];
      }
    }
    for(MInt d = 0; d < dataBlockSize; d++) {
      data[dataBlockSize * cellId + d] /= mMax(1e-14, vol);
    }
  }
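// Self-contained illustrative sketch of volume-weighted reduction from child
// cells to a parent (hypothetical flat cell array, not the solver's octree):
// the parent value becomes the volume-weighted average of its children, which
// is the pattern reduceData() follows above.
#include <algorithm>
#include <iostream>
#include <vector>

struct Cell {
  double volume;
  double value;
  std::vector<int> children;  // indices of child cells, empty for leaves
};

double reduceToParent(std::vector<Cell>& cells, int cellId) {
  Cell& cell = cells[cellId];
  if(cell.children.empty()) return cell.volume;  // leaf: nothing to reduce
  cell.value = 0.0;
  for(int childId : cell.children) {
    const double childVol = reduceToParent(cells, childId);
    cell.value += childVol * cells[childId].value;
  }
  cell.value /= std::max(1e-14, cell.volume);
  return cell.volume;
}

int main() {
  // parent (volume 2) with two equally sized children carrying values 1 and 3
  std::vector<Cell> cells(3);
  cells[0] = {2.0, 0.0, {1, 2}};
  cells[1] = {1.0, 1.0, {}};
  cells[2] = {1.0, 3.0, {}};
  reduceToParent(cells, 0);
  std::cout << "parent value = " << cells[0].value << "\n";  // (1*1 + 1*3) / 2 = 2
}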
template <MInt a, MInt b, MBool interpolateVelocitySlopes>
// interpolateVariablesLS(cellId, position, interpolatedVar): least-squares
// interpolation of the fluid variables in the index range [a, b) (and, if
// requested, the velocity slopes) to a position inside the given cell.
  ASSERT((b - a) > 0, "ERROR: Difference between b and a needs to be positive!");
  ASSERT(cellId >= 0, "ERROR: Invalid cellId!");

  const MFloat originX = c_coordinate(cellId, 0);
  const MFloat originY = c_coordinate(cellId, 1);
  const MFloat originZ = c_coordinate(cellId, 2);

  std::vector<MInt> nghbrList;
  if(m_cellToNghbrHoodInterpolation.count(cellId) > 0) {
    nghbrList = m_cellToNghbrHoodInterpolation[cellId];
  } else {
    grid().findDirectNghbrs(cellId, nghbrList);
    if(nghbrList.size() < 12) {
      grid().findNeighborHood(cellId, 2, nghbrList);
      if(nghbrList.size() < 14) {
        std::cout << "WARNING: Only found " << nghbrList.size() << " neighbors during interpolation!" << std::endl;
      }
    }
    m_cellToNghbrHoodInterpolation.emplace(make_pair(cellId, nghbrList));
  }

  const MInt pseudoVarCount = (interpolateVelocitySlopes ? nDim * nDim + b - a : b - a);
  std::vector<std::array<MFloat, pseudoVarCount>> pseudoCellVar{};
  std::vector<std::array<MFloat, nDim>> pseudoCellPos{};

  auto nghbrIt = nghbrList.begin();
  while(nghbrIt != nghbrList.end()) {
    const MInt nghbrId = *nghbrIt;
    const MInt bndryCellId = a_bndryCellId(nghbrId);
    if(bndryCellId > -1) {
      const auto& bndryCell = m_bndryCells->a[bndryCellId];
      for(MInt srfcId = 0; srfcId < bndryCell.m_noSrfcs; srfcId++) {
        auto& srfc = *bndryCell.m_srfcs[srfcId];
        pseudoCellPos.emplace_back();
        auto& pos = pseudoCellPos.back();
        for(MInt n = 0; n < nDim; n++) {
          pos[n] = srfc.m_coordinates[n];
        }
        pseudoCellVar.emplace_back();
        auto& vars = pseudoCellVar.back();
        for(MInt i = a; i < b; i++) {
          vars[i] = a_fluidVariable(nghbrId, i);
        }
        if(interpolateVelocitySlopes) {
          for(MInt i = 0; i < nDim; i++) {
            for(MInt j = 0; j < nDim; j++) {
              vars[b + nDim * i + j] = a_velocitySlope(nghbrId, i, j);
            }
          }
        }
      }
      nghbrIt = nghbrList.erase(nghbrIt);
    } else {
      ++nghbrIt;
    }
  }
  const MUint noInterpolationPoints = nghbrList.size() + pseudoCellPos.size();

  if(noInterpolationPoints < 10) {
    std::vector<MFloat> dist(noInterpolationPoints, 0);
    for(MUint nId = 0; nId < nghbrList.size(); nId++) {
      const MInt nghbrId = nghbrList[nId];
      for(MInt i = 0; i < nDim; i++) {
        dist[nId] += POW2(c_coordinate(nghbrId, i) - position[i]);
      }
    }
    for(MUint pId = 0; pId < pseudoCellPos.size(); pId++) {
      IF_CONSTEXPR(nDim == 2) {
        dist[pId + nghbrList.size()] =
            POW2(pseudoCellPos[pId][0] - position[0]) + POW2(pseudoCellPos[pId][1] - position[1]);
      } else {
        dist[pId + nghbrList.size()] = POW2(pseudoCellPos[pId][0] - position[0])
                                       + POW2(pseudoCellPos[pId][1] - position[1])
                                       + POW2(pseudoCellPos[pId][2] - position[2]);
      }
    }
    // ...
    for(MInt k = a; k < b; k++) {
      interpolatedVar[k] = 0.0;
      // ...
      for(MUint nId = 0; nId < nghbrList.size(); nId++) {
        const MInt nghbrId = nghbrList[nId];
        var = a_fluidVariable(nghbrId, k);
        interpolatedVar[k] += dist[nId] / sum * var;
      }
      for(MUint pId = 0; pId < pseudoCellPos.size(); pId++) {
        var = pseudoCellVar[pId][k - a];
        interpolatedVar[k] += dist[pId + nghbrList.size()] / sum * var;
      }
    }
    if(interpolateVelocitySlopes) {
      for(MInt i = 0; i < nDim; i++) {
        for(MInt j = 0; j < nDim; j++) {
          interpolatedVar[b + nDim * i + j - a] = 0.0;
          for(MUint nId = 0; nId < nghbrList.size(); nId++) {
            const MInt nghbrId = nghbrList[nId];
            var = a_velocitySlope(nghbrId, i, j);
            interpolatedVar[b + nDim * i + j - a] += dist[nId] / sum * var;
          }
          for(MUint pId = 0; pId < pseudoCellPos.size(); pId++) {
            var = pseudoCellVar[pId][nDim * i + j + b];
            interpolatedVar[b + nDim * i + j - a] += dist[pId + nghbrList.size()] / sum * var;
          }
        }
      }
    }
    m_log << "WARNING: Not enough neighbors found to use LS, using weighted averaging instead!" << std::endl;
    // ...
  }
  MFloatTensor LSMatrix(2 * nDim + pow(2, nDim - 1), 2 * nDim + pow(2, nDim - 1));
  MFloatTensor matInv(2 * nDim + pow(2, nDim - 1), 2 * nDim + pow(2, nDim - 1));
  // ...
  for(MUint nId = 1; nId < noInterpolationPoints; nId++) {
    // ...
    if(nId >= nghbrList.size()) {
      const MUint pId = nId - nghbrList.size();
      x = pseudoCellPos[pId][0] - originX;
      y = pseudoCellPos[pId][1] - originY;
      IF_CONSTEXPR(nDim == 3) { z = pseudoCellPos[pId][2] - originZ; }
    } else {
      const MInt nghbrId = nghbrList[nId];
      x = c_coordinate(nghbrId, 0) - originX;
      y = c_coordinate(nghbrId, 1) - originY;
      IF_CONSTEXPR(nDim == 3) { z = c_coordinate(nghbrId, 2) - originZ; }
    }
    // ...
    sumXYZ += x * y * z;
    sumX2Y += x * x * y;
    sumX2Z += x * x * z;
    sumXY2 += x * y * y;
    sumY2Z += y * y * z;
    sumXZ2 += z * z * x;
    sumYZ2 += z * z * y;
    // ...
    sumX3Y += x * x * x * y;
    sumX3Z += x * x * x * z;
    sumXY3 += y * y * y * x;
    sumY3Z += y * y * y * z;
    sumXZ3 += z * z * z * x;
    sumYZ3 += z * z * z * y;
    sumX2Y2 += x * x * y * y;
    sumX2Z2 += x * x * z * z;
    sumX2YZ += x * x * y * z;
    sumY2Z2 += y * y * z * z;
    sumXY2Z += x * y * y * z;
    sumXYZ2 += x * y * z * z;
    sumX4 += x * x * x * x;
    sumY4 += y * y * y * y;
    sumZ4 += z * z * z * z;
  }

  IF_CONSTEXPR(nDim == 2) {
    LSMatrix(0, 0) = sumX4;
    LSMatrix(1, 1) = sumY4;
    LSMatrix(2, 2) = sumX2Y2;
    LSMatrix(3, 3) = sumX2;
    LSMatrix(4, 4) = sumY2;
    LSMatrix(5, 5) = noInterpolationPoints;

    LSMatrix(0, 1) = LSMatrix(1, 0) = sumX2Y2;
    LSMatrix(0, 2) = LSMatrix(2, 0) = sumX3Y;
    LSMatrix(0, 3) = LSMatrix(3, 0) = sumX3;
    LSMatrix(0, 4) = LSMatrix(4, 0) = sumX2Y;
    LSMatrix(0, 5) = LSMatrix(5, 0) = sumX2;

    LSMatrix(1, 2) = LSMatrix(2, 1) = sumXY3;
    LSMatrix(1, 3) = LSMatrix(3, 1) = sumXY2;
    LSMatrix(1, 4) = LSMatrix(4, 1) = sumY3;
    LSMatrix(1, 5) = LSMatrix(5, 1) = sumY2;

    LSMatrix(2, 3) = LSMatrix(3, 2) = sumX2Y;
    LSMatrix(2, 4) = LSMatrix(4, 2) = sumXY2;
    LSMatrix(2, 5) = LSMatrix(5, 2) = sumXY;

    LSMatrix(3, 4) = LSMatrix(4, 3) = sumXY;
    LSMatrix(3, 5) = LSMatrix(5, 3) = sumX;

    LSMatrix(4, 5) = LSMatrix(5, 4) = sumY;
  } else {
    LSMatrix(0, 0) = sumX4;
    LSMatrix(1, 1) = sumY4;
    LSMatrix(2, 2) = sumZ4;
    LSMatrix(3, 3) = sumX2Y2;
    LSMatrix(4, 4) = sumX2Z2;
    LSMatrix(5, 5) = sumY2Z2;
    LSMatrix(6, 6) = sumX2;
    LSMatrix(7, 7) = sumY2;
    LSMatrix(8, 8) = sumZ2;
    LSMatrix(9, 9) = noInterpolationPoints;

    LSMatrix(0, 1) = LSMatrix(1, 0) = sumX2Y2;
    LSMatrix(0, 2) = LSMatrix(2, 0) = sumX2Z2;
    LSMatrix(0, 3) = LSMatrix(3, 0) = sumX3Y;
    LSMatrix(0, 4) = LSMatrix(4, 0) = sumX3Z;
    LSMatrix(0, 5) = LSMatrix(5, 0) = sumX2YZ;
    LSMatrix(0, 6) = LSMatrix(6, 0) = sumX3;
    LSMatrix(0, 7) = LSMatrix(7, 0) = sumX2Y;
    LSMatrix(0, 8) = LSMatrix(8, 0) = sumX2Z;
    LSMatrix(0, 9) = LSMatrix(9, 0) = sumX2;

    LSMatrix(1, 2) = LSMatrix(2, 1) = sumY2Z2;
    LSMatrix(1, 3) = LSMatrix(3, 1) = sumXY3;
    LSMatrix(1, 4) = LSMatrix(4, 1) = sumXY2Z;
    LSMatrix(1, 5) = LSMatrix(5, 1) = sumY3Z;
    LSMatrix(1, 6) = LSMatrix(6, 1) = sumXY2;
    LSMatrix(1, 7) = LSMatrix(7, 1) = sumY3;
    LSMatrix(1, 8) = LSMatrix(8, 1) = sumY2Z;
    LSMatrix(1, 9) = LSMatrix(9, 1) = sumY2;

    LSMatrix(2, 3) = LSMatrix(3, 2) = sumXYZ2;
    LSMatrix(2, 4) = LSMatrix(4, 2) = sumXZ3;
    LSMatrix(2, 5) = LSMatrix(5, 2) = sumYZ3;
    LSMatrix(2, 6) = LSMatrix(6, 2) = sumXZ2;
    LSMatrix(2, 7) = LSMatrix(7, 2) = sumYZ2;
    LSMatrix(2, 8) = LSMatrix(8, 2) = sumZ3;
    LSMatrix(2, 9) = LSMatrix(9, 2) = sumZ2;

    LSMatrix(3, 4) = LSMatrix(4, 3) = sumX2YZ;
    LSMatrix(3, 5) = LSMatrix(5, 3) = sumXY2Z;
    LSMatrix(3, 6) = LSMatrix(6, 3) = sumX2Y;
    LSMatrix(3, 7) = LSMatrix(7, 3) = sumXY2;
    LSMatrix(3, 8) = LSMatrix(8, 3) = sumXYZ;
    LSMatrix(3, 9) = LSMatrix(9, 3) = sumXY;

    LSMatrix(4, 5) = LSMatrix(5, 4) = sumXYZ2;
    LSMatrix(4, 6) = LSMatrix(6, 4) = sumX2Z;
    LSMatrix(4, 7) = LSMatrix(7, 4) = sumXYZ;
    LSMatrix(4, 8) = LSMatrix(8, 4) = sumXZ2;
    LSMatrix(4, 9) = LSMatrix(9, 4) = sumXZ;

    LSMatrix(5, 6) = LSMatrix(6, 5) = sumXYZ;
    LSMatrix(5, 7) = LSMatrix(7, 5) = sumY2Z;
    LSMatrix(5, 8) = LSMatrix(8, 5) = sumYZ2;
    LSMatrix(5, 9) = LSMatrix(9, 5) = sumYZ;

    LSMatrix(6, 7) = LSMatrix(7, 6) = sumXY;
    LSMatrix(6, 8) = LSMatrix(8, 6) = sumXZ;
    LSMatrix(6, 9) = LSMatrix(9, 6) = sumX;

    LSMatrix(7, 8) = LSMatrix(8, 7) = sumYZ;
    LSMatrix(7, 9) = LSMatrix(9, 7) = sumY;

    LSMatrix(8, 9) = LSMatrix(9, 8) = sumZ;
  }
  const MFloat normalizationFactor = FPOW2(2 * c_level(cellId)) / c_cellLengthAtLevel(0);
  for(MInt i = 0; i < 2 * nDim + pow(2, nDim - 1); i++) {
    for(MInt j = 0; j < 2 * nDim + pow(2, nDim - 1); j++) {
      LSMatrix(i, j) *= normalizationFactor;
    }
  }

  maia::math::invert(LSMatrix, matInv, 2 * nDim + pow(2, nDim - 1), 2 * nDim + pow(2, nDim - 1));

  const MInt totalNumberOfVars = interpolateVelocitySlopes ? b + nDim * nDim : b;
  for(MInt k = a; k < totalNumberOfVars; k++) {
    // ...
    sumVar = a_fluidVariable(cellId, k);
    // ...
    sumVar = a_velocitySlope(cellId, std::floor((k - b) / 3), (k - b) % 3);
    // ...
    for(MUint nId = 1; nId < noInterpolationPoints; nId++) {
      // ...
      if(nId >= nghbrList.size()) {
        const MUint pId = nId - nghbrList.size();
        x = pseudoCellPos[pId][0] - originX;
        y = pseudoCellPos[pId][1] - originY;
        var = pseudoCellVar[pId][k - a];
        IF_CONSTEXPR(nDim == 3) { z = pseudoCellPos[pId][2] - originZ; }
      } else {
        const MInt nghbrId = nghbrList[nId];
        x = c_coordinate(nghbrId, 0) - originX;
        y = c_coordinate(nghbrId, 1) - originY;
        // ...
        var = a_fluidVariable(nghbrId, k);
        // ...
        var = a_velocitySlope(nghbrId, std::floor((k - b) / 3), (k - b) % 3);
        // ...
        IF_CONSTEXPR(nDim == 3) { z = c_coordinate(nghbrId, 2) - originZ; }
      }
      // ...
      sumVarXY += var * x * y;
      sumVarXZ += var * x * z;
      sumVarYZ += var * y * z;
      sumVarX2 += var * x * x;
      sumVarY2 += var * y * y;
      sumVarZ2 += var * z * z;
    }

    IF_CONSTEXPR(nDim == 2) {
      // ...
    }
    // ...
    for(MInt i = 0; i < 2 * nDim + pow(2, nDim - 1); i++) {
      rhs(i) *= normalizationFactor;
    }
    // ...
    for(MInt i = 0; i < 2 * nDim + pow(2, nDim - 1); i++) {
      for(MInt j = 0; j < 2 * nDim + pow(2, nDim - 1); j++) {
        coeff(i) += matInv(i, j) * rhs(j);
      }
    }

    MFloat positionX = position[0] - originX;
    MFloat positionY = position[1] - originY;
    // ...
    IF_CONSTEXPR(nDim == 3) { positionZ = position[2] - originZ; }
    MFloat result = coeff((MInt)(2 * nDim + pow(2, nDim - 1) - 1));
    IF_CONSTEXPR(nDim == 2) {
      result += coeff(0) * positionX * positionX;
      result += coeff(1) * positionY * positionY;
      result += coeff(2) * positionX * positionY;
      result += coeff(3) * positionX;
      result += coeff(4) * positionY;
    } else {
      result += coeff(0) * positionX * positionX;
      result += coeff(1) * positionY * positionY;
      result += coeff(2) * positionZ * positionZ;
      result += coeff(3) * positionX * positionY;
      result += coeff(4) * positionX * positionZ;
      result += coeff(5) * positionY * positionZ;
      result += coeff(6) * positionX;
      result += coeff(7) * positionY;
      result += coeff(8) * positionZ;
    }

    interpolatedVar[k - a] = result;
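// Generic, self-contained sketch of inverse-distance weighting, the idea behind
// the weighted-average fallback used when too few neighbors exist for the
// least-squares fit above. This is a textbook version with hypothetical names;
// the solver's exact weighting may differ.
#include <algorithm>
#include <array>
#include <cmath>
#include <iostream>
#include <vector>

struct Sample {
  std::array<double, 3> pos;
  double value;
};

double idwInterpolate(const std::vector<Sample>& samples, const std::array<double, 3>& x) {
  double weightSum = 0.0, result = 0.0;
  for(const Sample& s : samples) {
    double d2 = 0.0;
    for(int i = 0; i < 3; i++) d2 += (s.pos[i] - x[i]) * (s.pos[i] - x[i]);
    const double w = 1.0 / std::max(d2, 1e-14);  // closer samples weigh more
    weightSum += w;
    result += w * s.value;
  }
  return result / weightSum;
}

int main() {
  std::vector<Sample> samples = {{{0, 0, 0}, 1.0}, {{1, 0, 0}, 3.0}, {{0, 1, 0}, 2.0}};
  std::cout << idwInterpolate(samples, {0.1, 0.1, 0.0}) << "\n";
}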