fvstg.h
// Copyright (C) 2024 The m-AIA AUTHORS
//
// This file is part of m-AIA (https://git.rwth-aachen.de/aia/m-AIA/m-AIA)
//
// SPDX-License-Identifier: LGPL-3.0-only

#ifndef STG_H_
#define STG_H_

#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <string>
#include "enums.h"

#include "fvcartesiansolverxd.h"
#include "fvstructuredsolver.h"

template <class SolverType>
class AccessorUnstructured;
template <class SolverType>
class AccessorStructured;

template <MInt nDim, class SysEqn>
class FvBndryCndXD;

// Tag class with some type traits
template <MInt nDim, SolverType _>
struct SolverTraits {};

template <MInt nDim>
struct SolverTraits<nDim, MAIA_FINITE_VOLUME> {
  // using Accessor = AccessorUnstructured;
  using SolverType = FvCartesianSolverXD<nDim, FvSysEqnNS<nDim>>;
};

template <MInt nDim>
struct SolverTraits<nDim, MAIA_STRUCTURED> {
  // using Accessor = AccessorStructured;
  using SolverType = FvStructuredSolver<nDim>;
};

// Accessor type trait
template <MInt nDim, SolverType SolverType_>
struct AccessorTrait {};

template <MInt nDim>
struct AccessorTrait<nDim, MAIA_FINITE_VOLUME> {
  using Accessor = AccessorUnstructured<typename SolverTraits<nDim, MAIA_FINITE_VOLUME>::SolverType>;
};

template <MInt nDim>
struct AccessorTrait<nDim, MAIA_STRUCTURED> {
  using Accessor = AccessorStructured<typename SolverTraits<nDim, MAIA_STRUCTURED>::SolverType>;
};

template <class base_iterator, class SolverType>
class nDim_iterator_t : public base_iterator {
 public:
  using value_type = typename base_iterator::value_type;

  // Constructors (default one for creation of invalid object)
  nDim_iterator_t() = default;
  nDim_iterator_t(base_iterator it, const AccessorUnstructured<SolverType>* parent) : base_iterator(it), p(parent) {}

  // public member functions
  // deprecated, since the new convention is to forward the call to the Accessor, i.e., instead of
  // 'it.getCellId()', use 'a->getCellId(it)', where 'it' is the iterator and 'a' is a pointer to the Accessor
  value_type getCellId() const;
  value_type getStgId() const { return *(*this); }
  value_type getNghbr(MInt dir) const;

 private:
  // to access member functions of the enclosing object
  const AccessorUnstructured<SolverType>* p = nullptr;
};


// Base class for CRTP; defines the interface. All functions for which only declarations are given
// must be implemented by the derived class.
// The Accessor class is created for the LES solver; it manages the solver-specific accesses.

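// Sketch (illustration only; 'MyAccessor'/'MySolver' are placeholder names): a concrete accessor
// derives from Accessor<Derived, SolverType> and implements the pure virtual functions, e.g.:
//
//   class MyAccessor : public Accessor<MyAccessor, MySolver> {
//    public:
//     MyAccessor(MySolver* solver, const MPI_Comm& comm)
//       : Accessor<MyAccessor, MySolver>(solver, comm) {}
//     MFloat a_coordinate(MInt cellId, MInt dim) const override { /* solver-specific access */ }
//     // ...
//   };
//
// The private base constructor plus 'friend Derived' ensures that only the class passed as
// 'Derived' can construct its own base, which prevents inheriting from the wrong base class.
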
template <class Derived, class SolverType_>
class Accessor {
 private:
  // static const SolverType solvertype = Derived::solvertyp;
  // using SolverType = typename SolverTraits<solvertype>::SolverType;
  using SolverType = SolverType_; // typename SolverTraits<3, SolverType_>::SolverType;

  // Private constructor + friend declaration prevent the derived class from inheriting
  // from the wrong base class and from creating a base object directly
  Accessor(SolverType_* solver, const MPI_Comm& commStg) : m_solver(solver), m_commStg(commStg) {} //= default;
  friend Derived;

 protected:
  using Storage = std::vector<MInt>;
  using iterator = std::vector<MInt>::iterator;
  using const_iterator = std::vector<MInt>::const_iterator;

 public:
  // CRTP accessor
  Derived* d() { return static_cast<Derived*>(this); }

  virtual MFloat a_coordinate(MInt, MInt) const = 0;
  virtual MInt domainId(MInt, MInt) const = 0;
  virtual MFloat& a_pvariable(MInt, MInt) = 0;
  virtual MFloat& UInfinity() const = 0;
  virtual MFloat& PInfinity() const = 0;
  virtual MFloat& rhoInfinity() const = 0;

  // Member functions
  MInt sizeBC() const { return m_noBcCells; }
  MInt sizeStg() const { return m_stgSize; }


  // Note: This is not valid C++, because the iterator types of the derived classes differ.
  // The proper way to do this is to define a common custom iterator type and derive the specific
  // iterator types from this common type.
  // Another way to do this is to implement a function which returns the next element to access and
  // nullptr at the end. For example:
  //
  // // reset to start
  // for(accessType* example = exampleObj->begin(); example != nullptr; example = exampleObj->next()) {
  //   example->bla();
  // }

  //

  // 'iterateSlopes' -> iterate over all cells for which slopes have been computed
  // 'iterateB1' -> iterate over first layer of boundary cells
  // 'iterateAll' -> iterate over all stg cells

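  // Usage sketch (illustration; cf. the loops in saveStg below): with 'a' pointing to a
  // concrete accessor, the stg cells are traversed as
  //
  //   for(auto it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
  //     const MInt cellId = a->getCellId(it); // solver cell id
  //     const MInt stgId = a->getStgId(it);   // position in the stg containers
  //   }
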
  MInt getCellId(const nDim_citerator& it_) const;
  MInt getStgId(const nDim_citerator& it_) const;
  MInt getNghbr(const nDim_citerator& it_, MInt dir) const;
  MInt getNghbrStg(const nDim_citerator& it_, MInt dir) const;

  // protected: //TODO labels:FV make this protected again
 public:
  SolverType* m_solver = nullptr;
  MPI_Comm m_commStg;
  static constexpr const MInt m_nDim = 3;

  // Cell ids of all stg cells in normal container
  Storage m_stgToCellId;
  // Stg ids of boundary cells
  Storage m_bcStgId;
  // Stg ids of all stg cells (just numbers in increasing order)
  Storage m_stgId;
  // # bc cells
  MInt m_noBcCells = 0;
  // # stg cells; m_stgSize=m_noBcCells if no further cells are appended
  MInt m_stgSize = 0;
};


// Accessor for unstructured finite volume solver
template <class SolverType>
class AccessorUnstructured : public Accessor<AccessorUnstructured<SolverType>, SolverType> {
 public:
  using Base = Accessor<AccessorUnstructured<SolverType>, SolverType>;
  using Base::m_solver;
  using Base::m_noBcCells;
  using Base::m_stgSize;
  using Base::m_stgToCellId;
  using Base::m_bcStgId;
  using Base::m_stgId;
  using nDim_citerator = nDim_iterator_t<typename Base::const_iterator, SolverType>;

  // Constructor
  explicit AccessorUnstructured(MInt* sortedBndryCellIds, MInt size, SolverType* solver, const MPI_Comm& commStg,
                                MBool cutOff)
    : Accessor<AccessorUnstructured<SolverType>, SolverType>(solver, commStg) {
    MInt myrank = 0;
    MPI_Comm_rank(commStg, &myrank);

    // First put the boundary cells into m_stgToCellId and then append the ghost cells and finally
    // the reconstruction neighbors, which haven't been appended already
    m_stgToCellId.assign(sortedBndryCellIds, sortedBndryCellIds + size);
    m_noBcCells = size;
    assert(m_stgToCellId.size() == static_cast<MUint>(m_noBcCells));

    if(!cutOff) {
      // append ghost cells and create the mappings between stg ghost and stg boundary cells
      m_stgBndry2stgGhost.assign(2 * m_noBcCells, -1);
      m_stgGhost2stgBndry.assign(2 * m_noBcCells, -1);
      m_stgToCellId.resize(2 * m_noBcCells);
      for(MInt id = 0; id < m_noBcCells; ++id) {
        const MInt cellId = m_stgToCellId[id];
        const MInt bndryId = solver->a_bndryId(cellId);

        if(bndryId < 0) {
          TERMM(1, "Error: stg boundary cell without valid bndryId");
        }

        const MInt ghostCellId = m_solver->m_bndryCells->a[bndryId].m_srfcVariables[0]->m_ghostCellId;
        m_stgToCellId[m_noBcCells + id] = ghostCellId;
        m_stgBndry2stgGhost[id] = m_noBcCells + id;
        m_stgGhost2stgBndry[m_noBcCells + id] = id;
      }
    }

    m_stgSize = m_stgToCellId.size();

    mAlloc(m_nghbrMapping, m_noBcCells, m_solver->m_cells.noRecNghbrs(), "m_nghbrMapping_", AT_);

    if(!cutOff) {
      // Check if cellId has already been assigned a stgId
      using MyMap = std::map<MInt, MInt>;
      MyMap cellIdToStgId_map;
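      // Insert-or-lookup: a new cellId is assigned the next free stg id (m_stgSize, which is
      // then incremented as a side effect); for an already known cellId the existing id is returned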
      auto cellIdToStgId = [this, &cellIdToStgId_map](MInt cellId) -> MInt {
        std::pair<MyMap::const_iterator, MBool> res = cellIdToStgId_map.insert(MyMap::value_type(cellId, m_stgSize));
        if(res.second) ++m_stgSize;
        return res.first->second;
      };
      for(MInt i = 0; i < m_stgSize; ++i) {
        cellIdToStgId_map.insert(MyMap::value_type(m_stgToCellId[i], i));
      }

      for(MInt i = 0; i < m_noBcCells; ++i) {
        for(MInt nghbr = 0; nghbr < m_solver->a_noReconstructionNeighbors(m_stgToCellId[i]); ++nghbr) {
          const MInt nghbrId = m_solver->a_reconstructionNeighborId(m_stgToCellId[i], nghbr);
          const MInt stgId = cellIdToStgId(nghbrId);

          m_nghbrMapping[i][nghbr] = stgId;

          if(m_stgToCellId.size() <= static_cast<MUint>(stgId)) {
            /* if(!cutOff){ */
            if(m_solver->a_isBndryGhostCell(nghbrId)) {
              std::cout << "Error: reconstruction neighbor is a boundary ghost cell: rank " << myrank << "; "
                        << nghbrId << "; " << stgId << "; " << solver->a_coordinate(nghbrId, 0) << "|"
                        << solver->a_coordinate(nghbrId, 1) << "|" << solver->a_coordinate(nghbrId, 2) << std::endl;
              TERMM(1, "ERROR");
            }
            /* } */
            m_stgToCellId.push_back(nghbrId);
          }
        }
      }

      if(m_stgToCellId.size() != static_cast<MUint>(m_stgSize)) {
        TERMM(1, "ERROR: m_stgToCellId.size() != m_stgSize");
      }

      /* if(!cutOff) */ m_stgGhost2stgBndry.resize(m_stgSize, -1);
    }

    // Create list of stg ids of only the boundary cells, which are stored at the beginning in the unstructured case
    m_bcStgId.resize(m_noBcCells);
    std::iota(std::begin(m_bcStgId), std::end(m_bcStgId), 0); // Fill with 0, 1, ..., m_noBcCells-1

    // Create list of stg ids of all cells
    m_stgId.resize(m_stgSize);
    std::iota(std::begin(m_stgId), std::end(m_stgId), 0); // Fill with 0, 1, ..., m_stgSize-1


    // Print summary
    /* #ifndef NDEBUG */
    /* std::cout << "--- STG INFO ---" << std::endl */
    /*           << myrank << "# bcCells/stgCells: " << m_noBcCells << " / " << m_stgSize << std::endl; */
    /* #endif */
  } // Constructor ends

  // Member functions
  MInt getNghbrMapping(MInt stgId, MInt nghbr) const { return m_nghbrMapping[stgId][nghbr]; }
  // TODO labels:FV Assumption that only one ghost cell per boundary cell!!!
  MInt getGhostCellId(MInt stgId) const {
    const MInt cellId = m_stgToCellId[stgId];
    const MInt bndryId = m_solver->a_bndryId(cellId);
    // TODO labels:FV,totest check if it is a boundary cell
    return m_solver->m_bndryCells->a[bndryId].m_srfcVariables[0]->m_ghostCellId;
  }

  nDim_citerator iterateSlopes() { return {m_bcStgId.begin(), this}; }
  nDim_citerator iterateSlopes_nDim_citerator_end() { return {m_bcStgId.end(), this}; }

  nDim_citerator iterateB1() { return {m_bcStgId.begin(), this}; }
  nDim_citerator iterateB1_nDim_citerator_end() { return {m_bcStgId.end(), this}; }

  nDim_citerator iterateAll() { return {m_stgId.begin(), this}; }
  nDim_citerator iterateAll_nDim_citerator_end() { return {m_stgId.end(), this}; }

  MInt getCellId(const nDim_citerator& it_) const { return m_stgToCellId[*it_]; }
  MInt getStgId(const nDim_citerator& it_) const { return *it_; }
  MInt getNghbr(const nDim_citerator& it_, MInt dir) const {
    const MInt cellId = m_stgToCellId[*it_];
    if(m_solver->a_hasNeighbor(cellId, dir) > 0) {
      return m_solver->c_neighborId(cellId, dir);
    } else {
      return -1;
    }
  }
  // MInt getNghbrStg(const nDim_citerator& it_, MInt dir) const //not implemented yet

  MFloat a_coordinate(MInt cellId, MInt dim) const override { return m_solver->a_coordinate(cellId, dim); }
  MInt domainId() const { return m_solver->domainId(); }
  MInt domainId(MInt, MInt) const override { TERMM(-1, "Invalid call!"); }
  MFloat& a_pvariable(MInt cellId, MInt varId) override { return m_solver->a_pvariable(cellId, varId); }
  MFloat& UInfinity() const override { return m_solver->m_UInfinity; }
  MFloat& PInfinity() const override { return m_solver->m_PInfinity; }
  MFloat& rhoInfinity() const override { return m_solver->m_rhoInfinity; }

 private:
  MInt** m_nghbrMapping = nullptr;
  // possibly introduce this also in the structured accessor
  // maybe also introduce an iterator to iterate over all stgGhost cells
 public: // TODO labels:FV make it private later
  std::vector<MInt> m_stgBndry2stgGhost;
  std::vector<MInt> m_stgGhost2stgBndry;
};

// Helper class to access cellIds & stg cellIds from i,j,k indices
// 3D: x: m_nCells[2]; m_start[0]
//     y: m_nCells[1]; m_start[1]
//     z: m_nCells[0]; m_start[2]
// 2D: x: m_nCells[1]; m_start[0]
//     y: m_nCells[0]; m_start[1]
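// Worked example (illustration): cellIndex(i, j, k) below linearizes (i, j, k) as
// i + (j + k * m_nCells[1]) * m_nCells[2], i.e. i runs fastest. For m_nCells = {4, 8, 16}
// this gives cellIndex(1, 2, 3) = 1 + (2 + 3 * 8) * 16 = 417; cellIndexBC uses m_stgBoxSize
// in the same way.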
template <class SolverType>
class AccessorStructured : public Accessor<AccessorStructured<SolverType>, SolverType> {
 public:
  using Base = Accessor<AccessorStructured<SolverType>, SolverType>;
  using Base::m_solver;
  using Base::m_nDim;
  using Base::m_noBcCells;
  using Base::m_stgSize;
  using Base::m_stgToCellId;
  using Base::m_bcStgId;
  using Base::m_stgId;

  // static const SolverType solvertyp = MAIA_STRUCTURED;

  // Constructor
  // Check later if it is a good idea to pass a pointer to this function; what if the memory to which the
  // pointer points gets deleted somewhere else!
  explicit AccessorStructured(const MInt* const start, const MInt* const end, const MInt* const nCells,
                              const MInt* const stgBoxSize, MInt noGhostLayers, FvStructuredSolver<3>* solver,
                              const MPI_Comm commStg)
    : Accessor<AccessorStructured<SolverType>, SolverType>(solver, commStg),
      m_start(start),
      m_end(end),
      m_nCells(nCells),
      m_stgBoxSize(stgBoxSize),
      m_noGhostLayers(noGhostLayers) {
    /* if (m_nDim==2) {
         m_nCells[2] = m_nCells[1];
         m_nCells[1] = m_nCells[0];
         m_nCells[0] = 1;
       }*/

    m_stgSize = 1;
    for(MInt d = 0; d < m_nDim; ++d) {
      m_stgSize *= m_stgBoxSize[d];
    }

    m_noBcCells = 1;
    for(MInt d = 0; d < m_nDim; d++) {
      const MInt nDims = m_end[d] - m_start[d];
      m_noBcCells *= nDims;
    }

    m_stgToCellId.resize(m_stgSize);
    MInt cnt = 0;
    for(MInt k = 0; k < m_stgBoxSize[2]; ++k) {
      for(MInt j = 0; j < m_stgBoxSize[1]; ++j) {
        for(MInt i = 0; i < m_stgBoxSize[0]; ++i) {
          m_stgToCellId[cnt++] = cellIndex(i, j, k);
        }
      }
    }

    m_bcStgId.resize(m_noBcCells);
    cnt = 0;
    for(MInt k = m_start[2]; k < m_end[2]; ++k) {
      for(MInt j = m_start[1]; j < m_end[1]; ++j) {
        for(MInt i = m_start[0]; i < m_end[0]; ++i) {
          m_bcStgId[cnt++] = cellIndexBC(i, j, k);
        }
      }
    }

    m_stgId.resize(m_stgSize);
    std::iota(std::begin(m_stgId), std::end(m_stgId), 0); // Fill with 0, 1, ..., m_stgSize-1
  } // Constructor ends


  MInt cellIndex(const MInt i, const MInt j, const MInt k) const { return i + (j + k * m_nCells[1]) * m_nCells[2]; }
  MInt cellIndex(const MInt* const ijk_) const { return ijk_[0] + (ijk_[1] + ijk_[2] * m_nCells[1]) * m_nCells[2]; }
  MInt cellIndexBC(const MInt i, const MInt j, const MInt k) const {
    return i + (j + k * m_stgBoxSize[1]) * m_stgBoxSize[2];
  }
  MInt cellIndexBC(const MInt* const ijk_) const {
    return ijk_[0] + (ijk_[1] + ijk_[2] * m_stgBoxSize[1]) * m_stgBoxSize[2];
  }
  MInt start(MInt dim) const { return m_start[dim]; }
  MInt end(MInt dim) const { return m_end[dim]; }

  MInt domainId() const { return m_solver->domainId(); }
  MInt domainId(MInt, MInt) const override { TERMM(-1, "Invalid call!"); }

  // Custom const iterator over the stg cells; traverses an i/j/k index box
  class nDim_citerator {
   public:
    using self_type = nDim_citerator;
    using iterator_category = std::random_access_iterator_tag;
    using value_type = MInt;
    using difference_type = MInt;
    using pointer = MInt*;
    using reference = MInt&;

    // Constructors (default one for creation of invalid object)
    nDim_citerator() = default;
    nDim_citerator(const AccessorStructured* parent, const /*pointer*/ MInt* start, const /*pointer*/ MInt* end)
      : p(parent), ijk_start(start), ijk_end(end) {
      // std::copy_n(&start[0], m_nDim, &ijk_start[0]);
      // std::copy_n(&end[0], m_nDim, &ijk_end[0]);
      std::copy_n(&start[0], m_nDim, &ijk[0]);
      stgId = p->cellIndexBC(ijk[0], ijk[1], ijk[2]);
    }

    // public member functions
    // deprecated, since the new convention is to forward the call to the Accessor, i.e., instead of
    // 'it.getCellId()', use 'a->getCellId(it)', where 'it' is the iterator and 'a' is a pointer to the Accessor
    value_type getCellId() const { return p->cellIndex(ijk); }
    value_type getStgId() const { return stgId; }
    value_type getijk(MInt dim) const { return ijk[dim]; }
    pointer getijk() const { return ijk; }
    // TODO labels:FV implement more efficient specializations of this function
    value_type getNghbr(MInt dir) const {
      return p->cellIndex(ijk[0] + map_[dir][0], ijk[1] + map_[dir][1], ijk[2] + map_[dir][2]);
    }
    value_type getNghbrStg(MInt dir) const {
      return p->cellIndexBC(ijk[0] + map_[dir][0], ijk[1] + map_[dir][1], ijk[2] + map_[dir][2]);
    }

    // post-increment operator, i.e., return current value and move to next position; called for i++
    /*value_type*/ self_type operator++(MInt) {
      self_type temp = *this;
      if(ijk[0] < ijk_end[0]) {
        stgId++;
        ijk[0]++;
        return temp;
      }

      if(ijk[1] < ijk_end[1]) {
        ijk[0] = ijk_start[0];
        ijk[1]++;
        stgId = p->cellIndexBC(ijk[0], ijk[1], ijk[2]);
        return temp;
      }

      if(ijk[2] < ijk_end[2]) {
        ijk[0] = ijk_start[0];
        ijk[1] = ijk_start[1];
        ijk[2]++;
        stgId = p->cellIndexBC(ijk[0], ijk[1], ijk[2]);
        return temp;
      }
      if(flag_last) {
        return const_cast<AccessorStructured*>(p)->nDim_citerator_invalid;
      }
      flag_last = true;
      return temp;
    }

    // pre-increment operator; called for ++i
    /*value_type&*/ self_type& operator++() {
      if(ijk[0] < ijk_end[0]) {
        ijk[0]++;
        ++stgId;
        return *this;
      }

      if(ijk[1] < ijk_end[1]) {
        ijk[0] = ijk_start[0];
        ijk[1]++;
        stgId = p->cellIndexBC(ijk[0], ijk[1], ijk[2]);
        return *this;
      }

      if(ijk[2] < ijk_end[2]) {
        ijk[0] = ijk_start[0];
        ijk[1] = ijk_start[1];
        ijk[2]++;
        stgId = p->cellIndexBC(ijk[0], ijk[1], ijk[2]);
        return *this;
      }
      return const_cast<AccessorStructured*>(p)->nDim_citerator_invalid;
    }

    const MInt& operator*() { return stgId; }       // emulate as if it were an iterator
    const MInt& operator*() const { return stgId; } // emulate as if it were an iterator
    const self_type* operator->() { return this; }  // emulate as if it were an iterator
    MBool operator==(const self_type& rhs) const {
      return (stgId == *rhs && getijk(0) == rhs.getijk(0) && getijk(1) == rhs.getijk(1) && getijk(2) == rhs.getijk(2));
    }
    MBool operator!=(const self_type& rhs) const { return !(*this == rhs); }

   private:
    // to access member functions of the enclosing object
    const AccessorStructured* p = nullptr;
    // current stgId
    MInt stgId = -1;
    // current ijk indices
    mutable MInt ijk[3] = {-1, -1, -1};
    // helper indices
    const MInt* const ijk_start = nullptr;
    const MInt* const ijk_end = nullptr;
    MBool flag_last = false;
  };

  // Helper functions for structured nDim_citerator
  nDim_citerator nDim_citerator_begin(const MInt* start, const MInt* end) const { return {this, start, end}; }
  nDim_citerator nDim_citerator_end() const { return nDim_citerator_invalid; }
  // Helper functions for structured nDim_citerator ends

  // 'iterateSlopes' -> iterate over all cells for which slopes have been computed
  nDim_citerator iterateSlopes() {
    MInt ii = 1;
    MInt start_[3] = {ii, m_start[1] + 1, m_start[2] + 1};
    MInt end_[3] = {ii + 1, m_end[1] - 1, m_end[2] - 1};
    return {this, &start_[0], &end_[0]};
  }

  nDim_citerator iterateSlopes_nDim_citerator_end() { return nDim_citerator_invalid; }

  // 'iterateB1' -> iterate over first layer of boundary cells
  nDim_citerator iterateB1() {
    MInt ii = m_noGhostLayers - 1;
    MInt start_[3] = {ii, m_start[1], m_start[2]};
    MInt end_[3] = {ii + 1, m_end[1], m_end[2]};
    return {this, &start_[0], &end_[0]};
  }

  nDim_citerator iterateB1_nDim_citerator_end() { return nDim_citerator_invalid; }

  // 'iterateAll' -> iterate over all stg cells
  nDim_citerator iterateAll() {
    MInt start_[3] = {0, 0, 0};
    return {this, &start_[0], m_stgBoxSize};
  }

  nDim_citerator iterateAll_nDim_citerator_end() { return nDim_citerator_invalid; }

  MInt getCellId(const nDim_citerator& it_) const {
    assert(m_stgToCellId[*it_] == cellIndex(it_.getijk()));
    return m_stgToCellId[*it_];
  }
  MInt getStgId(const nDim_citerator& it_) const { return *it_; }
  // TODO labels:FV implement more efficient specializations of this function, by exploiting the fact that neighbors
  // in some directions differ from the current id by some fixed offset
  MInt getNghbr(const nDim_citerator& it_, MInt dir) const {
    return cellIndex(it_.getijk(0) + map_[dir][0], it_.getijk(1) + map_[dir][1], it_.getijk(2) + map_[dir][2]);
  }
  MInt getNghbrStg(const nDim_citerator& it_, MInt dir) const {
    return cellIndexBC(it_.getijk(0) + map_[dir][0], it_.getijk(1) + map_[dir][1], it_.getijk(2) + map_[dir][2]);
  }

  MFloat a_coordinate(MInt cellId, MInt dim) const override { return m_solver->m_cells->coordinates[dim][cellId]; }
  MFloat& a_pvariable(MInt cellId, MInt varId) override { return m_solver->m_cells->pvariables[varId][cellId]; }
  MFloat& UInfinity() const override { return m_solver->PV->UInfinity; }
  MFloat& PInfinity() const override { return m_solver->PV->PInfinity; }
  MFloat& rhoInfinity() const override { return m_solver->CV->rhoInfinity; }

 private:
  // starting indices of respective boundary
  const MInt* const m_start; //[3] = {0,0,0};
  // ending indices of respective boundary
  const MInt* const m_end; //[3] = {1,1,1};
  // # cells in each dimension
  const MInt* const m_nCells; //[3];
  // # stg cells in each dimension
  const MInt* const m_stgBoxSize; //[3];
  const MInt m_noGhostLayers;
  // Invalid iterator
  nDim_citerator nDim_citerator_invalid;
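  // direction -> ijk offset mapping for map_ below: dir 0/1 = -x/+x, 2/3 = -y/+y, 4/5 = -z/+z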
  constexpr static const MInt map_[6][3] = {{-1, 0, 0}, {1, 0, 0}, {0, -1, 0}, {0, 1, 0}, {0, 0, -1}, {0, 0, 1}};
};

// FIXME labels:FV
template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
class MSTG;
template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
void saveStg(std::map<MInt, MSTG<nDim, SolverTypeR, SolverTypeL>*> stgBC,
             typename SolverTraits<nDim, SolverTypeL>::SolverType* solver);

// 1) In the constructor initialize things related to the LES solver:
//    Set communicators;
//    Initialize iterator of LES solver;
//    Determine size of m_stgVariables (it must include space for gradient determination)
//    and allocate m_stgVariables;
// template parameters are the solver types of the RANS and LES runs
template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
class MSTG {
 public:
  using self = MSTG<nDim, SolverTypeR, SolverTypeL>;
  using SolverTypeL_ = typename SolverTraits<nDim, SolverTypeL>::SolverType;
  using Accessor = typename AccessorTrait<nDim, SolverTypeL>::Accessor;

  friend void saveStg<nDim, SolverTypeR, SolverTypeL>(std::map<MInt, self*>, SolverTypeL_*);

  friend class FvBndryCndXD<nDim, FvSysEqnNS<nDim>>;

  MSTG(SolverTypeL_* solver,
       MInt bcId,
       const MPI_Comm commStg,
       MInt* sortedBndryCellIds,
       MInt noStgLCells,
       MInt stgFaceNormalDir,
       MInt stgDir,
       MInt stgWallNormalDir,
       MInt wallDir,
       MBool cutOff);

  void bc7909();
  void init(MInt commStgRoot);

 protected:
  void saveStg(); // for adaptation
  void loadStg();

 private:
  // Methods
  void advanceEddies();
  void printSTGSummary();
  void calcEddieCoverage();
  void setVb(MFloat*, MFloat*);

  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  void apply();
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  void apply();
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  void calcStrainRate();
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  void calcStrainRate();
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_STRUCTURED, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  template <class _ = void, std::enable_if_t<SolverTypeR == MAIA_STRUCTURED, _*> = nullptr,
            std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  void readRANSProfileStg();
  template <class _ = void, std::enable_if_t<SolverTypeR == MAIA_FINITE_VOLUME, _*> = nullptr,
            std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr>
  void readRANSProfileStg();

  SolverTypeL_* solver() const { return m_solver; }

  /* template <class _ = void, std::enable_if_t<SolverTypeR == MAIA_STRUCTURED, _*> = nullptr, */
  /*           std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr> */
  /* void resetAfterAdaptation(){} */
  /* template <class _ = void, std::enable_if_t<SolverTypeR == MAIA_FINITE_VOLUME, _*> = nullptr, */
  /*           std::enable_if_t<SolverTypeL == MAIA_FINITE_VOLUME, _*> = nullptr> */
  /* void resetAfterAdaptation(); */

  // Accessor*;
  static constexpr const MInt m_nDim = nDim; // 3;
  SolverTypeL_* m_solver = nullptr;
  Accessor* a = nullptr; // the solver-specific accessor
  const MInt m_bcId; // e.g. 7909, 7910, ...

  // ToDo: generalize this to other RANS models

  MFloat* m_inflowEnd = nullptr;

  MPI_Comm m_commStg;
  MInt m_commStgRoot; // Check the purpose of this; by now just set to 0
  MInt m_stgMyRank;
  MBool m_stgLocal = false;
  MFloat** m_stgLVariables = nullptr;
  MInt m_stgNoEddieProperties;
  MFloat** m_stgEddies = nullptr;
  MInt m_stgMaxNoEddies; // to scale length of eddies
  MInt m_noStgLCells; //# LES cells
  MFloat* m_stgVbEnd = nullptr;
  MFloat* m_stgMaxVel = nullptr;

  // JANNIK

  std::vector<MInt>* m_stgPeriodicCellId = nullptr;
  std::vector<MFloat> m_stgWallNormalLocations;

  // Limiters
  const MFloat epss = 1e-34;
  const MFloat eps = 1e-16;
  const MFloat epsl = 1e-13;

  // Manual parameters
  const MFloat c_mu = 0.09;
  const MFloat a1 = 1 / sqrt(c_mu);

  const MFloat timsm = 0.3; // Smoothing factor for new eddies (in time)
  const MFloat aniso = 1.0; // Anisotropic, clustered eddies

 public:
  struct STG;

  // random number generator for eddy position
  std::mt19937_64 m_PRNGEddy;
  inline std::mt19937_64& randomEddyPosGenerator() { return m_PRNGEddy; }
};


template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
struct MSTG<nDim, SolverTypeR, SolverTypeL>::STG {
  static constexpr const MInt AVG_U = 0;
  static constexpr const MInt AVG_V = 1;
  static constexpr const MInt AVG_W = 2;
  static constexpr const MInt AVG_UU[3] = {0, 1, 2};
  static constexpr const MInt AVG_RHO = nDim;
  static constexpr const MInt AVG_P = nDim + 1;
  static constexpr const MInt FLUC_U = nDim + 2;
  static constexpr const MInt FLUC_V = nDim + 3;
  static constexpr const MInt FLUC_W = nDim + 4;
  static constexpr const MInt NU_T = nDim + 5;
  static constexpr const MInt SIJSIJ = nDim + 6;
  static constexpr const MInt LENGTH_SCALE = nDim + 7;
  static constexpr const MInt FLUC_UU = nDim + 8;
  static constexpr const MInt FLUC_UV = nDim + 9;
  static constexpr const MInt FLUC_UW = nDim + 10;
  static constexpr const MInt FLUC_VV = nDim + 11;
  static constexpr const MInt FLUC_VW = nDim + 12;
  static constexpr const MInt FLUC_WW = nDim + 13;
  static constexpr const MInt LENGTH_X = nDim + 14;
  static constexpr const MInt LENGTH_Y = nDim + 15;
  static constexpr const MInt LENGTH_Z = nDim + 16;
  static constexpr const MInt S11 = nDim + 17;
  static constexpr const MInt S22 = nDim + 18;
  static constexpr const MInt S33 = nDim + 19;
  static constexpr const MInt S12 = nDim + 20;
  static constexpr const MInt S23 = nDim + 21;
  static constexpr const MInt S13 = nDim + 22;
  static constexpr const MInt noStgVars = nDim + 23;
};
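
// Usage note: these offsets index the variable dimension of the stg storage, i.e.
// m_stgLVariables[STG::FLUC_U][stgId] holds the u-fluctuation of stg cell 'stgId'
// (cf. the accesses s->m_stgLVariables[var][stgId] in saveStg below).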


template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
constexpr const MInt MSTG<nDim, SolverTypeR, SolverTypeL>::STG::AVG_UU[3];


// Standalone function to interpolate solution from structured mesh to unstructured, e.g., as an IC
// The primitive variables from the structured grid will be interpolated onto the unstructured mesh.
// Possibly call computeConservativeVariables() afterwards.
// TODO: make this a member function of FvCartesianSolver
template <MInt nDim, class SysEqn>
void readRANSProfile(FvCartesianSolverXD<nDim, SysEqn>* solver) {
  TRACE();

  constexpr const MInt noVars = nDim + 2;
  MFloat zCoord = 0;
  if(nDim == 2) {
    zCoord = Context::getSolverProperty<MFloat>("zCoordFor2DInterpolation", solver->solverId(), AT_, &zCoord);
  }

  // Coordinates need to be reordered
  // ScratchSpace<MFloat> coords(nDim, solver->a_noCells(), AT_, "coords" );
  std::vector<std::vector<MFloat>> coords(3);
  for(MInt d = 0; d < 3; ++d) {
    coords[d].resize(solver->a_noCells());
  }

  for(MInt cellId = 0; cellId < solver->a_noCells(); ++cellId) {
    for(MInt d = 0; d < nDim; ++d) {
      coords[d][cellId] = solver->a_coordinate(cellId, d);
    }
  }
  if(nDim == 2) {
    for(MInt cellId = 0; cellId < solver->a_noCells(); ++cellId) {
      coords[2][cellId] = zCoord;
    }
  }

  // hack, since prepareInterpolationField takes a C-style pointer-to-pointer as input argument
  MInt c = 0;
  std::vector<MFloat*> coords_ptr(3); // 3 coordinate arrays (z is filled with zCoord in 2D)
  for(auto& c_ptr : coords) {
    coords_ptr[c++] = c_ptr.data();
  }

  auto* structuredInterpolation = new StructuredInterpolation<3>(solver->mpiComm());
  MInt temp[] = {solver->a_noCells(), 1, 1};
  structuredInterpolation->prepareInterpolationField(&temp[0], coords_ptr.data());

  // pvariableNames holds the conversion between the naming of the primitive variables of the FvCartesianSolver and
  // the structured solver
  std::array<MString, /*solver->PV->noVariables*/ noVars> pvariableNames;
  pvariableNames[solver->PV->U] = "u";
  pvariableNames[solver->PV->V] = "v";
  if(nDim == 3) {
    pvariableNames[solver->PV->W] = "w";
  }
  pvariableNames[solver->PV->P] = "p";
  pvariableNames[solver->PV->RHO] = "rho";
  // ScratchSpace<MFloat> vars(nDim+2, solver->a_noCells(), AT_, "vars" );
  /* std::vector<std::vector<MFloat>> vars(nDim+2);
     for(MInt var=0; var<solver->PV->noVariables; var++) {
       vars[var].resize(solver->a_noCells());
       structuredInterpolation->interpolateField(pvariableNames[var], &vars[var][0]);
     }

     for (MInt cellId = 0; cellId < solver->a_noCells(); ++cellId) {
       for(MInt var=0; var<solver->PV->noVariables; var++) {
         solver->a_pvariable(cellId, var) = vars[var][cellId];
       }
     }*/
  std::vector<MFloat> vars(solver->a_noCells());
  for(MInt var = 0; var < noVars /*solver->PV->noVariables*/; var++) {
    structuredInterpolation->interpolateField(pvariableNames[var], &vars[0]);
    for(MInt cellId = 0; cellId < solver->a_noCells(); ++cellId) {
      solver->a_pvariable(cellId, var) = vars[cellId];
    }
  }
  delete structuredInterpolation; // avoid leaking the interpolation helper
  m_log << " ... FINISHED!";
}

// IO
/* Assumption in the IO is that each boundary cell has its ghost cell included as a reconstruction
 * neighbor and thus this ghost cell appears in the stg list
 */

/*
  - The saving order of the stg cells is as follows:
    1) all non-ghost cells sorted by global id
    2) all ghost cells sorted by the global id of the respective boundary cell
  - Save optionally the coordinates and check optionally the coordinates when loading
    the data for consistency
*/
template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
void saveStg(std::map<MInt, MSTG<nDim, SolverTypeR, SolverTypeL>*> stgBC,
             typename SolverTraits<nDim, SolverTypeL>::SolverType* solver) {
  /* void saveStg(std::map<MInt, MSTG<nDim, SolverTypeR, SolverTypeL>*> stgBC, SolverTypeL* solver) { */
  TRACE();

  // All ranks of the solver need to enter this function

  // using SolverType = typename SolverTraits<nDim, MAIA_FINITE_VOLUME>::SolverType;
  using myStg = MSTG<nDim, SolverTypeR, SolverTypeL>;
  using myMap = std::map<MInt, myStg*>;
  const MInt noVars = myStg::STG::noStgVars;

  MInt stgIOCoordinates = 0;
  stgIOCoordinates = Context::getSolverProperty<MInt>("stgIOCoordinates", solver->solverId(), AT_, &stgIOCoordinates);

  using namespace maia::parallel_io;
  std::stringstream filename;
  filename << solver->outputDir() << "stgRestart_" << globalTimeStep << ParallelIo::fileExt();

  m_log << "Writing stg restart file " << filename.str() << " ..." << std::endl;

  // The stg boundaries in stgBC are sorted by bcId, but we want to have them sorted by bndryCndId
  myMap stgBC_sorted;
  for(typename myMap::const_iterator it = stgBC.begin(); it != stgBC.end(); ++it) {
    stgBC_sorted.insert(typename myMap::value_type(it->second->m_bcId, it->second));
  }

  // Find unique list of stgBndryCnds among all ranks
  MInt noStgBCs = stgBC_sorted.size();
  std::vector<MInt> stgBCIds;
  for(typename myMap::const_iterator it = stgBC_sorted.begin(); it != stgBC_sorted.end(); ++it) {
    stgBCIds.push_back(it->first);
  }

  MPI_Allreduce(MPI_IN_PLACE, &noStgBCs, 1, MPI_INT, MPI_MAX, solver->mpiComm(), AT_, "MPI_IN_PLACE", "noStgBCs");
  std::vector<MInt> stgBCIds_global(noStgBCs * solver->noDomains());
  stgBCIds.resize(noStgBCs, -1);
  MPI_Allgather(stgBCIds.data(), noStgBCs, MPI_INT, stgBCIds_global.data(), noStgBCs, MPI_INT, solver->mpiComm(), AT_,
                "stgBCIds", "stgBCIds_global");
  std::sort(stgBCIds_global.begin(), stgBCIds_global.end());
  stgBCIds_global.erase(std::unique(stgBCIds_global.begin(), stgBCIds_global.end()), stgBCIds_global.end());
  auto itm1 = std::find(stgBCIds_global.begin(), stgBCIds_global.end(), -1);
  if(itm1 != stgBCIds_global.end()) stgBCIds_global.erase(itm1);

  // if(stgBCIds_global.size() > 0){
  // std::cout << m_solver->m_solverId << " " << solver->domainId() << " " << noStgBCs << " " << stgBCIds_global.size()
  //           << std::endl;

  // MPI_Barrier(solver->mpiComm(), AT_);
  //}

  {
    ParallelIo parallelIo((filename.str()).c_str(), PIO_REPLACE, solver->mpiComm());
    parallelIo.setAttribute(static_cast<MInt>(stgBCIds_global.size()), "noStgBCs");
    parallelIo.setAttributes(&stgBCIds_global[0], "stgBCIds", stgBCIds_global.size());
  }

  // Note: It seems that it is not possible to simultaneously write to the same netcdf file
  // -> therefore ensure that all ranks involved in writing 7909 info have finished,
  //    before writing 7910 info;
  for(auto stgBCId : stgBCIds_global) {
    auto it = stgBC_sorted.find(stgBCId);
    if(it != stgBC_sorted.end()) {
      // Iterate over all stg boundaries
      // for (typename myMap::const_iterator it = stgBC_sorted.begin(); it != stgBC_sorted.end(); ++it) {
      const MInt bcId = it->first;
      myStg* s = it->second;
      assert(bcId == s->m_bcId);
      if(bcId != s->m_bcId) // TODO labels:FV duplicates the assert above
        TERMM(1, "bcId mismatch");

      std::stringstream stgPrefix_;
      stgPrefix_ << "stgVar" << s->m_bcId << "_";
      MString stgPrefix = stgPrefix_.str();

      if(s->m_stgLocal) {
        ParallelIo parallelIo((filename.str()).c_str(), PIO_APPEND, s->m_commStg);
        // WRITE TO FILE
        parallelIo.setAttributes(&(s->m_stgMaxNoEddies), (stgPrefix + "stgMaxNoEddies").c_str(), 1);
        parallelIo.defineArray(PIO_FLOAT, (stgPrefix + "FQeddies").c_str(),
                               s->m_stgMaxNoEddies * s->m_stgNoEddieProperties);
        parallelIo.setAttribute("FQeddies", "name", (stgPrefix + "FQeddies").c_str());
        parallelIo.setOffset(s->m_stgMaxNoEddies * s->m_stgNoEddieProperties, 0);
        parallelIo.writeArray(&(s->m_stgEddies[0][0]), (stgPrefix + "FQeddies").c_str());
      }

      /* stgIOCoordinates=0: Save internal cells and ghost cells separately; Output is globally sorted
       *                     by globalId; globalId of ghost cell is that of its internal cell;
       *                     coordinates are not written to output
       * stgIOCoordinates=1: Same as stgIOCoordinates=0, but the coordinates of the internal and ghost
       *                     cells are also written to output, so that consistency can
       *                     be double-checked when reading;
       * stgIOCoordinates=2: Coordinates are also written to output and are used at loading to put
       *                     the data at the right place
       */

      if(stgIOCoordinates == 0 || stgIOCoordinates == 1) {
        // All ranks need to enter this branch

        /*
         * if (m_stgLocal): Loop over all stg cells and save stgGlobalIds & stgGlobalIdsGhost in ordered form
         * Determine all window cells & halo cells
         * Determine the position of all determined window cells in m_windowCells and send it by MPI_Alltoall
         * Determine the position of all determined halo cells in m_haloCells and check if those halo cells already
         * exist as window cells. Send the remaining list by MPI_Alltoall; each domain now knows what data it will
         * receive in the next step. Send the data to the respective domains.
         */

        std::vector<std::pair<MInt /*globalId*/, MInt /*stgId*/>> stgGlobalIds;
        std::vector<std::pair<MInt /*globalId*/, MInt /*stgId*/>> stgGlobalIdsGhost;
        std::map<MInt /*cellId*/, MInt /*globalId*/> stgWindow;
        std::map<MInt /*globalId*/, MInt /*stgId*/> stgHalo;
        if(s->m_stgLocal) {
          for(typename myStg::Accessor::nDim_citerator it_ = s->a->iterateAll();
              it_ != s->a->iterateAll_nDim_citerator_end();
              ++it_) {
            const MInt cellId = s->a->getCellId(it_);
            const MInt stgId = s->a->getStgId(it_);
            if(solver->a_isHalo(cellId)) {
              const MInt globalId = solver->c_globalId(cellId);
              stgHalo.insert(std::make_pair(globalId, stgId));
            } else if(solver->a_isWindow(cellId)) {
              const MInt globalId = solver->c_globalId(cellId);
              stgWindow.insert(std::make_pair(cellId, globalId));
            }
            if(!solver->a_isBndryGhostCell(cellId)) {
              const MInt globalId = solver->c_globalId(cellId);
              stgGlobalIds.push_back(std::make_pair(globalId, stgId));
            } else /*if (solver->a_isBndryGhostCell(cellId))*/ {
              // GlobalId of ghost cell is that of its internal cell
              const MInt stgIdInternal = s->a->m_stgGhost2stgBndry[stgId];
              const MInt globalId = solver->c_globalId(s->a->m_stgToCellId[stgIdInternal]);
              stgGlobalIdsGhost.push_back(std::make_pair(globalId, stgId));
            }
          }

          // Sort the lists by the globalId
          if(std::is_sorted(stgGlobalIds.begin(), stgGlobalIds.end()))
            m_log << "Non-ghost stg cells already sorted by globalId" << std::endl;
          else {
            std::sort(stgGlobalIds.begin(), stgGlobalIds.end(),
                      [](const std::pair<MInt, MInt>& a, const std::pair<MInt, MInt>& b) { return a.first < b.first; });
          }
          if(std::is_sorted(stgGlobalIdsGhost.begin(), stgGlobalIdsGhost.end()))
            m_log << "Ghost stg cells already sorted by globalId" << std::endl;
          else {
            std::sort(stgGlobalIdsGhost.begin(), stgGlobalIdsGhost.end(),
                      [](const std::pair<MInt, MInt>& a, const std::pair<MInt, MInt>& b) { return a.first < b.first; });
          }
        } // if(s->m_stgLocal)

        // At this point all ranks should have the following data:
        // - stgGlobalIds --> each entry is (globalId, stgId)
        // - stgGlobalIdsGhost --> each entry is (globalId_of_internal_cell, stgId)
        // - stgWindow --> each entry is map(cellId, globalId)
        // - stgHalo --> each entry is map(globalId, stgId)

        // Auxiliary variables needed for communication
        const MInt noNeighborDomains = solver->noNeighborDomains();
        std::vector<MInt> snghbrs(noNeighborDomains);
        for(MInt d = 0; d < noNeighborDomains; ++d) {
          snghbrs[d] = solver->neighborDomain(d);
        }

        // Determine the corresponding halo domains of the window cells
        std::vector<MInt> sendcounts(noNeighborDomains);
        std::vector<MInt> stgWindowGlobalId(stgWindow.size());
        MInt cnt = 0;
        for(MInt domain = 0; domain < noNeighborDomains; ++domain) {
          for(MInt window = 0; window < solver->noWindowCells(domain); ++window) {
            auto it_ = stgWindow.find(solver->grid().windowCell(domain, window));
            if(it_ != stgWindow.end()) {
              stgWindowGlobalId[cnt++] = it_->second;
              ++sendcounts[domain];
            }
          }
        }
        if(static_cast<MUint>(cnt) != stgWindow.size())
          TERMM(-1, "Number of window cells found does not match stgWindow size!");

        // Communicate the window cells to the corresponding halo domains
        std::vector<MInt> globalIdsOfNghbrWindowCells =
            maia::mpi::mpiExchangePointToPoint(&stgWindowGlobalId[0], //<--Input
                                               &snghbrs[0],           //<--Input
                                               noNeighborDomains,     //<--Input
                                               &sendcounts[0],        //<--Input
                                               &snghbrs[0],           //<--Input
                                               noNeighborDomains,     //<--Input
                                               solver->mpiComm(),     //<--Input
                                               solver->domainId(),    //<--Input
                                               1);                    //<--Input

        // Erase halo cells in the stgHalo map which already exist as window cells on a different stg domain
        for(auto globalId : globalIdsOfNghbrWindowCells) {
          auto it_ = stgHalo.find(globalId);
          if(it_ != stgHalo.end()) {
            stgHalo.erase(it_);
          }
        }

        // Determine the corresponding window domains of the remaining halo cells; since the key of the stgHalo map
        // is globalId, the map should already be sorted by the globalId; store the sorted globalIds in a vector,
        // such that they can be sent by MPI
        if(!std::is_sorted(stgHalo.begin(), stgHalo.end(),
                           [](const std::pair<MInt, MInt>& a, const std::pair<MInt, MInt>& b) {
                             return a.first < b.first;
                           })) // comp argument to is_sorted actually not necessary
        {
          TERMM(-1, "stgHalo map is not sorted by globalId");
        }
        std::vector<MInt> haloGlobalIds(stgHalo.size());
        std::fill(sendcounts.begin(), sendcounts.end(), 0);
        MInt domain = 0;
        cnt = 0;
        for(auto& h : stgHalo) {
          haloGlobalIds[cnt++] = h.first;
          while(!(h.first < solver->domainOffset(domain + 1))) {
            ++domain;
          }
          ++sendcounts[domain]; //<-- this sendcounts is the key to the outer world
        }

        // Exchange globalIds
        std::vector<MInt> stgGlobalIdsWindow_ = maia::mpi::mpiExchangePointToPoint(&haloGlobalIds[0],
                                                                                   &snghbrs[0],
                                                                                   noNeighborDomains,
                                                                                   &sendcounts[0],
                                                                                   &snghbrs[0],
                                                                                   noNeighborDomains,
                                                                                   solver->mpiComm(),
                                                                                   solver->domainId(),
                                                                                   1);

        // Exchange m_stgLVariables of halo cells to their corresponding window cells
        // Put data to communicate (from halo cells) into send buffer tempStgVarsWS
        std::vector<MFloat> tempStgVarsWS(noVars * stgHalo.size());
        cnt = 0;
        for(auto& globalId_stgId : stgHalo) {
          const MInt stgId = globalId_stgId.second;
          for(MInt var = 0; var < myStg::STG::noStgVars; ++var) {
            tempStgVarsWS[myStg::STG::noStgVars * cnt + var] = s->m_stgLVariables[var][stgId];
          }
          ++cnt;
        }

        std::vector<MFloat> tempStgVarsWR = maia::mpi::mpiExchangePointToPoint(&tempStgVarsWS[0],
                                                                               &snghbrs[0],
                                                                               noNeighborDomains,
                                                                               &sendcounts[0],
                                                                               &snghbrs[0],
                                                                               noNeighborDomains,
                                                                               solver->mpiComm(),
                                                                               solver->domainId(),
                                                                               noVars);

        // Since stgGlobalIdsWindow has been gathered from multiple neighbor domains, the ids might
        // not be sorted by globalId; using globalId as key in a map will sort them automatically
        std::map<MInt /*globalId*/, MInt /*stgId_inside_tempStgVarsWR*/> stgGlobalIdsWindow;
        cnt = 0;
        for(auto globalId : stgGlobalIdsWindow_) {
          stgGlobalIdsWindow.insert(std::make_pair(globalId, cnt++));
        }
        std::vector<std::pair<MInt, MInt>> stgGlobalIdsWindowV(stgGlobalIdsWindow.begin(), stgGlobalIdsWindow.end());

        // From here on, we will work with the following variables:
        //   tempStgVarsWR
        //   stgGlobalIdsWindowV
        //   stgGlobalIds
        //   stgGlobalIdsGhost

        // Save m_stgLVariables in scratch space ordered by globalId; take into account window cell variables
        // which have been sent by other domains
        MInt noStgInternalCells = stgGlobalIds.size() + stgGlobalIdsWindowV.size(); //<--
        MInt noStgGhostCells = stgGlobalIdsGhost.size();                            //<--
        std::vector<MFloat> tempStgVars(noStgInternalCells * noVars);               //<--
        std::vector<MFloat> tempStgGVars(noStgGhostCells * noVars);                 //<--
        std::vector<MInt> posInSTG(stgGlobalIds.size());
        std::vector<MInt> posInSTGWindow(stgGlobalIdsWindowV.size());
        MInt cnt1 = 0, cnt2 = 0;
        // stgGlobalIds & stgGlobalIdsWindowV should be sorted by globalId at this point
        // (bounds checks guard against reading past the end of either list)
        for(MInt i = 0; i < noStgInternalCells; ++i) {
          if(cnt2 >= static_cast<MInt>(stgGlobalIdsWindowV.size())
             || (cnt1 < static_cast<MInt>(stgGlobalIds.size()) && stgGlobalIds[cnt1] < stgGlobalIdsWindowV[cnt2])) {
            posInSTG[cnt1++] = i;
          } else {
            posInSTGWindow[cnt2++] = i;
          }
        }
        if(static_cast<MUint>(cnt1) != stgGlobalIds.size() || static_cast<MUint>(cnt2) != stgGlobalIdsWindowV.size())
          TERMM(-1, "cnt1 != stgGlobalIds.size() or cnt2 != stgGlobalIdsWindowV.size()");
        for(MInt var = 0; var < noVars; ++var) {
          for(MUint i = 0; i < posInSTG.size(); ++i) {
            const MInt stgId = stgGlobalIds[i].second;
            tempStgVars[var * noStgInternalCells + posInSTG[i]] = s->m_stgLVariables[var][stgId];
          }
          for(MUint i = 0; i < posInSTGWindow.size(); ++i) {
            const MInt stgId = stgGlobalIdsWindowV[i].second;
            tempStgVars[var * noStgInternalCells + posInSTGWindow[i]] = tempStgVarsWR[var + stgId * noVars];
          }
          cnt = 0;
          for(std::vector<std::pair<MInt, MInt>>::const_iterator it_ = stgGlobalIdsGhost.begin();
              it_ != stgGlobalIdsGhost.end();
              ++it_) {
            const MInt stgId = it_->second;
            tempStgGVars[var * noStgGhostCells + cnt] = s->m_stgLVariables[var][stgId];
            ++cnt;
          }
        }

        // Write globalIds into a linear array for IO
        std::vector<MInt> globalIds(noStgInternalCells);   //<--
        std::vector<MInt> globalIdsGhost(noStgGhostCells); //<--
        for(MUint i = 0; i < posInSTG.size(); ++i) {
          globalIds[posInSTG[i]] = stgGlobalIds[i].first;
        }
        for(MUint i = 0; i < posInSTGWindow.size(); ++i) {
          globalIds[posInSTGWindow[i]] = stgGlobalIdsWindowV[i].first;
        }
        for(MInt i = 0; i < noStgGhostCells; ++i) {
          globalIdsGhost[i] = stgGlobalIdsGhost[i].first;
        }

        // SANITY CHECK
        MInt noStgInternalCellsGlobal = 0;
        MInt noStgGhostCellsGlobal = 0;
        MInt* displ;
        MInt* displG;
        MInt noDomains = solver->noDomains();
        MInt* noStgInternalCellsPerRank = new MInt[noDomains];
        MInt* noStgGhostCellsPerRank = new MInt[noDomains];
        MPI_Allgather(&noStgInternalCells, 1, MPI_INT, noStgInternalCellsPerRank, 1, MPI_INT, s->m_commStg, AT_,
                      "noStgInternalCells", "noStgInternalCellsPerRank");
        MPI_Allgather(&noStgGhostCells, 1, MPI_INT, noStgGhostCellsPerRank, 1, MPI_INT, s->m_commStg, AT_,
                      "noStgGhostCells", "noStgGhostCellsPerRank");
        // if (s->m_stgMyRank == s->m_commStgRoot) {
        displ = new MInt[noDomains];
        displG = new MInt[noDomains];
        displ[0] = 0;
        displG[0] = 0;
        for(MInt i = 1; i < noDomains; ++i) {
          displ[i] = displ[i - 1] + noStgInternalCellsPerRank[i - 1];
          displG[i] = displG[i - 1] + noStgGhostCellsPerRank[i - 1];
        }
        noStgInternalCellsGlobal = std::accumulate(noStgInternalCellsPerRank, noStgInternalCellsPerRank + noDomains, 0);
        noStgGhostCellsGlobal = std::accumulate(noStgGhostCellsPerRank, noStgGhostCellsPerRank + noDomains, 0);
        // }

        ScratchSpace<MInt> globalIdsGlobal(noStgInternalCellsGlobal, AT_, "globalIdsGlobal");
        ScratchSpace<MInt> globalIdsGhostGlobal(noStgGhostCellsGlobal, AT_, "globalIdsGhostGlobal");
        MPI_Gatherv(&globalIds[0], noStgInternalCells, MPI_INT, &globalIdsGlobal[0], &noStgInternalCellsPerRank[0],
                    &displ[0], MPI_INT, 0, s->m_commStg, AT_, "globalIds[0]", "globalIdsGlobal[0]");
        MPI_Gatherv(&globalIdsGhost[0], noStgGhostCells, MPI_INT, &globalIdsGhostGlobal[0], &noStgGhostCellsPerRank[0],
                    &displG[0], MPI_INT, 0, s->m_commStg, AT_, "globalIdsGhost[0]", "globalIdsGhostGlobal[0]");

        // Sanity checks
        if(s->m_stgMyRank == s->m_commStgRoot) {
          /* root rank has following data:
           * noStgInternalCellsGlobal
           * noStgGhostCellsGlobal
           * globalIdsGlobal
           * globalIdsGhostGlobal
           */
          // Check if global ids are sorted
          if(!std::is_sorted(globalIdsGlobal.begin(), globalIdsGlobal.end())) TERMM(-1, "globalIdsGlobal not sorted");
          if(!std::is_sorted(globalIdsGhostGlobal.begin(), globalIdsGhostGlobal.end()))
            TERMM(-1, "globalIdsGhostGlobal not sorted");
          // Since halo cells have been skipped, all global ids should be unique
          for(MInt i = 0; i < noStgInternalCellsGlobal - 1; ++i) {
            if(globalIdsGlobal[i] == globalIdsGlobal[i + 1]) TERMM(-1, "duplicate globalId in globalIdsGlobal");
          }
          for(MInt i = 0; i < noStgGhostCellsGlobal - 1; ++i) {
            if(globalIdsGhostGlobal[i] == globalIdsGhostGlobal[i + 1])
              TERMM(-1, "duplicate globalId in globalIdsGhostGlobal");
          }
        }
        // WRITE TO FILE

        /* Following variables are written to output
         * globalIds
         * globalIdsGhost
         * tempStgVars
         * tempStgGVars
         * coordinates (optionally)
         */
        ParallelIo parallelIo((filename.str()).c_str(), PIO_APPEND, solver->mpiComm());

        // Determine data offsets for internalStgCells and ghostStgCells
        ParallelIo::size_type stgInternalOffset, stgGhostOffset;
        ParallelIo::size_type stgGlobalNoInternalCells, stgGlobalNoGhostCells;
        parallelIo.calcOffset(noStgInternalCells, &stgInternalOffset, &stgGlobalNoInternalCells, solver->mpiComm());
        parallelIo.calcOffset(noStgGhostCells, &stgGhostOffset, &stgGlobalNoGhostCells, solver->mpiComm());

        assert(stgGlobalNoInternalCells == noStgInternalCellsGlobal);
        assert(stgGlobalNoGhostCells == noStgGhostCellsGlobal);

        // Add new variables to grid file
        parallelIo.defineArray(PIO_INT, (stgPrefix + "globalIdsInternal").c_str(), noStgInternalCells);
        parallelIo.defineArray(PIO_INT, (stgPrefix + "globalIdsGhost").c_str(), noStgGhostCells);

        // Write data to file
        parallelIo.setOffset(noStgInternalCells, stgInternalOffset);
        parallelIo.writeArray(&globalIds[0], (stgPrefix + "globalIdsInternal").c_str());
        parallelIo.setOffset(noStgGhostCells, stgGhostOffset);
        parallelIo.writeArray(&globalIdsGhost[0], (stgPrefix + "globalIdsGhost").c_str());

        // Write the stg variables
        parallelIo.setOffset(noStgInternalCells, stgInternalOffset);
        for(MInt var = 0; var < myStg::STG::noStgVars; ++var) {
          std::stringstream varName;
          varName << stgPrefix + std::to_string(var);
          parallelIo.defineArray(PIO_FLOAT, (varName.str()).c_str(), noStgInternalCells);
          parallelIo.writeArray(&tempStgVars[var * noStgInternalCells], (varName.str()).c_str());
        }
        parallelIo.setOffset(noStgGhostCells, stgGhostOffset);
        for(MInt var = 0; var < myStg::STG::noStgVars; ++var) {
          std::stringstream varName;
          varName << stgPrefix + "G_" + std::to_string(var);
          parallelIo.defineArray(PIO_FLOAT, (varName.str()).c_str(), noStgGhostCells);
          parallelIo.writeArray(&tempStgGVars[var * noStgGhostCells], (varName.str()).c_str());
        }

        if(stgIOCoordinates == 1) {
          ScratchSpace<MFloat> tempStgCoord(noStgInternalCells * nDim, AT_, "tempStgCoord");
          ScratchSpace<MFloat> tempStgGCoord(noStgGhostCells * nDim, AT_, "tempStgGCoord");
          cnt = 0;
          for(auto globalId : globalIds) {
            const MInt cellId = globalId - solver->domainOffset(solver->domainId());
            for(MInt d = 0; d < nDim; ++d) {
              tempStgCoord.p[noStgInternalCells * d + cnt] = s->a->a_coordinate(cellId, d);
            }
            ++cnt;
          }
          cnt = 0;
          for(auto globalId : globalIdsGhost) {
            const MInt cellId = globalId - solver->domainOffset(solver->domainId());
            const MInt bndryId = solver->a_bndryId(cellId);
            if(bndryId < 0) TERMM(-1, "Invalid bndryId for stg ghost cell");
            const MInt ghostCellId =
                solver->a_bndryGhostCellId(bndryId,
                                           0); // solver->m_bndryCells->a[bndryId].m_srfcVariables[0]->m_ghostCellId;
            for(MInt d = 0; d < nDim; ++d) {
              tempStgGCoord.p[noStgGhostCells * d + cnt] = s->a->a_coordinate(ghostCellId, d);
            }
            ++cnt;
          }
          parallelIo.setOffset(noStgInternalCells, stgInternalOffset);
          for(MInt d = 0; d < nDim; ++d) {
            std::stringstream varName;
            varName << stgPrefix + "x" + std::to_string(d);
            parallelIo.defineArray(PIO_FLOAT, (varName.str()).c_str(), noStgInternalCells);
            parallelIo.writeArray(&tempStgCoord.p[noStgInternalCells * d], (varName.str()).c_str());
          }
          parallelIo.setOffset(noStgGhostCells, stgGhostOffset);
          for(MInt d = 0; d < nDim; ++d) {
            std::stringstream varName;
            varName << stgPrefix + "G_" + "x" + std::to_string(d);
            parallelIo.defineArray(PIO_FLOAT, (varName.str()).c_str(), noStgGhostCells);
            parallelIo.writeArray(&tempStgGCoord.p[noStgGhostCells * d], (varName.str()).c_str());
          }
        }
      } else if(stgIOCoordinates == 2) {
        // This branch is only executed by stg-domains

        /* Output is domainwise sorted by globalId; if all stg halo cells of the current domain are
         * contained as stg window cells on some other rank, then the output should also be sorted
         * globally by the globalId; Write an attribute indicating if the output is globally sorted
         * by globalId; in case of ghost cells the globalIds are the globalIds of the respective
         * internal cells; it is also stored whether a cell is a ghost cell; two consecutive cells in
         * the output can only have the same globalId if one of them is a ghost cell;
         */

        if(s->m_stgLocal) {
          // Create mapping commStg_rank->mpiComm_rank
          MInt comm_size;
          MPI_Comm_size(s->m_commStg, &comm_size);
          std::vector<MInt> local2GlobalRank(comm_size);
          const MInt temp = solver->domainId();
          MPI_Allgather(&temp, 1, MPI_INT, local2GlobalRank.data(), 1, MPI_INT, s->m_commStg, AT_, "solver->domainId()",
                        "local2GlobalRank");
          if(!std::is_sorted(local2GlobalRank.begin(), local2GlobalRank.end()))
            TERMM(1, "ERROR: local2GlobalRank is not sorted");

          std::map<MInt /*cellId*/, MInt /*globalId*/> stgWindow;
          for(typename myStg::Accessor::nDim_citerator it_ = s->a->iterateAll();
              it_ != s->a->iterateAll_nDim_citerator_end();
              ++it_) {
            const MInt cellId = s->a->getCellId(it_);
            if(solver->a_isBndryGhostCell(cellId))
              continue; // TODO labels:FV this skip possibly needs to be done also in other loops in saveStg!!!
            if(solver->a_isWindow(cellId)) {
              const MInt globalId = solver->c_globalId(cellId);
              stgWindow.insert(std::make_pair(cellId, globalId));
            }
          }

          // Auxiliary variables needed for communication
          const MInt noNeighborDomains = solver->noNeighborDomains();
          std::vector<MInt> snghbrs; //(noNeighborDomains);
          std::vector<MInt> nghbr_commSTG2mpiComm;
          for(MInt d = 0; d < noNeighborDomains; ++d) {
            auto it_ = std::find(local2GlobalRank.begin(), local2GlobalRank.end(), solver->neighborDomain(d));
            if(it_ != local2GlobalRank.end()) {
              snghbrs.push_back(std::distance(local2GlobalRank.begin(), it_)); // solver->neighborDomain(d);
              nghbr_commSTG2mpiComm.push_back(d);
            }
          }

          // Determine the corresponding halo domainIds of the window cells
          const MInt noNeighborDomainsLocal = snghbrs.size();
          std::vector<MInt> sendcounts(noNeighborDomainsLocal);
          std::vector<MInt> stgWindowGlobalId; //(stgWindow.size());
          MInt cnt = 0;
          MInt noDoubleEntries = 0;
          for(MInt domain_ = 0; domain_ < noNeighborDomainsLocal; ++domain_) {
            const MInt domain = nghbr_commSTG2mpiComm[domain_];
            for(MInt window = 0; window < solver->noWindowCells(domain); ++window) {
              auto it_ = stgWindow.find(solver->grid().windowCell(domain, window));
              if(it_ != stgWindow.end()) {
                auto it__ = std::find(stgWindowGlobalId.begin(), stgWindowGlobalId.end(), it_->second);
                if(it__ != stgWindowGlobalId.end()) {
                  noDoubleEntries++;
                }
                // stgWindowGlobalId[cnt++] = it_->second;
                stgWindowGlobalId.push_back(it_->second);
                ++cnt;
                ++sendcounts[domain_];
              }
            }
          }

          if(static_cast<MUint>(cnt - noDoubleEntries) != stgWindow.size()) {
            std::cout << "Mismatch on rank=" << solver->domainId() << ": " << cnt << "; " << noDoubleEntries << "; "
                      << stgWindow.size() << "; " << stgWindowGlobalId.size() << std::endl;
            std::cout << std::endl;
          }
          if(static_cast<MUint>(cnt) != stgWindowGlobalId.size()) TERMM(1, "cnt does not match stgWindowGlobalId size");

          // Communicate the window cells to the corresponding halo domains
          MInt myrank;
          MPI_Comm_rank(s->m_commStg, &myrank);

          std::vector<MInt> globalIdsOfNghbrWindowCells =
              maia::mpi::mpiExchangePointToPoint(&stgWindowGlobalId[0],  //<--Input
                                                 &snghbrs[0],            //<--Input
                                                 noNeighborDomainsLocal, //<--Input
                                                 &sendcounts[0],         //<--Input
                                                 &snghbrs[0],            //<--Input
                                                 noNeighborDomainsLocal, //<--Input
                                                 s->m_commStg,           // solver->mpiComm(), //<--Input
                                                 myrank,                 // solver->domainId(), //<--Input
                                                 1);                     //<--Input

1509 // Skip halo cells if they are contained as window cell somewhere else
1510 std::vector<std::pair<MInt /*globalId*/, MInt /*stgId*/>> stgGlobalIds;
1511 stgGlobalIds.reserve(s->a->sizeStg());
1512 std::vector<std::pair<MInt /*globalId*/, MInt /*isGhost*/>> stgIsGhost;
1513 stgIsGhost.reserve(s->a->sizeStg());
1514 MInt noHaloWithNoWindow = 0;
1515 MInt noHaloTotal = 0;
1516 for(typename myStg::Accessor::nDim_citerator it_ = s->a->iterateAll();
1517 it_ != s->a->iterateAll_nDim_citerator_end();
1518 ++it_) {
1519 const MInt cellId = s->a->getCellId(it_);
1520 const MInt stgId = s->a->getStgId(it_);
1521 /* if (myrank==0) {
1522 std::cout << "STGID=" << stgId << " : " << s->a->m_stgSize << std::endl;
1523 if (stgId>10000)
1524 TERMM(1, "");
1525 }*/
1526 MInt cellId_ = cellId;
1527 // If cell is ghost cell, check if its internal cell is a halo cell already contained on a different domain
1528 if(solver->a_isBndryGhostCell(cellId)) {
1529 const MInt stgIdInternal = s->a->m_stgGhost2stgBndry[stgId];
1530 cellId_ = s->a->m_stgToCellId[stgIdInternal];
1531 }
1532 // Skip if it is a periodic cell; periodic cells are taken into account in loadStg
1533 if(solver->a_isPeriodic(cellId_)) continue;
1534 if(solver->a_isHalo(cellId_) /*&& !solver->a_isPeriodic(cellId_)*/) {
1535 ++noHaloTotal;
1536 const MInt globalId = solver->c_globalId(cellId_);
1537 // if (myrank==0 && globalId==3634505)
1538 // std::cout << "HALLOO YES" << stgId << std::endl;
1539 // TODO labels:FV,toenhance evtl. if bndry cell is halo cell, also skip its ghost cell
1540 // search for this globalId in globalIdsOfNghbrWindowCells; if found continue
1541 if(std::find(globalIdsOfNghbrWindowCells.begin(), globalIdsOfNghbrWindowCells.end(), globalId)
1542 != globalIdsOfNghbrWindowCells.end()) {
1543 continue;
1544 }
1545 ++noHaloWithNoWindow;
1546 // count no cells which are not contained as window somewhere else, in that case output should be also
1547 // globally sorted by globaId -> check that
1548 }
1549 if(!solver->a_isBndryGhostCell(cellId)) {
1550 const MInt globalId = solver->c_globalId(cellId);
1551 // if (myrank==0 && globalId==3634505)
1552 // std::cout << "HALLOO " << stgId << std::endl;
1553 stgGlobalIds.push_back(std::make_pair(globalId, stgId));
1554 stgIsGhost.push_back(std::make_pair(globalId, 0));
1555 } else /*if (solver->a_isBndryGhostCell(cellId))*/ {
1556 // GlobalId of ghost cells are that of their internal cell
1557 if(stgId > 2 * s->a->m_noBcCells) {
1558 std::cout << "ERROR on rank=" << myrank << ": stgId=" << stgId << " exceeds 2*noBcCells; noBcCells=" << s->a->m_noBcCells << std::endl;
1559 TERMM(1, "ERROR");
1560 }
1561 const MInt stgIdInternal = s->a->m_stgGhost2stgBndry[stgId];
1562 if(stgIdInternal > s->a->m_noBcCells) {
1563 std::cout << "ERROR on rank=" << myrank << ": stgIdInternal=" << stgIdInternal << "; noBcCells=" << s->a->m_noBcCells
1564 << "; stgId=" << stgId << std::endl;
1565 TERMM(1, "ERROR");
1566 }
1567 const MInt globalId = solver->c_globalId(s->a->m_stgToCellId[stgIdInternal]);
1571
1572 stgGlobalIds.push_back(std::make_pair(globalId, stgId));
1573 // Save a flag indicating if the cell is a ghost cell
1574 stgIsGhost.push_back(std::make_pair(globalId, 1));
1575 }
1576 }
1577 m_log << "On domain " << solver->domainId() << ": " << noHaloWithNoWindow << " of " << noHaloTotal
1578 << " halo cells are not covered by a window cell on another domain" << std::endl;
1579
1580 // Note: a list that is sorted domain-wise should also be globally sorted, since domain offsets are monotonic
1581 // Sort the lists by the globalId
1582 if(std::is_sorted(stgGlobalIds.begin(), stgGlobalIds.end()))
1583 m_log << "stg cells already sorted by globalId" << std::endl;
1584 else {
1585 std::sort(stgIsGhost.begin(), stgIsGhost.end(),
1586 [](const std::pair<MInt, MInt>& a, const std::pair<MInt, MInt>& b) { return a.first < b.first; });
1587 std::sort(stgGlobalIds.begin(), stgGlobalIds.end(),
1588 [](const std::pair<MInt, MInt>& a, const std::pair<MInt, MInt>& b) { return a.first < b.first; });
1589 }
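 // Note: stgIsGhost and stgGlobalIds must be permuted identically, which holds here because
 // both are sorted with the same key (pair.first, the globalId) and the globalIds are unique.
 // If duplicate keys were possible, a single index sort applied to both vectors would be the
 // safer pattern (illustrative sketch; needs <numeric>):
#if 0
 std::vector<MInt> perm(stgGlobalIds.size());
 std::iota(perm.begin(), perm.end(), 0);
 std::sort(perm.begin(), perm.end(),
 [&](const MInt i, const MInt j) { return stgGlobalIds[i].first < stgGlobalIds[j].first; });
 // Apply perm to both stgGlobalIds and stgIsGhost.
#endif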
1590
1591 // Save m_stgLVariables in scratch space ordered by globalId; skip halo cells that are already contained as
1592 // window cells somewhere else
1593 const MInt noStgInternalCells = static_cast<MInt>(stgGlobalIds.size());
1594 std::vector<MFloat> tempStgVars(noStgInternalCells * noVars);
1595 for(MInt var = 0; var < noVars; ++var) {
1596 cnt = 0;
1597 for(auto& it_ : stgGlobalIds) {
1598 const MInt stgId = it_.second;
1599 tempStgVars[var * noStgInternalCells + cnt++] = s->m_stgLVariables[var][stgId];
1600 }
1601 }
1602
1603 // Write globalIds into a linear array for IO
1604 std::vector<MInt> globalIds(noStgInternalCells);
1605 std::vector<MInt> isGhost(noStgInternalCells);
1606 cnt = 0;
1607 for(auto itGlobalId = stgGlobalIds.begin(), itIsGhost = stgIsGhost.begin(); itGlobalId != stgGlobalIds.end();
1608 ++itGlobalId, ++itIsGhost) {
1609 globalIds[cnt] = itGlobalId->first;
1610 isGhost[cnt] = itIsGhost->second;
1611 cnt++;
1612 }
1613
1615 // SANITY CHECK (this check could be performed by a single rank)
1618 MInt noInvolvedRanks;
1619 MPI_Comm_size(s->m_commStg, &noInvolvedRanks);
1620 std::vector<MInt> noStgInternalCellsPerRank(noInvolvedRanks);
1621 MPI_Allgather(&noStgInternalCells, 1, MPI_INT, noStgInternalCellsPerRank.data(), 1, MPI_INT, s->m_commStg, AT_,
1622 "noStgInternalCells", "noStgInternalCellsPerRank");
1623 std::vector<MInt> displ(noInvolvedRanks);
1624 displ[0] = 0;
1625 for(MInt i = 1; i < noInvolvedRanks; ++i) {
1626 displ[i] = displ[i - 1] + noStgInternalCellsPerRank[i - 1];
1627 }
1628 MInt noStgInternalCellsGlobal =
1629 std::accumulate(noStgInternalCellsPerRank.begin(), noStgInternalCellsPerRank.end(), 0);
1630
1631 ScratchSpace<MInt> globalIdsGlobal(noStgInternalCellsGlobal, AT_, "globalIdsGlobal");
1632 MPI_Allgatherv(&globalIds[0], noStgInternalCells, MPI_INT, &globalIdsGlobal[0], noStgInternalCellsPerRank.data(),
1633 displ.data(), MPI_INT, s->m_commStg, AT_, "globalIds[0]", "globalIdsGlobal[0]");
1634 // Gather the information about halo cells without a window cell
1635 std::vector<MInt> noHaloWithNoWindowGlobal(noInvolvedRanks);
1636 MPI_Allgather(&noHaloWithNoWindow, 1, MPI_INT, noHaloWithNoWindowGlobal.data(), 1, MPI_INT, s->m_commStg, AT_,
1637 "noHaloWithNoWindow", "noHaloWithNoWindowGlobal");
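 // Note: displ is the exclusive prefix sum of the per-rank counts, which is exactly the
 // displacement vector MPI_Allgatherv expects. With <numeric> (C++17) the loop above
 // collapses to one call (illustrative):
#if 0
 std::exclusive_scan(noStgInternalCellsPerRank.begin(), noStgInternalCellsPerRank.end(), displ.begin(), 0);
#endif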
1638
1639 // Sanity checks
1640 /* The root rank now has the following data:
1641 * noStgInternalCellsGlobal
1642 * globalIdsGlobal
1643 */
1644 // Check if global Ids are sorted
1645 MInt isGloballySorted = 1;
1646 if(!std::is_sorted(globalIdsGlobal.begin(), globalIdsGlobal.end())) {
1647 isGloballySorted = 0;
1648 // Only if there are halo cells without a corresponding window cell somewhere else is the list allowed to
1649 // be globally unsorted by globalId
1650 // TODO labels:FV
1651 // if (std::accumulate(noHaloWithNoWindowGlobal.begin(), noHaloWithNoWindowGlobal.end(), 0)==0) {
1652 // TERMM(-1, "List should be globally sorted by globalId.");
1653 // }
1654 }
1656 // WRITE TO FILE
1658
1659 /*
1660 * globalIds
1661 * isGhost
1662 * tempStgVars
1663 * coordinates
1664 */
1665 ParallelIo parallelIo((filename.str()).c_str(), PIO_APPEND, s->m_commStg);
1666
1667 parallelIo.setAttribute(isGloballySorted, (stgPrefix + "isGloballySorted").c_str());
1668
1669 // Determine data offsets for internalStgCells
1670 ParallelIo::size_type stgInternalOffset;
1671 ParallelIo::size_type stgGlobalNoInternalCells;
1672 parallelIo.calcOffset(noStgInternalCells, &stgInternalOffset, &stgGlobalNoInternalCells, s->m_commStg);
1673
1674 assert(stgGlobalNoInternalCells == noStgInternalCellsGlobal);
1675 if(stgGlobalNoInternalCells != noStgInternalCellsGlobal) {
1676 std::cout << "ERROR " << stgGlobalNoInternalCells << "; " << noStgInternalCellsGlobal << std::endl;
1677 TERMM(1, "stgGlobalNoInternalCells != noStgInternalCellsGlobal");
1678 }
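 // Illustrative sketch of the assumed calcOffset semantics: the exclusive prefix sum of
 // noStgInternalCells over m_commStg plus the global total, i.e., roughly the following
 // plain-MPI pattern:
#if 0
 MInt localCount = noStgInternalCells, offset = 0, total = 0;
 MPI_Exscan(&localCount, &offset, 1, MPI_INT, MPI_SUM, s->m_commStg); // rank 0 keeps the initialized 0
 MPI_Allreduce(&localCount, &total, 1, MPI_INT, MPI_SUM, s->m_commStg);
#endif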
1679
1680 // Add new variables to grid file
1681 parallelIo.defineArray(PIO_INT, (stgPrefix + "globalIdsInternal").c_str(), stgGlobalNoInternalCells);
1682 parallelIo.defineArray(PIO_INT, (stgPrefix + "isGhost").c_str(), stgGlobalNoInternalCells);
1683 //
1684 for(MInt var = 0; var < myStg::STG::noStgVars; ++var) {
1685 std::stringstream varName;
1686 varName << stgPrefix + std::to_string(var);
1687 parallelIo.defineArray(PIO_FLOAT, (varName.str()).c_str(), stgGlobalNoInternalCells);
1688 }
1689 //
1690 for(MInt d = 0; d < nDim; ++d) {
1691 std::stringstream varName;
1692 varName << stgPrefix + "x" + std::to_string(d);
1693 parallelIo.defineArray(PIO_FLOAT, (varName.str()).c_str(), stgGlobalNoInternalCells);
1694 }
1695
1696 // Write data to file
1697 parallelIo.setOffset(noStgInternalCells, stgInternalOffset);
1698 parallelIo.writeArray(&globalIds[0], (stgPrefix + "globalIdsInternal").c_str());
1699 parallelIo.writeArray(&isGhost[0], (stgPrefix + "isGhost").c_str());
1700
1701 // Write the stg variables
1702 for(MInt var = 0; var < myStg::STG::noStgVars; ++var) {
1703 std::stringstream varName;
1704 varName << stgPrefix + std::to_string(var);
1705 parallelIo.writeArray(&tempStgVars[var * noStgInternalCells], (varName.str()).c_str());
1706 }
1707
1708 ScratchSpace<MFloat> tempStgCoord(noStgInternalCells * nDim, AT_, "tempStgCoord");
1709 cnt = 0;
1710 for(std::vector<std::pair<MInt, MInt>>::const_iterator it_ = stgGlobalIds.begin(); it_ != stgGlobalIds.end();
1711 ++it_) {
1712 const MInt stgId = it_->second;
1713 const MInt cellId = s->a->m_stgToCellId[stgId]; // s->a->getCellId(stgId); // TODO labels:FV
1714 for(MInt d = 0; d < nDim; ++d) {
1715 tempStgCoord.p[noStgInternalCells * d + cnt] = s->a->a_coordinate(cellId, d);
1716 }
1717
1718 ++cnt;
1719 }
1720 if(cnt != noStgInternalCells) TERMM(1, "cnt does not match noStgInternalCells");
1721 for(MInt d = 0; d < nDim; ++d) {
1722 std::stringstream varName;
1723 varName << stgPrefix + "x" + std::to_string(d);
1724 parallelIo.writeArray(&(tempStgCoord.p[noStgInternalCells * d]), (varName.str()).c_str());
1725 }
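 // Note: tempStgCoord uses a structure-of-arrays layout with component d of cell i at
 // index d * noStgInternalCells + i, so every dimension is contiguous and can be flushed
 // with a single writeArray call. An accessor making the indexing explicit (illustrative):
#if 0
 auto coordAt = [&](const MInt d, const MInt i) -> MFloat& {
 return tempStgCoord.p[noStgInternalCells * d + i];
 };
#endif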
1726 }
1727 } else
1728 TERMM(-1, "Unknown value for property stgIOCoordinates");
1729 }
1730 MPI_Barrier(solver->mpiComm(), AT_);
1731 m_log << "::saveStg: Finished writing stgRestart for " << stgBCId << std::endl;
1732 }
1733}
1734
1735// Load stg function is independent of RANS solver
1736template <MInt nDim, SolverType SolverTypeR, SolverType SolverTypeL>
1738 TRACE();
1739
1740 m_log << "Loading STG restart data ..." << std::endl;
1741
1753 MInt stgIOCoordinates = 0;
1754 stgIOCoordinates = Context::getSolverProperty<MInt>("stgIOCoordinates", m_solver->solverId(), AT_, &stgIOCoordinates);
1755
1756 std::stringstream filename;
1757 filename << m_solver->restartDir() << "stgRestart_" << globalTimeStep << ParallelIo::fileExt();
1758
1759 using namespace maia::parallel_io;
1760
1761 {
1762 ParallelIo parallelIo((filename.str()).c_str(), PIO_READ, m_commStg /*m_solver->mpiComm()*/);
1763
1764 MInt noStgBCs;
1765 parallelIo.getAttribute(&noStgBCs, "noStgBCs");
1766 std::vector<MInt> bcIds(noStgBCs);
1767 parallelIo.getAttribute(&bcIds[0], "stgBCIds", noStgBCs);
1768
1769 // Check if current bcId is contained in stgRestart file
1770 if(std::find(bcIds.begin(), bcIds.end(), m_bcId) == bcIds.end())
1771 TERMM(-1, "Current bcId not found in stgRestart file!");
1772 }
1773
1774 std::stringstream stgPrefix_;
1775 stgPrefix_ << "stgVar" << m_bcId << "_";
1776 MString stgPrefix = stgPrefix_.str();
1777
1778 // All stg ranks read m_stgMaxNoEddies & m_stgEddies
1779 {
1780 ParallelIo parallelIo((filename.str()).c_str(), PIO_READ, m_commStg);
1781 MInt stgMaxNoEddies;
1782 parallelIo.getAttribute(&stgMaxNoEddies, (stgPrefix + "stgMaxNoEddies").c_str());
1783 if(stgMaxNoEddies != m_stgMaxNoEddies && !m_stgCreateNewEddies) TERMM(-1, "stgMaxNoEddies in restart file does not match the current setting");
1784 ParallelIo::size_type stgMaxNoEddies_ = parallelIo.getArraySize((stgPrefix + "FQeddies").c_str());
1785 if(stgMaxNoEddies_ != stgMaxNoEddies * m_stgNoEddieProperties) TERMM(-1, "FQeddies array size does not match stgMaxNoEddies * m_stgNoEddieProperties");
1786
1787 parallelIo.setOffset(stgMaxNoEddies_, 0);
1788 parallelIo.readArray(&(m_stgEddies[0][0]), (stgPrefix + "FQeddies").c_str()); // TODO labels:FV,IO,totest check this
1789 }
1790
1791 // if(!m_cutOff){
1792 if(stgIOCoordinates == 0 || stgIOCoordinates == 1) {
1793#if 1 // Parallel reading, i.e. not every stg rank is reading everything
1794
1795 // Assumption: ghost cells are neither window nor halo cells
1796
1797 // 1) All ranks read globalIdsInternal & globalIdsGhost and check that they are sorted
1798 ParallelIo parallelIo((filename.str()).c_str(), PIO_READ, m_solver->mpiComm());
1799 ParallelIo::size_type noStgInternalCellsGlobal = parallelIo.getArraySize((stgPrefix + "globalIdsInternal").c_str());
1800 ParallelIo::size_type noStgGhostCellsGlobal = parallelIo.getArraySize((stgPrefix + "globalIdsGhost").c_str());
1801 std::vector<MInt> globalIdsInternalGlobal(noStgInternalCellsGlobal);
1802 std::vector<MInt> globalIdsGhostGlobal(noStgGhostCellsGlobal);
1803 parallelIo.setOffset(noStgInternalCellsGlobal, 0);
1804 parallelIo.readArray(globalIdsInternalGlobal.data(), (stgPrefix + "globalIdsInternal").c_str());
1805 parallelIo.setOffset(noStgGhostCellsGlobal, 0);
1806 parallelIo.readArray(globalIdsGhostGlobal.data(), (stgPrefix + "globalIdsGhost").c_str());
1807
1808 if(!std::is_sorted(globalIdsInternalGlobal.begin(), globalIdsInternalGlobal.end()))
1809 TERMM(-1, "globalIdsInternal is not sorted in stgRestart file");
1810 if(!std::is_sorted(globalIdsGhostGlobal.begin(), globalIdsGhostGlobal.end()))
1811 TERMM(-1, "globalIdsGhost is not sorted in stgRestart file");
1812
1813 // 2) Determine this rank's slice of the sorted globalId list via binary search on the domain offsets
1814 auto lower = std::lower_bound(globalIdsInternalGlobal.begin(), globalIdsInternalGlobal.end(),
1815 m_solver->domainOffset(m_solver->domainId()));
1816 auto upper = std::upper_bound(globalIdsInternalGlobal.begin(), globalIdsInternalGlobal.end(),
1817 m_solver->domainOffset(m_solver->domainId() + 1));
1818 std::vector<MInt> offsets(2 * m_solver->noDomains());
1819 const MInt domOff[] = {static_cast<MInt>(std::distance(globalIdsInternalGlobal.begin(), lower)),
1820 static_cast<MInt>(std::distance(globalIdsInternalGlobal.begin(), upper))};
1821 const MInt noStgRestartCellsLocal = domOff[1] - domOff[0];
1822 // Does this rank hold restart data although it is not an stg domain?
1823 if(!m_stgLocal && noStgRestartCellsLocal > 0) {
1824 m_log << "Domain " << m_solver->domainId() << " holds stg restart data, but is not an stg domain" << std::endl;
1825 // Data must be window data
1826 for(MInt i = domOff[0]; i < domOff[1]; ++i) {
1827 const MInt cellId = globalIdsInternalGlobal[i] - m_solver->domainOffset(m_solver->domainId());
1828 if(!m_solver->a_isWindow(cellId)) {
1829 TERMM(-1, "Domain has non-window data, even though it is not an stg domain!");
1830 }
1831 }
1832 }
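 // Note: because globalIdsInternalGlobal is sorted, the entries owned by rank r, i.e. those
 // with domainOffset(r) <= globalId < domainOffset(r+1), form one contiguous slice that
 // binary search finds in O(log n). lower_bound on domainOffset(r+1) would be the tighter
 // upper limit: upper_bound additionally admits an exact match, which would already belong
 // to rank r+1. Illustrative:
#if 0
 const MInt r = m_solver->domainId();
 auto first = std::lower_bound(globalIdsInternalGlobal.begin(), globalIdsInternalGlobal.end(),
 m_solver->domainOffset(r));
 auto last = std::lower_bound(globalIdsInternalGlobal.begin(), globalIdsInternalGlobal.end(),
 m_solver->domainOffset(r + 1));
#endif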
1833 // Sanity checks
1834 MPI_Gather(&domOff[0], 2, MPI_INT, offsets.data(), 2, MPI_INT, 0, m_commStg, AT_, "domOff[0]", "offsets.data()");
1835 if(m_commStgMyRank == m_commStgRoot) {
1836 if(offsets[0] != 0) TERMM(-1, "offsets[0] != 0");
1837 if(offsets[offsets.size() - 1] != noStgInternalCellsGlobal) TERMM(-1, "offsets do not end at noStgInternalCellsGlobal");
1838 for(MInt rank = 0; rank < m_solver->noDomains() - 1; ++rank) {
1839 if(offsets[rank * 2 + 1] != offsets[(rank + 1) * 2 + 0]) TERMM(-1, "offsets of consecutive ranks do not match");
1840 }
1841 }
1842 // Store pairs of globalId and position in the restart file of the cells residing on the current rank in a map;
1843 // since the key is the globalId and the map is sorted by key, iterating through this map is equivalent to
1844 // iterating through globalIdsInternalGlobal
1845 std::map<MInt /*globalId*/, MInt /*stgIdRestart*/> globalIdStgIdRestartMap;
1846 for(MInt i = domOff[0]; i < domOff[1]; ++i) {
1847 globalIdStgIdRestartMap.insert(globalIdStgIdRestartMap.end(),
1848 std::map<MInt, MInt>::value_type(globalIdsInternalGlobal[i], i - domOff[0]));
1849 }
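 // Note: the end() hint makes each insert amortized O(1) because the keys arrive in
 // ascending order (globalIdsInternalGlobal is sorted), e.g. (illustrative):
#if 0
 std::map<MInt, MInt> m;
 m.insert(m.end(), std::make_pair(1, 0));
 m.insert(m.end(), std::make_pair(5, 1)); // hint valid: 5 becomes the new maximum key
#endif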
1850
1851 auto lowerG = std::lower_bound(globalIdsGhostGlobal.begin(), globalIdsGhostGlobal.end(),
1852 m_solver->domainOffset(m_solver->domainId()));
1853 auto upperG = std::upper_bound(globalIdsGhostGlobal.begin(), globalIdsGhostGlobal.end(),
1854 m_solver->domainOffset(m_solver->domainId() + 1));
1855 if(std::distance(lowerG, upperG) < 1) TERMM(-1, "no ghost-cell restart data found in this rank's globalId range");
1856 std::vector<MInt> offsetsG(2 * m_solver->noDomains());
1857 const MInt domOffG[] = {static_cast<MInt>(std::distance(globalIdsGhostGlobal.begin(), lowerG)),
1858 static_cast<MInt>(std::distance(globalIdsGhostGlobal.begin(), upperG))};
1859 MInt noStgGRestartCellsLocal = domOffG[1] - domOffG[0];
1860 // Sanity checks
1861 if(!m_stgLocal && noStgGRestartCellsLocal > 0) TERMM(-1, "non-stg domain holds ghost-cell restart data");
1862 MPI_Gather(&domOffG[0], 2, MPI_INT, offsetsG.data(), 2, MPI_INT, 0, m_commStg, AT_, "domOffG[0]",
1863 "offsetsG.data()");
1864 if(m_commStgMyRank == m_commStgRoot) {
1865 if(offsetsG[0] != 0) TERMM(-1, "offsetsG[0] != 0");
1866 if(offsetsG[offsetsG.size() - 1] != noStgGhostCellsGlobal) TERMM(-1, "offsetsG do not end at noStgGhostCellsGlobal");
1867 for(MInt rank = 0; rank < m_solver->noDomains() - 1; ++rank) {
1868 if(offsetsG[rank * 2 + 1] != offsetsG[(rank + 1) * 2 + 0]) TERMM(-1, "ghost offsets of consecutive ranks do not match");
1869 }
1870 }
1871 std::vector<MInt> globalIdsGhost(lowerG, upperG);
1872
1879
1880 // Look for all window cells on own domain
1881 std::map<MInt /*cellId*/, MInt /*restartStgId*/> window_cellId_restartStgId;
1883 for(auto& it : globalIdStgIdRestartMap) {
1886 const MInt cellId = it.first - m_solver->domainOffset(m_solver->domainId());
1887 if(m_solver->a_isWindow(cellId)) {
1888 window_cellId_restartStgId.insert(std::make_pair(cellId, it.second));
1889 }
1890 }
1891
1892 // Auxiliary variables needed for communication
1893 const MInt noNeighborDomains = m_solver->noNeighborDomains();
1894 std::vector<MInt> snghbrs(noNeighborDomains);
1895 for(MInt d = 0; d < noNeighborDomains; ++d) {
1896 snghbrs[d] = m_solver->neighborDomain(d);
1897 }
1898
1899 // Determine the corresponding halo domainIds of the window cells
1900 std::vector<MInt> sendcounts(noNeighborDomains);
1901 std::vector<MInt> stgWindowSortedGlobalId(window_cellId_restartStgId.size());
1902 std::vector<MInt> stgWindowSortedRestartStgId(window_cellId_restartStgId.size());
1903 MInt cnt = 0;
1904 for(MInt domain = 0; domain < noNeighborDomains; ++domain) {
1905 for(MInt window = 0; window < m_solver->noWindowCells(domain); ++window) {
1906 auto it = window_cellId_restartStgId.find(m_solver->grid().windowCell(domain, window));
1907 if(it != window_cellId_restartStgId.end()) {
1908 stgWindowSortedGlobalId[cnt] = m_solver->c_globalId(it->first);
1909 stgWindowSortedRestartStgId[cnt] = it->second;
1910 ++sendcounts[domain];
1911 ++cnt;
1912 }
1913 }
1914 }
1915 if(static_cast<MUint>(cnt) != window_cellId_restartStgId.size()) TERMM(-1, "window cell count does not match window_cellId_restartStgId size");
1916
1917 // Read the internal and ghost restart data of the current rank (var-major: component var of cell i at var * noCells + i)
1918 ScratchSpace<MFloat> tempStgVars(noStgRestartCellsLocal * STG::noStgVars, AT_, "tempStgVars");
1919 ScratchSpace<MFloat> tempStgGVars(noStgGRestartCellsLocal * STG::noStgVars, AT_, "tempStgGVars");
1920 {
1922 for(MInt var = 0; var < STG::noStgVars; ++var) {
1923 parallelIo.setOffset(noStgRestartCellsLocal, domOff[0]);
1924 parallelIo.readArray(tempStgVars.getPointer() + var * noStgRestartCellsLocal, (stgPrefix + std::to_string(var)).c_str());
1925 parallelIo.setOffset(noStgGRestartCellsLocal, domOffG[0]);
1926 parallelIo.readArray(tempStgGVars.getPointer() + var * noStgGRestartCellsLocal, (stgPrefix + std::to_string(var)).c_str());
1927 }
1928 }
1929
1930 // Pack the data to send in cell-major blocks of noVars values each, matching the dataBlockSize argument of the exchange below
1931 const MInt noVars = STG::noStgVars;
1932 MInt noWindowsToSend = window_cellId_restartStgId.size();
1933 std::vector<MFloat> tempStgVarsWS(noWindowsToSend * noVars);
1934 for(MInt var = 0; var < noVars; ++var) {
1935 cnt = 0;
1936 for(auto stgIdRestart : stgWindowSortedRestartStgId) {
1937 tempStgVarsWS[cnt++ * noVars + var] = tempStgVars[var * noStgRestartCellsLocal + stgIdRestart];
1938 }
1939 }
1940
1941 // SEND: stgWindowSortedGlobalId & tempStgVarsWS
1942 std::vector<MInt> globalIdsOfNghbrWindowCells =
1943 maia::mpi::mpiExchangePointToPoint(&stgWindowSortedGlobalId[0], //<--Input
1944 &snghbrs[0], //<--Input
1945 noNeighborDomains, //<--Input
1946 &sendcounts[0], //<--Input
1947 &snghbrs[0], //<--Input
1948 noNeighborDomains, //<--Input
1949 m_solver->mpiComm(), //<--Input
1950 m_solver->domainId(), //<--Input
1951 1); //<--Input
1952
1953 std::vector<MFloat> tempStgVarsWR = maia::mpi::mpiExchangePointToPoint(&tempStgVarsWS[0],
1954 &snghbrs[0],
1955 noNeighborDomains,
1956 &sendcounts[0],
1957 &snghbrs[0],
1958 noNeighborDomains,
1959 m_solver->mpiComm(),
1960 m_solver->domainId(),
1961 noVars);
1962
1963 // Maps for convenience
1964 std::map<MInt /*globalId*/, MInt /*stgIdRestart*/>
1965 map2; // for halo cells; the counterpart of globalIdStgIdRestartMap
1966 for(MInt i = 0; i < noStgRestartCellsLocal; ++i) {
1967 globalIdStgIdRestartMap.insert(std::make_pair(globalIdsInternalGlobal[i + domOff[0]], i));
1968 }
1969 for(MUint i = 0; i < globalIdsOfNghbrWindowCells.size(); ++i) {
1970 map2.insert(std::make_pair(globalIdsOfNghbrWindowCells[i], i));
1971 }
1972
1973 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
1974 const MInt stgId = a->getStgId(it);
1975 const MInt cellId = a->getCellId(it);
1976
1977 if(m_solver->a_isHalo(cellId)) {
1978 const MInt globalId = m_solver->c_globalId(cellId);
1979 auto it_ = map2.find(globalId);
1980 if(it_ == map2.end()) TERMM(-1, "halo globalId not found in received window data");
1981 const MInt stgIdRestart = it_->second;
1982 map2.erase(it_);
1983 for(MInt var = 0; var < noVars; ++var) {
1984 m_stgLVariables[var][stgId] = tempStgVarsWR[noVars * stgIdRestart + var];
1985 }
1986 } else if(m_solver->a_isBndryGhostCell(cellId)) {
1987 const MInt stgBndry = a->m_stgGhost2stgBndry[stgId];
1988 const MInt globalId = m_solver->c_globalId(a->m_stgToCellId[stgBndry]);
1989 const MInt stgIdRestart =
1990 std::find(globalIdsGhost.begin(), globalIdsGhost.end(), globalId) - globalIdsGhost.begin();
1991 for(MInt var = 0; var < noVars; ++var) {
1992 m_stgLVariables[var][stgId] = tempStgGVars[var * noStgGRestartCellsLocal + stgIdRestart]; // var-major layout
1993 }
1994 } else {
1995 const MInt globalId = m_solver->c_globalId(cellId);
1996 auto it_ = globalIdStgIdRestartMap.find(globalId);
1997 if(it_ == globalIdStgIdRestartMap.end()) TERMM(-1, "internal globalId not found in restart data");
1998 const MInt stgIdRestart = it_->second; globalIdStgIdRestartMap.erase(it_); // consume; the emptiness check below verifies a bijection
1999 for(MInt var = 0; var < noVars; ++var) {
2000 m_stgLVariables[var][stgId] = tempStgVars[var * noStgRestartCellsLocal + stgIdRestart]; // var-major layout
2001 }
2002 }
2003 }
2004 if(globalIdStgIdRestartMap.size() > 0) TERMM(-1, "not all internal restart cells were consumed");
2005 if(map2.size() > 0) TERMM(-1, "not all received window cells were consumed");
2006
2008 // Check for coordinates
2010 if(m_stgLocal) {
2011 if(stgIOCoordinates == 1) {
2012 if(!parallelIo.hasDataset((stgPrefix + "x" + std::to_string(0)).c_str()))
2013 TERMM(-1, "Coordinates check activated, but dataset does not contain coordinates!");
2014
2015 // All stg ranks read all coordinates
2016 // ParallelIo parallelIo(filename, PIO_READ, m_commStg); //TODO labels:FV,IO only stg ranks should
2017 // read the coordinates
2018 ScratchSpace<MFloat> tempStgCoord(noStgInternalCellsGlobal * nDim, AT_, "tempStgCoord");
2019 ScratchSpace<MFloat> tempStgGCoord(noStgGhostCellsGlobal * nDim, AT_, "tempStgGCoord");
2020 parallelIo.setOffset(noStgInternalCellsGlobal, 0);
2021 for(MInt d = 0; d < nDim; ++d) {
2022 std::stringstream varName;
2023 varName << stgPrefix + "x" + std::to_string(d);
2024 parallelIo.readArray(&tempStgCoord[noStgInternalCellsGlobal * d], (varName.str()).c_str());
2025 }
2026 parallelIo.setOffset(noStgGhostCellsGlobal, 0);
2027 for(MInt d = 0; d < nDim; ++d) {
2028 std::stringstream varName;
2029 varName << stgPrefix + "x" + std::to_string(d);
2030 parallelIo.readArray(&tempStgGCoord[noStgGhostCellsGlobal * d], (varName.str()).c_str());
2031 }
2032
2033 m_log << "Building up kd-tree..." << std::endl;
2034
2035 // first create kd tree for internal cells
2036 std::vector<Point<3>> donorPoints;
2037 for(MInt stgRestartId = 0; stgRestartId < noStgInternalCellsGlobal; stgRestartId++) {
2038 Point<3> pt(tempStgCoord[noStgInternalCellsGlobal * 0 + stgRestartId],
2039 tempStgCoord[noStgInternalCellsGlobal * 1 + stgRestartId],
2040 tempStgCoord[noStgInternalCellsGlobal * 2 + stgRestartId], globalIdsInternalGlobal[stgRestartId]);
2041 donorPoints.push_back(pt);
2042 }
2043
2044 // build up the tree and fill it
2045 KDtree<nDim>* donorTreeInternal = new KDtree<nDim>(donorPoints);
2046
2047 // second create kd tree for ghost cells
2048 donorPoints.clear();
2049 for(MInt stgRestartId = 0; stgRestartId < noStgGhostCellsGlobal; stgRestartId++) {
2050 Point<3> pt(tempStgGCoord[noStgGhostCellsGlobal * 0 + stgRestartId],
2051 tempStgGCoord[noStgGhostCellsGlobal * 1 + stgRestartId],
2052 tempStgGCoord[noStgGhostCellsGlobal * 2 + stgRestartId], globalIdsGhostGlobal[stgRestartId]);
2053 donorPoints.push_back(pt);
2054 }
2055
2056 // build up the tree and fill it
2057 KDtree<nDim>* donorTreeGhost = new KDtree<nDim>(donorPoints);
2058
2059 m_log << "Building up kd-tree... FINISHED!" << std::endl;
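 // Illustrative usage sketch: Point<3> carries the cell's globalId as payload and, per its
 // declaration, KDtree::nearest(pt, dist) returns that payload for the closest donor point
 // while writing the distance. Wrapping the trees in std::unique_ptr would also avoid
 // leaking them:
#if 0
 MFloat dist = 0;
 const MInt nearestGlobalId = donorTreeInternal->nearest(Point<3>(0.0, 0.0, 0.0, -1), dist);
#endif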
2060
2061 constexpr MFloat epsDistance = 1e-4; // TODO labels:FV make it dependent on current cell size
2062 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
2063 const MInt stgId = a->getStgId(it);
2064 const MInt cellId = a->getCellId(it);
2065 MFloat distance = 0;
2066 MInt globalIdFromRestart = -1;
2067 MInt globalId;
2068 Point<3> pt(a->a_coordinate(cellId, 0), a->a_coordinate(cellId, 1), a->a_coordinate(cellId, 2));
2069
2070 if(m_solver->a_isBndryGhostCell(cellId)) {
2071 const MInt stgBndry = a->m_stgGhost2stgBndry[stgId];
2072 globalId = m_solver->c_globalId(a->m_stgToCellId[stgBndry]);
2073 globalIdFromRestart = donorTreeGhost->nearest(pt, distance);
2074 } else {
2075 globalId = m_solver->c_globalId(cellId);
2076 globalIdFromRestart = donorTreeInternal->nearest(pt, distance);
2077 }
2078 if(globalIdFromRestart != globalId) TERMM(-1, "globalId mismatch between restart coordinates and current grid");
2079 if(distance > epsDistance) TERMM(-1, "coordinate mismatch between restart file and current grid");
2080 }
2081 m_log << "Coordinates check consistent!" << std::endl;
2082 }
2083 }
2084
2086
2087#else // Every stg rank is reading everything
2088 // Assumption: ghost cells are neither window nor halo cells
2089 if(m_stgLocal) {
2090 ParallelIo parallelIo(filename, PIO_READ, m_commStg);
2091
2092 // 1) All ranks read globalIdsInternal & globalIdsGhost and check that they are sorted
2093 ParallelIo::size_type noStgInternalCellsGlobal =
2094 parallelIo.getArraySize((stgPrefix + "globalIdsInternal").c_str());
2095 ParallelIo::size_type noStgGhostCellsGlobal = parallelIo.getArraySize((stgPrefix + "globalIdsGhost").c_str());
2096 std::vector<MInt> globalIdsInternalGlobal(noStgInternalCellsGlobal);
2097 std::vector<MInt> globalIdsGhostGlobal(noStgGhostCellsGlobal);
2098 parallelIo.setOffset(noStgInternalCellsGlobal, 0);
2099 parallelIo.readArray(globalIdsInternalGlobal.data(), (stgPrefix + "globalIdsInternal").c_str());
2100 parallelIo.setOffset(noStgGhostCellsGlobal, 0);
2101 parallelIo.readArray(globalIdsGhostGlobal.data(), (stgPrefix + "globalIdsGhost").c_str());
2102
2103 if(!std::is_sorted(globalIdsInternalGlobal.begin(), globalIdsInternalGlobal.end()))
2104 TERMM(-1, "globalIdsInternal is not sorted in stgRestart file");
2105 if(!std::is_sorted(globalIdsGhostGlobal.begin(), globalIdsGhostGlobal.end()))
2106 TERMM(-1, "globalIdsGhost is not sorted in stgRestart file");
2107
2108 std::map<MInt /*globalId*/, MInt /*stgRestartId*/> globalId2stgRestartId;
2109 std::map<MInt /*globalId*/, MInt /*stgRestartId*/> globalId2stgRestartIdG;
2110 for(MInt i = 0; i < noStgInternalCellsGlobal; ++i) {
2111 globalId2stgRestartId.insert(std::make_pair(globalIdsInternalGlobal[i], i));
2112 }
2113 for(MInt i = 0; i < noStgGhostCellsGlobal; ++i) {
2114 globalId2stgRestartIdG.insert(std::make_pair(globalIdsGhostGlobal[i], i));
2115 }
2116
2117 // Create mapping stgId to stgRestartId
2118 // Loop over all own stg cells
2119 std::vector<MInt> stgId2stgRestartId(a->sizeStg());
2120 std::vector<MInt> stgId2stgRestartIdG(a->sizeStg());
2122 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
2123 const MInt stgId = a->getStgId(it);
2124 const MInt cellId = a->getCellId(it);
2125
2126 if(m_solver->a_isBndryGhostCell(cellId)) {
2127 const MInt stgBndry = a->m_stgGhost2stgBndry[stgId];
2128 const MInt globalId = m_solver->c_globalId(a->m_stgToCellId[stgBndry]);
2129 auto itG = globalId2stgRestartIdG.find(globalId);
2130 if(itG == globalId2stgRestartIdG.end()) TERMM(-1, "ghost globalId not found in restart data");
2131 stgId2stgRestartIdG[stgId] = itG->second;
2132 globalId2stgRestartIdG.erase(itG); // to avoid that it is used twice
2133 } else {
2134 const MInt globalId = m_solver->c_globalId(cellId);
2135 auto itI = globalId2stgRestartId.find(globalId);
2136 if(itI == globalId2stgRestartId.end()) TERMM(-1, "internal globalId not found in restart data");
2137 stgId2stgRestartId[stgId] = itI->second;
2138 globalId2stgRestartId.erase(itI); // to avoid that it is used twice
2139 }
2140 }
2141
2143 // Check for coordinates
2145 if(stgIOCoordinates == 1) {
2146 if(!parallelIo.hasDataset((stgPrefix + "x" + std::to_string(0)).c_str()))
2147 TERMM(-1, "Coordinates check activated, but dataset does not contain coordinates!");
2148
2149 // All stg ranks read all coordinates
2150 ScratchSpace<MFloat> tempStgCoord(noStgInternalCellsGlobal * nDim, AT_, "tempStgCoord");
2151 ScratchSpace<MFloat> tempStgGCoord(noStgGhostCellsGlobal * nDim, AT_, "tempStgGCoord");
2152 parallelIo.setOffset(noStgInternalCellsGlobal, 0);
2153 for(MInt d = 0; d < nDim; ++d) {
2154 std::stringstream varName;
2155 varName << stgPrefix + "x" + std::to_string(d);
2156 parallelIo.readArray(&tempStgCoord[noStgInternalCellsGlobal * d], (varName.str()).c_str());
2157 }
2158 parallelIo.setOffset(noStgGhostCellsGlobal, 0);
2159 for(MInt d = 0; d < nDim; ++d) {
2160 std::stringstream varName;
2161 varName << stgPrefix + "x" + std::to_string(d);
2162 parallelIo.readArray(&tempStgGCoord[noStgGhostCellsGlobal * d], (varName.str()).c_str());
2163 }
2164
2165 m_log << "Building up kd-tree..." << std::endl;
2166
2167 // first create kd tree for internal cells
2168 std::vector<Point<3>> donorPoints;
2169 for(MInt stgRestartId = 0; stgRestartId < noStgInternalCellsGlobal; stgRestartId++) {
2170 Point<3> pt(tempStgCoord[noStgInternalCellsGlobal * 0 + stgRestartId],
2171 tempStgCoord[noStgInternalCellsGlobal * 1 + stgRestartId],
2172 tempStgCoord[noStgInternalCellsGlobal * 2 + stgRestartId], globalIdsInternalGlobal[stgRestartId]);
2173 donorPoints.push_back(pt);
2174 }
2175
2176 // build up the tree and fill it
2177 KDtree<3>* donorTreeInternal = new KDtree<3>(donorPoints);
2178
2179 // second create kd tree for ghost cells
2180 donorPoints.clear();
2181 for(MInt stgRestartId = 0; stgRestartId < noStgGhostCellsGlobal; stgRestartId++) {
2182 Point<3> pt(tempStgGCoord[noStgGhostCellsGlobal * 0 + stgRestartId],
2183 tempStgGCoord[noStgGhostCellsGlobal * 1 + stgRestartId],
2184 tempStgGCoord[noStgGhostCellsGlobal * 2 + stgRestartId], globalIdsGhostGlobal[stgRestartId]);
2185 donorPoints.push_back(pt);
2186 }
2187
2188 // build up the tree and fill it
2189 KDtree<3>* donorTreeGhost = new KDtree<3>(donorPoints);
2190
2191 m_log << "Building up kd-tree... FINISHED!" << std::endl;
2192
2193 constexpr MFloat epsDistance = 1e-4; // TODO labels:FV make it dependent on current cell size
2195 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
2196 const MInt stgId = a->getStgId(it);
2197 const MInt cellId = a->getCellId(it);
2198
2199 if(m_solver->a_isBndryGhostCell(cellId)) {
2200 const MInt stgRestartId = stgId2stgRestartIdG[stgId];
2201 MFloat distance = 0;
2202 for(MInt d = 0; d < nDim; ++d) {
2203 distance += POW2(a->a_coordinate(cellId, d) - tempStgGCoord[noStgGhostCellsGlobal * d + stgRestartId]);
2204 }
2205 if(distance > epsDistance) TERMM(-1, "ghost coordinate mismatch with restart file");
2206 } else {
2207 const MInt stgRestartId = stgId2stgRestartId[stgId]; // internal mapping
2208 MFloat distance = 0;
2209 for(MInt d = 0; d < nDim; ++d) {
2210 distance += POW2(a->a_coordinate(cellId, d) - tempStgCoord[noStgInternalCellsGlobal * d + stgRestartId]);
2211 }
2212 if(distance > epsDistance) TERMM(-1, "internal coordinate mismatch with restart file");
2213 }
2214 }
2215 } // if(stgIOCoordinates==1)
2216
2217 // Read variables and populate m_stgLVariables
2218 ScratchSpace<MFloat> tempStgVars(noStgInternalCellsGlobal, AT_, "tempStgVars");
2219 ScratchSpace<MFloat> tempStgGVars(noStgGhostCellsGlobal, AT_, "tempStgGVars");
2220
2221 for(MInt var = 0; var < STG::noStgVars; ++var) {
2222 parallelIo.setOffset(noStgInternalCellsGlobal, 0);
2223 parallelIo.readArray(tempStgVars.getPointer(), (stgPrefix + std::to_string(var)).c_str());
2224 parallelIo.setOffset(noStgGhostCellsGlobal, 0);
2225 parallelIo.readArray(tempStgGVars.getPointer(), (stgPrefix + std::to_string(var)).c_str());
2227 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
2228 const MInt stgId = a->getStgId(it);
2229 const MInt cellId = a->getCellId(it);
2230
2231 if(m_solver->a_isBndryGhostCell(cellId)) {
2232 m_stgLVariables[var][stgId] = tempStgGVars[stgId2stgRestartIdG[stgId]];
2233 } else {
2234 m_stgLVariables[var][stgId] = tempStgVars[stgId2stgRestartId[stgId]];
2235 }
2236 }
2237 } } // end for(var); end if(m_stgLocal)
2238#endif
2239
2241
2242 } else if(stgIOCoordinates == 2) {
2243 /* All stgLocal ranks read all data; first read list of globalIds and coordinates;
2244 * loop through own stg cells and find match by means of a spatial search; check if
2245 * corresponding globalIds match; create map of own stgId to restartStgId
2246 */
2247 if(m_stgLocal) {
2248 ParallelIo parallelIo((filename.str()).c_str(), PIO_READ, m_commStg);
2249
2250 ParallelIo::size_type noStgInternalCellsGlobal =
2251 parallelIo.getArraySize((stgPrefix + "globalIdsInternal").c_str());
2252 std::vector<MInt> globalIdsInternalGlobal(noStgInternalCellsGlobal);
2253 std::vector<MInt> isGhostGlobal(noStgInternalCellsGlobal);
2254 parallelIo.setOffset(noStgInternalCellsGlobal, 0);
2255 parallelIo.readArray(globalIdsInternalGlobal.data(), (stgPrefix + "globalIdsInternal").c_str());
2256 parallelIo.readArray(isGhostGlobal.data(), (stgPrefix + "isGhost").c_str());
2257
2258 ScratchSpace<MFloat> tempStgCoord(noStgInternalCellsGlobal * nDim, AT_, "tempStgCoord");
2259 for(MInt d = 0; d < nDim; ++d) {
2260 std::stringstream varName;
2261 varName << stgPrefix + "x" + std::to_string(d);
2262 parallelIo.readArray(&tempStgCoord[noStgInternalCellsGlobal * d], (varName.str()).c_str());
2263 }
2264
2265 MInt myrank = 0;
2266 MPI_Comm_rank(m_commStg, &myrank);
2267#ifndef NDEBUG
2268 for(MInt i = 0; i < noStgInternalCellsGlobal; ++i) {
2269 std::cout << "Start loadStg:myrank=" << myrank << ": (" << globalIdsInternalGlobal[i] << "/" << isGhostGlobal[i]
2270 << ") : " << tempStgCoord[noStgInternalCellsGlobal * 0 + i] << "|"
2271 << tempStgCoord[noStgInternalCellsGlobal * 1 + i] << "|"
2272 << tempStgCoord[noStgInternalCellsGlobal * 2 + i] << std::endl;
2273 }
2274#endif
2275
2276 m_log << "Building up kd-tree..." << std::endl;
2277
2278 // first create kd tree for internal cells
2279 std::vector<Point<3>> donorPoints;
2280 for(MInt stgRestartId = 0; stgRestartId < noStgInternalCellsGlobal; stgRestartId++) {
2281 Point<3> pt(tempStgCoord[noStgInternalCellsGlobal * 0 + stgRestartId],
2282 tempStgCoord[noStgInternalCellsGlobal * 1 + stgRestartId],
2283 tempStgCoord[noStgInternalCellsGlobal * 2 + stgRestartId], stgRestartId);
2284 donorPoints.push_back(pt);
2285 }
2286
2287 // build up the tree and fill it
2288 auto* donorTree = new KDtree<nDim>(donorPoints);
2289
2290 m_log << "Building up kd-tree... FINISHED!" << std::endl;
2291
2292 // Loop over all own stg cells
2293 constexpr MFloat epsDistance = 1e-5; // TODO labels:FV make it dependent on current cell size
2294 std::vector<MInt> stgId2stgRestartId(a->sizeStg());
2295 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
2296 const MInt stgId = a->getStgId(it);
2297 const MInt cellId = a->getCellId(it);
2298 MFloat distance = 0;
2299 MInt globalId;
2300 MInt stgRestartId;
2301 if(m_solver->a_isPeriodic(cellId)
2302 || (m_solver->a_isBndryGhostCell(cellId)
2303 && m_solver->a_isPeriodic(a->m_stgToCellId[a->m_stgGhost2stgBndry[stgId]]))) {
2304 if(m_solver->grid().periodicCartesianDir(0) > 0 || m_solver->grid().periodicCartesianDir(1) > 0)
2305 TERMM(1, "Periodicity only allowed in z-direction!");
2306 Point<3> pt(a->a_coordinate(cellId, 0),
2307 a->a_coordinate(cellId, 1),
2308 a->a_coordinate(cellId, 2) + m_solver->grid().periodicCartesianLength(2));
2309 stgRestartId = donorTree->nearest(pt, distance);
2310 if(distance > epsDistance) {
2311 Point<3> pt2(a->a_coordinate(cellId, 0),
2312 a->a_coordinate(cellId, 1),
2313 a->a_coordinate(cellId, 2) - m_solver->grid().periodicCartesianLength(2));
2314 stgRestartId = donorTree->nearest(pt2, distance);
2315 if(distance > epsDistance) TERMM(1, "no periodic image found within epsDistance");
2316 }
2317 } else {
2318 Point<3> pt(a->a_coordinate(cellId, 0), a->a_coordinate(cellId, 1), a->a_coordinate(cellId, 2));
2319
2320 stgRestartId = donorTree->nearest(pt, distance);
2321 }
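 // Note: for periodic cells the restart file stores the donor at the cell's periodic image,
 // so the probe point is shifted by +/- the periodic length in z and both images are tried;
 // only z-periodicity is supported here. An equivalent loop form (illustrative; x, y, z
 // stand for the cell coordinates):
#if 0
 const MFloat Lz = m_solver->grid().periodicCartesianLength(2);
 for(const MFloat shift : {Lz, -Lz}) {
 Point<3> probe(x, y, z + shift);
 stgRestartId = donorTree->nearest(probe, distance);
 if(distance <= epsDistance) break;
 }
#endif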
2322
2323 if(m_solver->a_isBndryGhostCell(cellId)) {
2324 if(!isGhostGlobal[stgRestartId]) {
2325 TERMM(-1, "In current computation this cell is a ghost cell, but in restart file it is not!");
2326 }
2327 const MInt stgBndry = a->m_stgGhost2stgBndry[stgId];
2328 globalId = m_solver->c_globalId(a->m_stgToCellId[stgBndry]);
2329 } else {
2330 globalId = m_solver->c_globalId(cellId);
2331 }
2332 if(globalIdsInternalGlobal[stgRestartId] != globalId) {
2333 std::cout << "FAILED (" << globalId << "/" << m_solver->a_isBndryGhostCell(cellId) << "); "
2334 << globalIdsInternalGlobal[stgRestartId] << "; " << m_solver->a_isBndryGhostCell(cellId) << "; "
2335 << m_solver->a_isPeriodic(cellId) << "; " << a->a_coordinate(cellId, 0) << "|"
2336 << a->a_coordinate(cellId, 1) << "|" << a->a_coordinate(cellId, 2) << " || "
2337 << tempStgCoord[noStgInternalCellsGlobal * 0 + stgRestartId] << "|"
2338 << tempStgCoord[noStgInternalCellsGlobal * 1 + stgRestartId] << "|"
2339 << tempStgCoord[noStgInternalCellsGlobal * 2 + stgRestartId] << std::endl;
2340 TERMM(-1, "globalId mismatch between matched restart cell and current cell");
2341 }
2342 if(distance > epsDistance) {
2343 std::cout << myrank << ":FAILED2 (" << globalId << "/" << m_solver->a_isBndryGhostCell(cellId) << "); "
2344 << distance << "; " << a->a_coordinate(cellId, 0) << "|" << a->a_coordinate(cellId, 1) << "|"
2345 << a->a_coordinate(cellId, 2) << " || " << tempStgCoord[noStgInternalCellsGlobal * 0 + stgRestartId]
2346 << "|" << tempStgCoord[noStgInternalCellsGlobal * 1 + stgRestartId] << "|"
2347 << tempStgCoord[noStgInternalCellsGlobal * 2 + stgRestartId] << std::endl;
2348 TERMM(-1, "matched restart cell is farther away than epsDistance");
2349 }
2350 stgId2stgRestartId[stgId] = stgRestartId;
2351 }
2352
2353 // Read variables and populate m_stgLVariables
2354 ScratchSpace<MFloat> tempStgVars(noStgInternalCellsGlobal, AT_, "tempStgVars");
2355 for(MInt var = 0; var < STG::noStgVars; ++var) {
2356 parallelIo.readArray(tempStgVars.getPointer(), (stgPrefix + std::to_string(var)).c_str());
2357 for(typename Accessor::nDim_citerator it = a->iterateAll(); it != a->iterateAll_nDim_citerator_end(); ++it) {
2358 const MInt stgId = a->getStgId(it);
2359 m_stgLVariables[var][stgId] = tempStgVars[stgId2stgRestartId[stgId]];
2360 }
2361 }
2362 } // if (m_stgLocal)
2363 } // else if (stgIOCoordinates==2)
2364 //}//!m_cutOff
2365}
2366
2367#endif // STG_H_