dgcartesianslices.h
// Copyright (C) 2024 The m-AIA AUTHORS
//
// This file is part of m-AIA (https://git.rwth-aachen.de/aia/m-AIA/m-AIA)
//
// SPDX-License-Identifier: LGPL-3.0-only

#ifndef DGSLICES_H_
#define DGSLICES_H_

#include <numeric>
#include "COMM/mpioverride.h"
#include "typetraits.h"

// Forward declaration of auxiliary classes
class DgSliceSeries;
template <MInt nDim, class SysEqn>
class DgCartesianSolver;

template <MInt nDim, class SysEqn>
class DgSlices {
 public:
  explicit DgSlices(DgCartesianSolver<nDim, SysEqn>& solver) : m_solver(solver) {}
  void setProperties();
  void init();
  void save(const MBool isFinalTimeStep);
  MBool enabled() const { return m_enabled; }

 private:
  MPI_Comm mpiComm() const { return m_solver.mpiComm(); }
  MInt solverId() const { return m_solver.m_solverId; }
  MInt domainId() const { return m_solver.domainId(); }

  void updateGridFile(DgSliceSeries& sliceSeries, const MString& prefix = "");

  void init(DgSliceSeries& sliceSeries);
  void save(DgSliceSeries& sliceSeries, const MBool isFinalTimeStep);

  // Dimension constant to ensure that slices are only created from 3D (to 2D)
  static constexpr MInt m_dim = 3;

  // Store access to the DG solver
  DgCartesianSolver<nDim, SysEqn>& m_solver;
  // Enables the functionality of this class
  MBool m_enabled = false;
  // Holds all properties and buffers for each slice
  std::vector<DgSliceSeries> m_sliceSeries{};

  // List of slice variables
  std::vector<MInt> m_sliceVarIds{};
  // Number of variables for each slice variable
  std::vector<MInt> m_noSliceVars{};
  // List of variable names for each slice variable
  std::vector<std::vector<MString>> m_sliceVarNames{};
};
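
// Usage sketch (illustrative only; the variable names and call sites are
// assumptions, not part of this header): the owning DG solver is expected to
// drive this class roughly as follows.
//
//   DgSlices<nDim, SysEqn> slices(solver); // construct with the owning solver
//   slices.setProperties();                // read all slice* properties once
//   if(slices.enabled()) {
//     slices.init();                       // build slice grids and communicators
//   }
//   // ... at the end of each time step:
//   if(slices.enabled()) {
//     slices.save(isFinalTimeStep);        // writes only at the write interval
//   }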


// Auxiliary class that stores the settings and buffers of a single slice
class DgSliceSeries {
 public:
  DgSliceSeries(const MString& axis, const MFloat intercept, const MInt writeInterval, const MString& gridFileName,
                const MInt fileNo)
    : m_axis(axis),
      m_intercept(intercept),
      m_writeInterval(writeInterval),
      m_gridFileName(gridFileName),
      m_fileNo(fileNo) {}

  // Set to true once the slice has been initialized
  MBool m_isInit = false;

  // Settings of the slice
  // Axis that represents the normal vector of the slice plane
  const MString m_axis;
  // Coordinate of the slice plane along that axis
  const MFloat m_intercept;
  // Time step interval at which slice files are written
  const MInt m_writeInterval = -1;
  // Name of the 2D slice grid file
  MString m_gridFileName;
  // Number of this slice (used in file names)
  const MInt m_fileNo = -1;

  // MPI communicator for the current slice
  MPI_Comm m_mpiComm = MPI_COMM_NULL;

  // Buffers
  // Number of hilbertIds in the current slice
  MInt m_noHilbertIds = 0;
  // Number of local nodes (also includes the nodes of dummy elements)
  MInt m_localNoNodes = 0;
  // Number of cells belonging to the slice
  MInt m_noCellInSlice = 0;
  // Holds the coordinates of the elements which are relevant to the slice
  std::vector<MFloat> m_coordinates{};
  // Stores the polyDeg of every slice cell
  std::vector<MInt> m_polyDegs{};
  // Number of cells per hilbertId (for file writing)
  std::vector<MInt> m_cellIdsPerHilbertId{};
  // Offset for the polyDeg computed by slice hilbertId
  std::vector<MInt> m_cellIdsOffset{};
  // elementIds in the slice, needed to determine the data points
  std::vector<MInt> m_elementIds{};
  // Holds the position in the internal array for each element
  std::vector<MInt> m_elementOffset{};
  // Holds the number of nodes for each element
  std::vector<MInt> m_elementNodes{};
  // Number of elements per slice hilbertId (for file writing)
  std::vector<MInt> m_elementIdsPerHilbertId{};
  // Number of nodes per slice hilbertId (number of local data)
  std::vector<MInt> m_elementNodesPerHilbertIds{};
  // Offset for the local nodes computed by slice hilbertId
  std::vector<MInt> m_elementNodesHilbertOffset{};
};
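
// Example (illustrative): the properties evaluated by setProperties() below for
// a setup with two slices; shown schematically, the exact syntax depends on the
// MAIA property file format in use.
//
//   sliceEnabled       = true
//   sliceWriteInterval = 100    // default write interval for all slices
//   sliceAxis_0        = "x"    // slice 0: plane normal to the x-axis
//   sliceIntercept_0   = 0.5
//   sliceAxis_1        = "z"    // slice 1: plane normal to the z-axis
//   sliceIntercept_1   = 0.0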


// Read properties for all slices and create the corresponding slice objects.
template <MInt nDim, class SysEqn>
void DgSlices<nDim, SysEqn>::setProperties() {
  TRACE();

  using namespace std;

  if(Context::propertyExists("sliceEnabled", solverId())) {
    // Read properties and save them
    m_enabled = Context::getSolverProperty<MBool>("sliceEnabled", solverId(), AT_);

    // Leave when not enabled
    if(!m_enabled) {
      return;
    }
  }

  // TODO labels:DG,PP unify properties for DG-slices and PP-slices!
  m_log << "Set properties for DG Slice class...";

  // Check for default properties

  MString gridFileNameDefault = "grid.slice";
  gridFileNameDefault = Context::getSolverProperty<MString>("sliceGridFileName", solverId(), AT_, &gridFileNameDefault);

  MString axisDefault;
  if(Context::propertyExists("sliceAxis", solverId())) {
    axisDefault = Context::getSolverProperty<MString>("sliceAxis", solverId(), AT_, &axisDefault);
  }

  MFloat interceptDefault = 0.0;
  interceptDefault = Context::getSolverProperty<MFloat>("sliceIntercept", solverId(), AT_, &interceptDefault);

  MInt writeIntervalDefault = -1;
  if(m_solver.m_restartInterval > 0) {
    writeIntervalDefault =
        Context::getSolverProperty<MInt>("sliceWriteInterval", solverId(), AT_, &m_solver.m_restartInterval);
  } else {
    writeIntervalDefault =
        Context::getSolverProperty<MInt>("sliceWriteInterval", solverId(), AT_, &writeIntervalDefault);
  }
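
  // (Example: if m_restartInterval is 500 and no sliceWriteInterval property is
  // given, the default write interval becomes 500; the numbered per-slice
  // properties read below can still override it.)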

  // Read properties for all slice numbers
  MInt noFiles = 0;
  MBool sliceFound = false;

  // Check if the sliceAxis or sliceIntercept property for the next slice exists
  if(Context::propertyExists("sliceAxis_" + to_string(noFiles), solverId())
     || Context::propertyExists("sliceIntercept_" + to_string(noFiles), solverId())) {
    sliceFound = true;
  }

  // Check if a full set of default properties is set; if so, write one slice file
  if(Context::propertyExists("sliceAxis", solverId()) && writeIntervalDefault > 0) {
    sliceFound = true;
  }

  // Leave the method if not all properties are set
  if(!sliceFound) {
    m_enabled = false;
    m_log << "not all necessary slice properties are set!" << std::endl;
    return;
  }

  // Search for slices in the properties and create the slice objects
  while(sliceFound) {
    // Check for non-default values of the properties
    MString gridFileName = Context::getSolverProperty<MString>("sliceGridFileName_" + to_string(noFiles), solverId(),
                                                               AT_, &gridFileNameDefault);

    MString axis =
        Context::getSolverProperty<MString>("sliceAxis_" + to_string(noFiles), solverId(), AT_, &axisDefault);

    MFloat intercept =
        Context::getSolverProperty<MFloat>("sliceIntercept_" + to_string(noFiles), solverId(), AT_, &interceptDefault);

    MInt writeInterval = Context::getSolverProperty<MInt>("sliceWriteInterval_" + to_string(noFiles), solverId(), AT_,
                                                          &writeIntervalDefault);

    // Check for incorrect properties
    if(writeInterval < 1) {
      TERMM(1, "WriteInterval for Slice no. " + to_string(noFiles) + " is smaller than 1.");
    }

    if(axis != "x" && axis != "y" && axis != "z") {
      TERMM(1, "Illegal axis for Slice no. " + to_string(noFiles) + ". Only x, y or z is a valid axis.");
    }

    // Add the slice number to the grid file name
    gridFileName += '_' + to_string(noFiles);
    // Create the slice object
    m_sliceSeries.emplace_back(axis, intercept, writeInterval, gridFileName, noFiles);
    noFiles++;

    // Check for the next slice
    sliceFound = Context::propertyExists("sliceAxis_" + to_string(noFiles), solverId())
                 || Context::propertyExists("sliceIntercept_" + to_string(noFiles), solverId());
  }
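
  // Note: slices are numbered consecutively from 0; the 2D grid of slice n is
  // written to "<sliceGridFileName>_n" and the solution files of slice n carry
  // the same number in their name (see save() below).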

  m_sliceVarIds.clear();
  m_noSliceVars.clear();
  m_sliceVarNames.clear();
  m_solver.getSolverSamplingProperties(m_sliceVarIds, m_noSliceVars, m_sliceVarNames, "Slice");

  // @ansgar_slice TODO labels:DG not required at the moment
  // Allocate additional storage for the sampling variables if required
  // solver().initSolverSamplingVariables(m_solverSamplingVarIds, m_noSolverSamplingVars);

  // Initialization finished
  m_enabled = true;
  m_log << "done!" << std::endl;

  // Abort to avoid usage of the DG slice in a 2D case
  IF_CONSTEXPR(nDim == 2) {
    if(m_enabled) {
      TERMM(1, "Cannot use DG Slice in a 2D calculation.");
    }
  }
}


// Initialize all slice objects.
template <MInt nDim, class SysEqn>
void DgSlices<nDim, SysEqn>::init() {
  TRACE();
  // Return if the DG slice is deactivated
  if(!m_enabled) {
    return;
  }

  // Call the init method for all slice objects
  for(MUint i = 0; i < m_sliceSeries.size(); i++) {
    init(m_sliceSeries[i]);
  }
}


// Determine all coordinates and allocate the buffer sizes; create a valid 2D
// grid which represents a slice of the 3D grid.
template <MInt nDim, class SysEqn>
void DgSlices<nDim, SysEqn>::init(DgSliceSeries& sliceSeries) {
  TRACE();

  // Reset variables, needed when DgSlices::init() is called multiple times, e.g., with DLB enabled
  // Reset noLocalNodes
  sliceSeries.m_localNoNodes = 0;

  //--------------------------------------------------------------------------
  // Step 1: Determine DG elements and number of nodes
  //--------------------------------------------------------------------------

  // Create the grid file and get the slice cells from the grid
  MIntScratchSpace cellIds(m_solver.grid().noInternalCells(), AT_, "cellIds");
  MIntScratchSpace hilbertInfo(m_solver.grid().noInternalCells() * 3, AT_, "hilbertIds");
  // Get the cellIds for the current slice and the hilbertId information. The cellIds are used to
  // determine the DG elements for the slice and the hilbertId information is needed for file creation.
  const MString gridFilePath = m_solver.outputDir() + sliceSeries.m_gridFileName;
  m_solver.grid().raw().createGridSlice(sliceSeries.m_axis, sliceSeries.m_intercept, gridFilePath, solverId(),
                                        &sliceSeries.m_noCellInSlice, &cellIds[0], &sliceSeries.m_noHilbertIds,
                                        &hilbertInfo[0]);
  sliceSeries.m_elementNodesPerHilbertIds.resize(sliceSeries.m_noHilbertIds);
  sliceSeries.m_elementIdsPerHilbertId.resize(sliceSeries.m_noHilbertIds);

  m_log << "Initializing DG Slice " << sliceSeries.m_fileNo << " ...";
  // Search for slice elements if slice cells are found on the current domain
  if(sliceSeries.m_noCellInSlice > 0) {
    // Convert grid cell-ids into solver cell-ids (only required if there are multiple solvers)
    if(m_solver.grid().raw().treeb().noSolvers() > 1) {
      for(MInt i = 0; i < sliceSeries.m_noCellInSlice; i++) {
        cellIds[i] = m_solver.grid().tree().grid2solver(cellIds[i]);
      }
    }

    // Initialize the polyDegree vector
    sliceSeries.m_polyDegs.resize(sliceSeries.m_noCellInSlice);
    std::fill_n(&sliceSeries.m_polyDegs[0], sliceSeries.m_noCellInSlice, m_solver.m_initPolyDeg);

    // Assumed upper bound for the number of slice elements
    const MInt noMaxElements = m_solver.m_elements.size();
    // Collectors for element data
    MIntScratchSpace elementIds(noMaxElements, AT_, "elementIds");
    MIntScratchSpace elementNodes(noMaxElements, AT_, "elementNodes");
    MIntScratchSpace elementOffset(noMaxElements, AT_, "elementOffset");

    // Total number of coordinates for all elements
    MInt noCoords = 0;
    // Assumed maximum number of slice nodes per element: (maxPolyDeg + 1)^2
    const MInt noMaxNodes = pow(
        *std::max_element(&m_solver.m_elements.polyDeg(0), &m_solver.m_elements.polyDeg(0) + m_solver.m_elements.size())
            + 1,
        2);
    MFloatScratchSpace coordinates(noMaxElements * noMaxNodes * m_dim, AT_, "coordinates");

    // Holds the coordinate tuple of one node
    std::array<MFloat, m_dim> nodeCoord;
    // This loop searches for the elements and node coordinates of the slice. The cells are checked
    // in chunks with the same sliceHilbertId to determine the element node offsets at the end of
    // this part.
    MInt noElementsSlice = 0;
    for(MInt h = 0, cellSum = 0, noNodes = 0, prevNoNodes = 0, sumElements = 0; h < sliceSeries.m_noHilbertIds; h++) {
      for(MInt c = cellSum, e = 0; (c - cellSum) < hilbertInfo[h * 3 + 1]; e++) {
        // Check to identify cells which are not an element; these cells are
        // counted as dummy nodes in the solution file and have the value 0

        // Skip slice cells which do not have a corresponding element
        if(cellIds[c] < m_solver.m_elements.cellId(e)) {
          // Count dummy nodes for cells which are not elements
          noNodes += ipow(m_solver.m_initPolyDeg + 1, m_dim - 1);
          // Go to the next cell after counting the nodes
          c++;
          // Hold the element until all dummy cells are counted
          e--;
          continue;
        }
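        // (Example with an assumed initPolyDeg of 3 in a 3D run: each dummy
        // cell contributes ipow(3 + 1, 3 - 1) = 16 dummy nodes to the slice.)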
        // Save the node coordinates if the element is in the slice
        if(cellIds[c] == m_solver.m_elements.cellId(e)) {
          // Determine the number of nodes
          const MInt noNodes1D = m_solver.m_elements.polyDeg(e) + 1;

          // Tensor to hold the node coordinates
          const MFloatTensor nodeCoordinates(&m_solver.m_elements.nodeCoordinates(e), noNodes1D, noNodes1D, noNodes1D,
                                             m_dim);
          // Loop over all nodes to save the coordinates
          for(MInt i = 0; i < noNodes1D; i++) {
            for(MInt j = 0; j < noNodes1D; j++) {
              if(sliceSeries.m_axis == "x") {
                nodeCoord[0] = sliceSeries.m_intercept;
                nodeCoord[1] = nodeCoordinates(0, i, j, 1);
                nodeCoord[2] = nodeCoordinates(0, i, j, 2);
              } else if(sliceSeries.m_axis == "y") {
                nodeCoord[0] = nodeCoordinates(i, 0, j, 0);
                nodeCoord[1] = sliceSeries.m_intercept;
                nodeCoord[2] = nodeCoordinates(i, 0, j, 2);
              } else if(sliceSeries.m_axis == "z") {
                nodeCoord[0] = nodeCoordinates(i, j, 0, 0);
                nodeCoord[1] = nodeCoordinates(i, j, 0, 1);
                nodeCoord[2] = sliceSeries.m_intercept;
              }
              // Save the coordinate tuple
              std::copy_n(&nodeCoord[0], m_dim, &coordinates[noCoords]);
              noCoords += m_dim;
            }
          }
          elementIds[noElementsSlice] = e;

          // Use the node count accumulated within the current hilbertId chunk as the offset
          // for this element
          elementOffset[noElementsSlice] = noNodes - prevNoNodes;

          // Increase the offset based on the element polynomial degree
          const MInt noNodesXD = ipow(m_solver.m_elements.polyDeg(e) + 1, m_dim - 1);
          noNodes += noNodesXD;

          // Set the number of nodes to use later and proceed with the next element
          elementNodes[noElementsSlice] = noNodesXD;
          noElementsSlice++;

          // Save the polyDegree (it is easier to collect this data here)
          sliceSeries.m_polyDegs[c] = m_solver.m_elements.polyDeg(e);

          c++;
        }
      }
      // Save the number of elements which have the same hilbertId
      sliceSeries.m_elementIdsPerHilbertId[h] = noElementsSlice - sumElements;

      // Save the number of nodes per hilbertId and the total number
      sliceSeries.m_elementNodesPerHilbertIds[h] = noNodes - prevNoNodes;
      sliceSeries.m_localNoNodes += sliceSeries.m_elementNodesPerHilbertIds[h];
      // Save the number of evaluated cells, elements and nodes
      cellSum += hilbertInfo[h * 3 + 1];
      prevNoNodes = noNodes;
      sumElements = noElementsSlice;
    }
    // Resize variables
    sliceSeries.m_elementIds.resize(noElementsSlice);
    sliceSeries.m_elementNodes.resize(noElementsSlice);
    sliceSeries.m_elementOffset.resize(noElementsSlice);
    sliceSeries.m_coordinates.resize(noCoords);
    // Get the values from the scratch spaces and save them
    std::copy_n(&elementIds[0], noElementsSlice, &sliceSeries.m_elementIds[0]);
    std::copy_n(&elementNodes[0], noElementsSlice, &sliceSeries.m_elementNodes[0]);
    std::copy_n(&elementOffset[0], noElementsSlice, &sliceSeries.m_elementOffset[0]);
    std::copy_n(&coordinates[0], noCoords, &sliceSeries.m_coordinates[0]);
  }

  // If only non-element cells are found, make m_coordinates non-zero length
  // to create a dummy states scratch
  if(sliceSeries.m_coordinates.empty()) {
    sliceSeries.m_coordinates.resize(1);
  }

  //--------------------------------------------------------------------------
  // Step 2: Create the MPI communicator
  //--------------------------------------------------------------------------

  using namespace maia;
  // Set the MPI communicator for the ranks that are part of the slice
  // Create a new MPI communicator
  ScratchSpace<MInt> sliceScratch(globalNoDomains(), AT_, "sliceScratch");

  // Get the rank if the domain holds cells of the slice
  MInt rank = -1;
  if(sliceSeries.m_noCellInSlice > 0) {
    MPI_Comm_rank(mpiComm(), &rank);
  }

  // Gather all ranks to determine the relevant ones
  MPI_Allgather(&rank, 1, type_traits<MInt>::mpiType(), &sliceScratch[0], 1, type_traits<MInt>::mpiType(), mpiComm(),
                AT_, "rank", "sliceScratch[0]");

  // Check for the relevant ranks and save them to create the new communicator
  const MInt noSliceDomains =
      std::count_if(sliceScratch.begin(), sliceScratch.end(), [](const MInt a) { return a != -1; });
  MIntScratchSpace sliceDomains(noSliceDomains, AT_, "sliceDomains");
  MInt position = 0;
  for(MInt i : sliceScratch) {
    if(i != -1) {
      sliceDomains[position] = i;
      position++;
    }
  }

  // Create a new MPI group that contains only the slice domains
  MPI_Group globalGroup, localGroup;
  MPI_Comm_group(mpiComm(), &globalGroup, AT_, "globalGroup");
  MPI_Group_incl(globalGroup, noSliceDomains, &sliceDomains[0], &localGroup, AT_);

  // Create the new communicator and clean up
  MPI_Comm_create(mpiComm(), localGroup, &sliceSeries.m_mpiComm, AT_, "sliceSeries.m_mpiComm");

  MPI_Group_free(&globalGroup, AT_);
  MPI_Group_free(&localGroup, AT_);

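  // Note: MPI_Comm_create returns MPI_COMM_NULL on every rank that is not part
  // of localGroup. Such ranks have m_noCellInSlice == 0, so they skip the
  // exchange in step 3 below, and since their m_localNoNodes stays 0 they also
  // leave save() early.
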
  //--------------------------------------------------------------------------
  // Step 3: Exchange hilbertId data and determine offsets
  //--------------------------------------------------------------------------

  if(sliceSeries.m_noCellInSlice > 0) {
    // Get the slice domain
    MInt sliceDomain = -1;
    MPI_Comm_rank(sliceSeries.m_mpiComm, &sliceDomain);

    // Determine the total number of slice hilbertIds
    // Cells and elements are sorted by slice hilbertId and then by domain. So if we have hilbertId
    // 2 on domains 3 and 5, the order is determined by the domainId. Distributing this information
    // globally is the purpose of the next lines.
    MIntScratchSpace noLocalHilbertIds(noSliceDomains, AT_, "noLocalHilbertIds");
    noLocalHilbertIds[sliceDomain] = sliceSeries.m_noHilbertIds;

    MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, &noLocalHilbertIds[0], 1, MPI_INT, sliceSeries.m_mpiComm, AT_,
                  "MPI_IN_PLACE", "noLocalHilbertIds[0]");

    // Compute the offsets for the received data
    MIntScratchSpace offsetsRecvData(noSliceDomains, AT_, "offsetsRecvData");
    offsetsRecvData[0] = 0;
    for(MInt i = 1; i < noSliceDomains; i++) {
      offsetsRecvData[i] = offsetsRecvData[i - 1] + noLocalHilbertIds[i - 1] * 3;
    }
    MInt noTotalRecvData = offsetsRecvData[noSliceDomains - 1] + noLocalHilbertIds[noSliceDomains - 1] * 3;

    // Create the data array for sending
    MIntScratchSpace sendHilbertData(noLocalHilbertIds[sliceDomain] * 3, AT_, "sendHilbertData");
    // Send the hilbertId, the domainId and the number of nodes per hilbertId
    for(MInt i = 0; i < sliceSeries.m_noHilbertIds; i++) {
      sendHilbertData[i * 3] = hilbertInfo[i * 3];
      sendHilbertData[i * 3 + 1] = domainId();
      sendHilbertData[i * 3 + 2] = sliceSeries.m_elementNodesPerHilbertIds[i];
    }

    // Create the data array for receiving
    MIntScratchSpace recvHilbertData(noTotalRecvData, AT_, "recvHilbertData");
    MIntScratchSpace noRecvData(noSliceDomains, AT_, "noRecvData");
    for(MInt i = 0; i < noSliceDomains; i++) {
      noRecvData[i] = noLocalHilbertIds[i] * 3;
    }

    // Exchange elementNoHilbertNodes
    MPI_Allgatherv(&sendHilbertData[0], sendHilbertData.size(), MPI_INT, &recvHilbertData[0], &noRecvData[0],
                   &offsetsRecvData[0], MPI_INT, sliceSeries.m_mpiComm, AT_, "sendHilbertData[0]",
                   "recvHilbertData[0]");

    // Create sliceGlobalHilbertInfo
    ScratchSpace<std::array<MInt, 3>> sliceGlobalHilbertInfo(noTotalRecvData / 3, AT_, "sliceGlobalHilbertInfo");
    for(MInt i = 0; i < noTotalRecvData / 3; i++) {
      // Contains the number of cells sorted by hilbertId (first), domainId (second) and the
      // number of nodes per hilbertId
      sliceGlobalHilbertInfo[i][0] = recvHilbertData[i * 3];
      sliceGlobalHilbertInfo[i][1] = recvHilbertData[i * 3 + 1];
      sliceGlobalHilbertInfo[i][2] = recvHilbertData[i * 3 + 2];
    }
    // Sort sliceGlobalHilbertInfo by domainId...
    std::stable_sort(sliceGlobalHilbertInfo.begin(), sliceGlobalHilbertInfo.end(),
                     [](const std::array<MInt, 3>& a, const std::array<MInt, 3>& b) { return a[1] < b[1]; });
    // ... and then stable-sort by hilbertId -> the list is ordered by hilbertId and, nested for
    // each hilbertId, by domainId
    std::stable_sort(sliceGlobalHilbertInfo.begin(), sliceGlobalHilbertInfo.end(),
                     [](const std::array<MInt, 3>& a, const std::array<MInt, 3>& b) { return a[0] < b[0]; });

    // Calculate the elementNodeOffset by slice hilbertIds for file writing
    MInt maxNoHilbertIds = *std::max_element(noLocalHilbertIds.begin(), noLocalHilbertIds.end());
    sliceSeries.m_elementNodesHilbertOffset.resize(maxNoHilbertIds);
    sliceSeries.m_cellIdsPerHilbertId.resize(maxNoHilbertIds);
    sliceSeries.m_cellIdsOffset.resize(maxNoHilbertIds);
    // The data in sliceGlobalHilbertInfo is sorted by hilbertId on the first level and by
    // domainId on the second level. Example:
    // [hilbertId, domainId, noNodes]
    // sliceGlobalHilbertInfo[0] = [0, 0, 20]
    // sliceGlobalHilbertInfo[1] = [0, 2, 10]
    // sliceGlobalHilbertInfo[2] = [1, 1, 6]
    //
    // To determine the global offset for the local data (for each local hilbertId), count the
    // number of nodes of all hilbertIds below the current one. If this hilbertId is distributed
    // over multiple domains, also count the following node numbers until the local domainId is
    // reached in sliceGlobalHilbertInfo. In the example above, the offset for hilbertId 0 on
    // domain 2 would be 20.
    for(MInt i = 0, j = 0; i < noLocalHilbertIds[sliceDomain]; i++) {
      MInt offset = 0;
      j = 0;
      // Count until the current hilbertId is reached
      while(sliceGlobalHilbertInfo[j][0] < hilbertInfo[i * 3]) {
        offset += sliceGlobalHilbertInfo[j][2];
        j++;
      }
      // Count up to the current domain
      while(sliceGlobalHilbertInfo[j][1] < domainId()) {
        offset += sliceGlobalHilbertInfo[j][2];
        j++;
      }
      // Set the local node offset
      sliceSeries.m_elementNodesHilbertOffset[i] = offset;
      // Save noCells with the same hilbertId for writing the poly degree to the grid
      sliceSeries.m_cellIdsPerHilbertId[i] = hilbertInfo[i * 3 + 1];
      // Save the offset for writing the poly degree to the grid
      sliceSeries.m_cellIdsOffset[i] = hilbertInfo[i * 3 + 2];
    }
  }

  // TODO labels:DG,DOC
  MBool optimizedSliceIo = true;
  optimizedSliceIo = Context::getBasicProperty<MBool>("optimizedSliceIo", AT_, &optimizedSliceIo);

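  // Illustration of the merging below (assumed values): if this domain holds two
  // hilbertId chunks with (offset, noNodes) = (20, 10) and (30, 4), the second
  // chunk starts exactly where the first one ends (20 + 10 == 30), so both are
  // merged into one chunk (20, 14) that is written with a single collective I/O
  // call instead of two.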
  // Reorganize data before writing
  if(optimizedSliceIo && sliceSeries.m_noCellInSlice > 0) {
    // Create buffers to organize m_elementOffset
    std::vector<MInt> bufferElementIdsPerHilbertId;
    std::vector<MInt> bufferElementNodesPerHilbertIds;
    // Fill buffers
    std::copy(sliceSeries.m_elementIdsPerHilbertId.begin(),
              sliceSeries.m_elementIdsPerHilbertId.end(),
              back_inserter(bufferElementIdsPerHilbertId));
    std::copy(sliceSeries.m_elementNodesPerHilbertIds.begin(),
              sliceSeries.m_elementNodesPerHilbertIds.end(),
              back_inserter(bufferElementNodesPerHilbertIds));

    MInt noHilbertIdChunks = 0;

    // Reconstruct data for writing
    for(MInt h = 1, i = 0, offsetSum = 0; h <= (MInt)sliceSeries.m_elementNodesHilbertOffset.size(); h++) {
      // Reorganize data
      if(h < sliceSeries.m_noHilbertIds) {
        if((sliceSeries.m_elementNodesPerHilbertIds[i] + sliceSeries.m_elementNodesHilbertOffset[i])
           == sliceSeries.m_elementNodesHilbertOffset[h]) {
          sliceSeries.m_elementNodesPerHilbertIds[i] += sliceSeries.m_elementNodesPerHilbertIds[h];
          sliceSeries.m_elementIdsPerHilbertId[i] += sliceSeries.m_elementIdsPerHilbertId[h];

          sliceSeries.m_cellIdsPerHilbertId[i] += sliceSeries.m_cellIdsPerHilbertId[h];

          // Update element offsets of the elements belonging to the added hilbertId
          MInt startIdx = std::accumulate(&bufferElementIdsPerHilbertId[0], &bufferElementIdsPerHilbertId[h], 0);
          MInt endIdx = std::accumulate(&bufferElementIdsPerHilbertId[0], &bufferElementIdsPerHilbertId[h + 1], 0);
          offsetSum += bufferElementNodesPerHilbertIds[h - 1];
          for(MInt j = startIdx; j < endIdx; j++) {
            sliceSeries.m_elementOffset[j] += offsetSum;
          }
        } else {
          i++;
          sliceSeries.m_elementNodesPerHilbertIds[i] = sliceSeries.m_elementNodesPerHilbertIds[h];
          sliceSeries.m_elementNodesHilbertOffset[i] = sliceSeries.m_elementNodesHilbertOffset[h];
          sliceSeries.m_elementIdsPerHilbertId[i] = sliceSeries.m_elementIdsPerHilbertId[h];

          sliceSeries.m_cellIdsPerHilbertId[i] = sliceSeries.m_cellIdsPerHilbertId[h];
          sliceSeries.m_cellIdsOffset[i] = sliceSeries.m_cellIdsOffset[h];

          // Reset offsetSum in case of multiple concatenations
          offsetSum = 0;
        }
      }

      // Set remaining elements to zero at last iteration
      if(h == (MInt)sliceSeries.m_elementNodesHilbertOffset.size()) {
        i++;
        noHilbertIdChunks = i;

        std::fill(sliceSeries.m_elementNodesPerHilbertIds.begin() + i, sliceSeries.m_elementNodesPerHilbertIds.end(),
                  0);
        std::fill(sliceSeries.m_elementNodesHilbertOffset.begin() + i, sliceSeries.m_elementNodesHilbertOffset.end(),
                  0);
        std::fill(sliceSeries.m_elementIdsPerHilbertId.begin() + i, sliceSeries.m_elementIdsPerHilbertId.end(), 0);

        std::fill(sliceSeries.m_cellIdsPerHilbertId.begin() + i, sliceSeries.m_cellIdsPerHilbertId.end(), 0);
        std::fill(sliceSeries.m_cellIdsOffset.begin() + i, sliceSeries.m_cellIdsOffset.end(), 0);
      }
    }

    MInt maxNoHilbertIdChunks = -1;
    MPI_Allreduce(&noHilbertIdChunks, &maxNoHilbertIdChunks, 1, maia::type_traits<MInt>::mpiType(), MPI_MAX,
                  sliceSeries.m_mpiComm, AT_, "noHilbertIdChunks", "maxNoHilbertIdChunks");
    sliceSeries.m_elementNodesHilbertOffset.resize(maxNoHilbertIdChunks);

    sliceSeries.m_noHilbertIds = noHilbertIdChunks;
    sliceSeries.m_elementNodesPerHilbertIds.resize(noHilbertIdChunks);
    sliceSeries.m_elementIdsPerHilbertId.resize(noHilbertIdChunks);
    sliceSeries.m_cellIdsPerHilbertId.resize(noHilbertIdChunks);
    sliceSeries.m_cellIdsOffset.resize(noHilbertIdChunks);
  }

  sliceSeries.m_isInit = true;
  m_log << "done!" << std::endl;
}

// Call the save method for all slice objects.
template <MInt nDim, class SysEqn>
void DgSlices<nDim, SysEqn>::save(const MBool isFinalTimeStep) {
  TRACE();
  // Return if the DG slice is deactivated
  if(!m_enabled) {
    return;
  }

  // Call the save method for all slice objects
  for(MUint i = 0; i < m_sliceSeries.size(); i++) {
    save(m_sliceSeries[i], isFinalTimeStep);
  }
}


// Write the slice data of a single slice series to file.
template <MInt nDim, class SysEqn>
void DgSlices<nDim, SysEqn>::save(DgSliceSeries& sliceSeries, const MBool isFinalTimeStep) {
  TRACE();

  // Ensure that the initialization was done
  if(!sliceSeries.m_isInit) {
    TERMM(1, "DG Slice class was not properly initialized.");
  }

  // Leave when the domain is not part of the slice
  if(sliceSeries.m_localNoNodes < 1) {
    return;
  }

  // Check for the write interval
  if(!(m_solver.m_timeStep % sliceSeries.m_writeInterval == 0 || isFinalTimeStep)) {
    return;
  }
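
  // (Example: with sliceWriteInterval = 100 the slice is written at every time
  // step that is a multiple of 100, and additionally at the final time step.)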

  // TODO labels:DG @ansgar_slice not required at the moment
  /* // Compute sampling variables if not done already for another time series in this time step */
  /* if(m_solverCalcSamplingVars) { */
  /*   solver().calcSamplingVariables(m_solverSamplingVarIds); */
  /*   m_solverCalcSamplingVars = false; */
  /* } */

  // Write all slice variables to the same file
  const MInt noVars = std::accumulate(m_noSliceVars.begin(), m_noSliceVars.end(), 0);
  const MUint noVarIds = m_noSliceVars.size();

  // Get state at slice coordinates
  MFloatTensor coordinatesTensor(&sliceSeries.m_coordinates[0],
                                 std::ceil((MFloat)sliceSeries.m_coordinates.size() / m_dim), m_dim);

  // Save states of nodes
  MFloatScratchSpace states(std::ceil((MFloat)sliceSeries.m_coordinates.size() / m_dim) * noVars, AT_, "states");
  MFloatTensor stateTensor(&states[0], std::ceil((MFloat)sliceSeries.m_coordinates.size() / m_dim), noVars);
  // TODO labels:DG,totest check performance of state calculation (timer)
  // Calculate states of all nodes
  if(sliceSeries.m_coordinates.size() > 1) {
    for(MUint e = 0, offset = 0; e < sliceSeries.m_elementNodes.size(); e++) {
      const MUint noNodes = sliceSeries.m_elementNodes[e];
      for(MUint n = 0; n < noNodes; n++) {
        MInt varOffset = 0;
        for(MUint v = 0; v < noVarIds; v++) {
          m_solver.calcSamplingVarAtPoint(&coordinatesTensor(offset + n, 0), sliceSeries.m_elementIds[e],
                                          m_sliceVarIds[v], &stateTensor(offset + n, varOffset), true);
          varOffset += m_noSliceVars[v];
        }
      }
      offset += noNodes;
    }
  }

  // Write the file
  using namespace maia::parallel_io;

  // TODO labels:DG slice naming with slice axis and intercept? same as in postprocessing
  // Create the output file name with the slice axis and number
  // TODO labels:DG change bX (block X) into e.g. sX (solver X)? need to change other output files as well
  const MString s = (m_solver.grid().raw().treeb().noSolvers() > 1) ? "_b" + std::to_string(solverId()) : "";
  std::ostringstream fileName;
  fileName << m_solver.outputDir() << "slice" << s << "_" << sliceSeries.m_fileNo << "_" << std::setw(8)
           << std::setfill('0') << m_solver.m_timeStep << ParallelIo::fileExt();
  ParallelIo file(fileName.str(), PIO_REPLACE, sliceSeries.m_mpiComm);

  // Determine the offset and global number of nodes
  ParallelIo::size_type nodesOffset, globalNoNodes, offset, totalCount;
  ParallelIo::calcOffset(sliceSeries.m_localNoNodes, &nodesOffset, &globalNoNodes, sliceSeries.m_mpiComm);
  ParallelIo::calcOffset(sliceSeries.m_noCellInSlice, &offset, &totalCount, sliceSeries.m_mpiComm);

  // Set attributes
  // Grid file name and solver type
  file.setAttribute(sliceSeries.m_gridFileName + ParallelIo::fileExt(), "gridFile");
  file.setAttribute(solverId(), "solverId");
  file.setAttribute("DG", "solverType");
  file.setAttribute(m_solver.m_timeStep, "timeStep");
  file.setAttribute(m_solver.m_time, "time");
  // Save the slice axis and intercept to identify the grid file
  file.setAttribute(sliceSeries.m_axis, "sliceAxis");
  file.setAttribute(sliceSeries.m_intercept, "sliceIntercept");

  // Define arrays in the file
  // Polynomial degree
  file.defineArray(PIO_INT, "polyDegs", totalCount);

  // Get information about the integration method and polynomial type
  MString dgIntegrationMethod = "DG_INTEGRATE_GAUSS";
  dgIntegrationMethod =
      Context::getSolverProperty<MString>("dgIntegrationMethod", solverId(), AT_, &dgIntegrationMethod);
  MString dgPolynomialType = "DG_POLY_LEGENDRE";
  dgPolynomialType = Context::getSolverProperty<MString>("dgPolynomialType", solverId(), AT_, &dgPolynomialType);

  // Add the integration method & polynomial type to the grid file as attributes
  file.setAttribute(dgIntegrationMethod, "dgIntegrationMethod", "polyDegs");
  file.setAttribute(dgPolynomialType, "dgPolynomialType", "polyDegs");

  // Set variables
  for(MUint v = 0, totalNoVars = 0; v < noVarIds; v++) {
    for(MInt i = 0; i < m_noSliceVars[v]; i++) {
      const MString name = "variables" + std::to_string(totalNoVars);
      file.defineArray(PIO_FLOAT, name, globalNoNodes);
      file.setAttribute(m_sliceVarNames[v][i], "name", name);
      totalNoVars++;
    }
  }

  // Write poly degree to file (sorted by hilbertId)
  for(MInt h = 0, pOffset = 0; h < (MInt)sliceSeries.m_elementNodesHilbertOffset.size(); h++) {
    if(h < sliceSeries.m_noHilbertIds && sliceSeries.m_cellIdsPerHilbertId[h] > 0) {
      file.setOffset(sliceSeries.m_cellIdsPerHilbertId[h], sliceSeries.m_cellIdsOffset[h]);
      file.writeArray(&sliceSeries.m_polyDegs[0 + pOffset], "polyDegs");
      pOffset += sliceSeries.m_cellIdsPerHilbertId[h];
    } else {
      file.setOffset(0, 0);
      file.writeArray(&sliceSeries.m_polyDegs[0], "polyDegs");
    }
  }
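
  // Note: writeArray is a collective operation on the slice communicator, so
  // every rank must call it once per chunk; ranks without data for a chunk
  // issue a zero-size dummy write via setOffset(0, 0), here and in the
  // variables loop below.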

  // Number of nodes for the elements per hilbertId
  const MInt tmp = std::accumulate(sliceSeries.m_elementNodes.begin(), sliceSeries.m_elementNodes.end(), 0);
  // Create a state tensor variable for easier access
  MFloatTensor stateTensorFinal(&states[0], std::max(tmp, 1), noVars);

  // Write data
  for(MInt i = 0; i < noVars; i++) {
    for(MInt h = 0, elementOffset = 0, offsetElementNodes = 0; h < (MInt)sliceSeries.m_elementNodesHilbertOffset.size();
        h++) {
      if(h < sliceSeries.m_noHilbertIds && sliceSeries.m_elementNodesPerHilbertIds[h] > 0) {
        // Write data for every chunk of contiguous hilbertIds on this domain

        // Set the amount and offset for the current hilbertId
        file.setOffset(sliceSeries.m_elementNodesPerHilbertIds[h], sliceSeries.m_elementNodesHilbertOffset[h]);
        // Create a buffer to collect the data in the correct order for saving
        MFloatScratchSpace buffer(sliceSeries.m_elementNodesPerHilbertIds[h], AT_, "buffer");
        std::fill(buffer.begin(), buffer.end(), 0.0);
        const MInt noElementsSlice = sliceSeries.m_elementIdsPerHilbertId[h] + elementOffset;

        // Fill the buffer at the positions where the element data belongs (reminder: elements only
        // exist on the highest refinement level)
        for(MInt e = elementOffset; e < noElementsSlice; e++) {
          MFloat* const b = &buffer[sliceSeries.m_elementOffset[e]];
          for(MInt j = 0; j < sliceSeries.m_elementNodes[e]; j++) {
            b[j] = stateTensorFinal(offsetElementNodes + j, i);
          }
          offsetElementNodes += sliceSeries.m_elementNodes[e];
        }
        elementOffset = noElementsSlice;

        const MString name = "variables" + std::to_string(i);
        // Write the data
        file.writeArray(&buffer[0], name);
      } else {
        // Dummy calls for data writing if this domain is finished but other domains are not
        file.setOffset(0, 0);
        const MFloat tmpBuf = 0;
        const MString name = "variables" + std::to_string(i);
        file.writeArray(&tmpBuf, name);
      }
    }
  }
}
#endif