13#include <sys/resource.h>
15#include <sys/sysinfo.h>
33 s <<
"Rank " <<
globalDomainId() <<
" threw exit code " << errorCode <<
"\n";
36 <<
"Program is aborting!!\n";
37 std::cerr << s.str() << std::flush;
48 MPI_Abort(MPI_COMM_WORLD, errorCode, AT_);
75 return (stat(fileName.c_str(), &buffer) == 0);
85 std::cerr <<
"Could not copy file " << fromName <<
".\n";
89 remove(toName.c_str());
91 std::ifstream src(fromName.c_str());
92 std::ofstream dst(toName.c_str());
93 if(src.good() && dst.good()) {
100 std::cerr <<
"Could not copy file " << fromName <<
" (2).\n";
113 const MInt minLevel,
const MFloat* targetGridCenterOfGravity,
114 const MFloat targetGridLengthLevel0,
const MInt targetGridMinLevel) {
120 m_log << std::setprecision(15) <<
"Check multisolver grid extends: length0=" << lengthLevel0
121 <<
"; minLevel=" << minLevel <<
"; targetLenght0=" << targetGridLengthLevel0
122 <<
"; targetMinLevel=" << targetGridMinLevel <<
"; eps=" << eps << std::endl;
125 const std::array<MString, 3> dirs = {{
"x",
"y",
"z"}};
126 for(
MInt dir = 0; dir < nDim; dir++) {
127 const std::array<MFloat, 2> gridExtent = {
128 {centerOfGravity[dir] - 0.5 * lengthLevel0, centerOfGravity[dir] + 0.5 * lengthLevel0}};
129 const std::array<MFloat, 2> globalExtent = {{targetGridCenterOfGravity[dir] - 0.5 * targetGridLengthLevel0,
130 targetGridCenterOfGravity[dir] + 0.5 * targetGridLengthLevel0}};
132 if(gridExtent[0] + eps < globalExtent[0]) {
134 "Grid extents exceed multisolver bouding box in negative " + dirs[dir]
135 +
"-direction: grid=" + std::to_string(gridExtent[0]) +
"; box=" + std::to_string(globalExtent[0]));
137 if(gridExtent[1] - eps > globalExtent[1]) {
139 "Grid extents exceed multisolver bouding box in positive " + dirs[dir]
140 +
"-direction: grid=" + std::to_string(gridExtent[1]) +
"; box=" + std::to_string(globalExtent[1]));
142 m_log << std::setprecision(15) <<
"Grid extents in the " << dirs[dir] <<
"-direction: [" << gridExtent[0] <<
", "
143 << gridExtent[1] <<
"]; global: [" << globalExtent[0] <<
", " << globalExtent[1] <<
"]" << std::endl;
147 const MFloat gridMinLevelLength = lengthLevel0 *
FFPOW2(minLevel);
148 const MFloat globalMinLevelLength = targetGridLengthLevel0 *
FFPOW2(targetGridMinLevel);
149 if(fabs(gridMinLevelLength - globalMinLevelLength) > eps) {
151 "Length of min level cells do not match between grid and given multisolver bounding "
153 + std::to_string(gridMinLevelLength) +
"; box: " + std::to_string(globalMinLevelLength));
155 m_log << std::setprecision(15)
156 <<
"Length of min level cells match between grid and given multisolver bounding "
158 << gridMinLevelLength <<
"; box: " << globalMinLevelLength << std::endl;
163 for(
MInt dir = 0; dir < nDim; dir++) {
164 const MFloat displacement = fabs(centerOfGravity[dir] - targetGridCenterOfGravity[dir]);
165 const MFloat quotient = displacement / globalMinLevelLength;
167 TERMM(1,
"The grid centers are displaced in the " + dirs[dir]
168 +
"-direction by a non-integer multiple of the length of a "
170 + std::to_string(quotient));
172 m_log << std::setprecision(15) <<
"The grid centers are displaced in the " << dirs[dir]
173 <<
"-direction by a multiple of the length of a "
175 << quotient <<
" (displacement = " << displacement <<
")" << std::endl;
188 std::istringstream iss;
189 std::ifstream csvFile(inputFileName);
193 while(getline(csvFile, line)) {
197 for(
MInt i = 0; i < nDim; i++) {
200 std::ostringstream err;
202 err <<
"Error at line " << noPoints + 1 <<
": " << line <<
"\n"
203 <<
"Either wrong dimension (nDim = " << nDim <<
") or otherwise wrong format."
204 <<
"Format should be nDim floats seperated by spaces per line.";
207 coordinates.push_back(curFloat);
221 static MLong virtMemLast = 0;
222 static MLong physMemLast = 0;
227 MLong physMemFree = 0;
228 MLong memAvailable = 0;
232 MInt fileNotFound = 0;
233 MInt memoryNotFound = 0;
237 fin.open(
"/proc/self/status");
246 std::istringstream iss;
247 std::array<MInt, 5> foundInfo;
248 foundInfo.fill(
false);
251 while(getline(fin, line)) {
255 getline(iss, name,
':');
256 if(name ==
"VmRSS") {
260 }
else if(name ==
"VmData") {
264 }
else if(name ==
"VmStk") {
274 fin.open(
"/proc/meminfo");
279 while(getline(fin, line)) {
283 getline(iss, name,
':');
284 if(name ==
"MemFree") {
286 physMemFree = buffer;
288 }
else if(name ==
"MemAvailable") {
290 memAvailable = buffer;
297 memoryNotFound = std::any_of(foundInfo.begin(), foundInfo.end(), [](
MBool i) { return !i; });
300 MPI_Allreduce(MPI_IN_PLACE, &fileNotFound, 1, MPI_INT, MPI_MAX, comm, AT_,
"MPI_IN_PLACE",
"&fileNotFound");
301 MPI_Allreduce(MPI_IN_PLACE, &memoryNotFound, 1, MPI_INT, MPI_MAX, comm, AT_,
"MPI_IN_PLACE",
"&memoryNotFound");
302 MPI_Allreduce(MPI_IN_PLACE, &foundInfo[0], 5, MPI_INT, MPI_MAX, comm, AT_,
"MPI_IN_PLACE",
"&foundInfo");
304 if(fileNotFound || memoryNotFound) {
307 std::stringstream ss;
308 ss <<
"Error in writeMemoryStatistics: Could not determine memory statistics! " << fileNotFound <<
" "
309 << memoryNotFound <<
" (";
310 for(
MInt i = 0; i < 5; i++) {
311 ss <<
" " << foundInfo[i];
313 ss <<
")" << std::endl;
314 std::cerr << ss.str();
325 MPI_Gather(&physMem, 1, MPI_LONG, physMemPerProcess.
getPointer(), 1, MPI_LONG, 0, comm, AT_,
"physMem",
326 "physMemPerProcess.getPointer()");
327 MPI_Gather(&virtMem, 1, MPI_LONG, virtMemPerProcess.
getPointer(), 1, MPI_LONG, 0, comm, AT_,
"virtMem",
328 "virtMemPerProcess.getPointer()");
330 MLongScratchSpace physMemDiffPerProcess(noDomains, AT_,
"physMemDiffPerProcess");
331 MLongScratchSpace virtMemDiffPerProcess(noDomains, AT_,
"virtMemDiffPerProcess");
333 MLong physMemDiff = physMem - physMemLast;
334 MLong virtMemDiff = virtMem - virtMemLast;
336 MPI_Gather(&physMemDiff, 1, MPI_LONG, physMemDiffPerProcess.
getPointer(), 1, MPI_LONG, 0, comm, AT_,
"physMem",
337 "physMemDiffPerProcess.getPointer()");
338 MPI_Gather(&virtMemDiff, 1, MPI_LONG, virtMemDiffPerProcess.
getPointer(), 1, MPI_LONG, 0, comm, AT_,
"virtMem",
339 "virtMemDiffPerProcess.getPointer()");
342 physMemLast = physMem;
343 virtMemLast = virtMem;
347 MPI_Gather(&stackMem, 1, MPI_LONG, stackMemPerProcess.
getPointer(), 1, MPI_LONG, 0, comm, AT_,
"stackMem",
348 "stackMemPerProcess.getPointer()");
351 MLong minPhysMemFree = physMemFree;
352 MPI_Allreduce(MPI_IN_PLACE, &minPhysMemFree, 1, MPI_LONG, MPI_MIN, comm, AT_,
"MPI_IN_PLACE",
"&minPhysMemFree");
353 MLong minMemAvailable = memAvailable;
354 MPI_Allreduce(MPI_IN_PLACE, &minMemAvailable, 1, MPI_LONG, MPI_MIN, comm, AT_,
"MPI_IN_PLACE",
"&minMemAvailable");
358 MLong totalPhysMem = 0;
359 MLong totalVirtMem = 0;
360 MLong totalPhysMemDiffSum = 0;
361 MLong totalVirtMemDiffSum = 0;
362 MLong totalPhysMemDiffMax = 0;
363 MLong totalVirtMemDiffMax = 0;
364 MLong maxStackMem = 0;
366 for(
MInt i = 0; i < noDomains; i++) {
368 totalPhysMem += physMemPerProcess[i];
369 totalVirtMem += virtMemPerProcess[i];
371 totalPhysMemDiffSum += physMemDiffPerProcess[i];
372 totalVirtMemDiffSum += virtMemDiffPerProcess[i];
374 totalPhysMemDiffMax = std::max(physMemDiffPerProcess[i], totalPhysMemDiffMax);
375 totalVirtMemDiffMax = std::max(virtMemDiffPerProcess[i], totalVirtMemDiffMax);
377 maxStackMem = std::max(stackMemPerProcess[i], maxStackMem);
382 getrlimit(RLIMIT_STACK, &rlim);
384 const MUlong rlim_stack = rlim.rlim_cur;
386 if(maxStackMem > 0.5 * rlim_stack) {
387 std::stringstream warning;
389 <<
"WARNING: maximum stack memory usage >50% of its limit, use 'ulimit -s unlimited' to remove this "
390 "memory restriction and avoid segmentation faults if the stack memory usage exceeds its limit."
392 warning <<
"WARNING: stack memory usage " << (
MFloat)maxStackMem <<
" KB; stack limit "
393 << (
MFloat)rlim_stack / 1024 <<
" KB" << std::endl
395 m_log << warning.str();
396 std::cerr << warning.str();
402 for(
MInt i = 0; i < noDomains; i++) {
403 m_log <<
" Process " << i <<
" - Current memory usage: physical = " << (
MFloat)physMemPerProcess[i] / 1024
404 <<
" MB; allocation = " << (
MFloat)virtMemPerProcess[i] / 1024 <<
" MB" << std::endl;
408 std::stringstream ss;
410 ss <<
"******************************* MEMORY STATISTICS *******************************" << std::endl;
411 ss <<
"***** Comment: " << comment <<
" - #ranks: " << noDomains << std::endl;
412 ss <<
"***** Location: " << at << std::endl;
413 ss <<
"***** " << std::endl;
415 ss <<
"***** Average memory usage: physical = " << (
MFloat)totalPhysMem / (noDomains * 1024)
416 <<
" MB; allocation = " << (
MFloat)totalVirtMem / (noDomains * 1024) <<
" MB\n";
419 ss <<
"***** Minimun memory usage: physical = "
420 << (
MFloat)*std::min_element(physMemPerProcess.
begin(), physMemPerProcess.
end()) / 1024
421 <<
" MB; allocation = " << (
MFloat)*std::min_element(virtMemPerProcess.
begin(), virtMemPerProcess.
end()) / 1024
423 ss <<
"***** Maximum memory usage: physical = "
424 << (
MFloat)*std::max_element(physMemPerProcess.
begin(), physMemPerProcess.
end()) / 1024
425 <<
" MB; allocation = " << (
MFloat)*std::max_element(virtMemPerProcess.
begin(), virtMemPerProcess.
end()) / 1024
427 ss <<
"***** Maximum diff in memory usage: physical = " << (
MFloat)totalPhysMemDiffMax / 1024
428 <<
" MB; allocation = " << (
MFloat)totalVirtMemDiffMax / 1024 <<
" MB\n";
431 ss <<
"***** Total physical memory usage (RAM): " << (
MFloat)totalPhysMem / (1024 * 1024) <<
" GB\n";
432 ss <<
"***** Diff total physical memory usage (RAM): " << (
MFloat)totalPhysMemDiffSum / (1024 * 1024) <<
" GB\n";
433 ss <<
"***** Total allocation size (Virtual Memory): " << (
MFloat)totalVirtMem / (1024 * 1024) <<
" GB"
435 ss <<
"***** Diff total allocation size (Virtual Memory): " << (
MFloat)totalVirtMemDiffSum / (1024 * 1024) <<
" GB"
437 ss <<
"***** " << std::endl;
438 ss <<
"***** Maximum stack memory: " << (
MFloat)maxStackMem <<
" KB; stack limit " << (
MFloat)rlim_stack / 1024
439 <<
" KB" << std::endl;
440 ss <<
"***** " << std::endl;
441 ss <<
"***** Minimum available memory per node (meminfo): " << (
MFloat)minMemAvailable / (1024 * 1024) <<
" GB"
443 ss <<
"***** Minimum free memory per node (RAM): " << (
MFloat)minPhysMemFree / (1024 * 1024) <<
" GB" << std::endl;
444 ss <<
"******************************* MEMORY STATISTICS *******************************" << std::endl << std::endl;
446 std::cout << ss.str() << std::endl;
447 m_log << ss.str() << std::endl;
void mDealloc()
Deallocates all memory allocated previously by mAlloc(...)
Environment for the program.
void close(MBool forceClose=false)
Pass the close call to the respective internal buffer.
This class is a ScratchSpace.
T * getPointer() const
Deprecated: use begin() instead!
void checkMultiSolverGridExtents(const MInt nDim, const MFloat *centerOfGravity, const MFloat lengthLevel0, const MInt minLevel, const MFloat *targetGridCenterOfGravity, const MFloat targetGridLengthLevel0, const MInt targetGridMinLevel)
Checks if the given grid extents and cell sizes match when creating a multisolver grid and correspond...
MInt copyFile(const MString &fromName, const MString &toName)
Copies file fromName to file toName.
MInt loadPointCoordinatesFromFile(const MString inputFileName, const MInt nDim, std::vector< MFloat > &coordinates)
Loads point coordinates from an input file.
Environment * mEnvironment
void mTerm(const MInt errorCode, const MString &location, const MString &message)
void writeMemoryStatistics(const MPI_Comm comm, const MInt noDomains, const MInt domainId, const MString at, const MString comment)
Write memory statistics.
MBool fileExists(const MString &fileName)
Returns true if the file fileName exists, false otherwise.
const MString &location, const MString &message
MBool isApproxInt(const T &, const T)
MInt globalDomainId()
Return global domain id.
constexpr MFloat FPOW2(MInt x)
constexpr MFloat FFPOW2(MInt x)
std::basic_string< char > MString
int MPI_Abort(MPI_Comm comm, int errorcode, const MString &name)
same as MPI_Abort
int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, const MString &name, const MString &sndvarname, const MString &rcvvarname)
same as MPI_Allreduce
int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, const MString &name, const MString &sndvarname, const MString &rcvvarname)
same as MPI_Gather