MAIA bb96820c
Multiphysics at AIA
parallelio_pnetcdf.cpp
1// Copyright (C) 2024 The m-AIA AUTHORS
2//
3// This file is part of m-AIA (https://git.rwth-aachen.de/aia/m-AIA/m-AIA)
4//
5// SPDX-License-Identifier: LGPL-3.0-only
6
7#include "parallelio.h"
8// needs to be included after parallelio.h!
9#include "parallelio_pnetcdf.h"
10
11#include "maiapnetcdf.h"
12
13#include <cstdlib>
14#include <cstring>
15#include <sys/stat.h>
16#include "COMM/mpioverride.h"
17#include "MEMORY/scratch.h"
18#include "UTIL/debug.h"
19#include "UTIL/functions.h"
20#include "typetraits.h"
21
22#if not defined(MAIA_MS_COMPILER)
23#include <pwd.h>
24#include <unistd.h>
25#endif
26
27#ifdef _SX
28#include <sys/socket.h>
29#include <sys/types.h>
30#endif
31
32using namespace maia;
33using namespace parallel_io;
34using namespace std;
35
36// Check that a valid Parallel NetCDF file type was set
37#if(MAIA_NCMPI_FILE_TYPE != NC_64BIT_OFFSET) && (MAIA_NCMPI_FILE_TYPE != NC_64BIT_DATA)
38#error Bad value for MAIA_NCMPI_FILE_TYPE.
39#endif
40
41//------------------------------------------------------------------------------
42// Parallel NetCDF-specific methods
43//------------------------------------------------------------------------------
44
45// Use unnamed namespace for some PnetCDF-specific magic (a.k.a. type traits)
46namespace {
47
48template <class DataType>
49struct pnetcdf_traits {};
50
51// MFloat traits
52template <>
53struct pnetcdf_traits<MFloat> {
54 // Corresponding NetCDF data type
55 static nc_type type() { return NC_DOUBLE; }
56
57 // Write contiguously
58 static int ncmpi_put_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
59 const MFloat* buf) {
60 return ncmpi_put_vara_double_all(ncid, varid, start, count, buf);
61 }
62
63 // Write with stride
64 static int ncmpi_put_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
65 const MPI_Offset stride[], const MFloat* buf) {
66 return ncmpi_put_vars_double_all(ncid, varid, start, count, stride, buf);
67 }
68
69 // Read contiguously
70 static int ncmpi_get_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
71 MFloat* buf) {
72 return ncmpi_get_vara_double_all(ncid, varid, start, count, buf);
73 }
74
75 // Read with stride
76 static int ncmpi_get_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
77 const MPI_Offset stride[], MFloat* buf) {
78 return ncmpi_get_vars_double_all(ncid, varid, start, count, stride, buf);
79 }
80
81 // Set attribute
82 static int ncmpi_put_att_type(int ncid, int varid, const char* name, nc_type xtype, const MPI_Offset nelems,
83 const MFloat* buf) {
84 return ncmpi_put_att_double(ncid, varid, name, xtype, nelems, buf);
85 }
86
87 // Get attribute
88 static int ncmpi_get_att_type(int ncid, int varid, const char* name, MFloat* buf) {
89 return ncmpi_get_att_double(ncid, varid, name, buf);
90 }
91};
92
93// MInt traits
94template <>
95struct pnetcdf_traits<MInt> {
96 // Corresponding NetCDF data type
97 static nc_type type() { return NC_INT; }
98
99 // Write contiguously
100 static int ncmpi_put_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
101 const MInt* buf) {
102 return ncmpi_put_vara_int_all(ncid, varid, start, count, buf);
103 }
104
105 // Write with stride
106 static int ncmpi_put_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
107 const MPI_Offset stride[], const MInt* buf) {
108 return ncmpi_put_vars_int_all(ncid, varid, start, count, stride, buf);
109 }
110
111 // Read contiguously
112 static int ncmpi_get_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
113 MInt* buf) {
114 return ncmpi_get_vara_int_all(ncid, varid, start, count, buf);
115 }
116
117 // Read with stride
118 static int ncmpi_get_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
119 const MPI_Offset stride[], MInt* buf) {
120 return ncmpi_get_vars_int_all(ncid, varid, start, count, stride, buf);
121 }
122
123 // Set attribute
124 static int ncmpi_put_att_type(int ncid, int varid, const char* name, nc_type xtype, MPI_Offset nelems,
125 const MInt* buf) {
126 return ncmpi_put_att_int(ncid, varid, name, xtype, nelems, buf);
127 }
128
129 // Get attribute
130 static int ncmpi_get_att_type(int ncid, int varid, const char* name, MInt* buf) {
131 return ncmpi_get_att_int(ncid, varid, name, buf);
132 }
133};
134
135// MLong traits
136template <>
137struct pnetcdf_traits<MLong> {
138 // Corresponding NetCDF data type
139 static nc_type type() { return NC_INT64; }
140
141 // Write contiguously
142 static int ncmpi_put_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
143 const MLong* buf) {
144 return ncmpi_put_vara_long_all(ncid, varid, start, count, buf);
145 }
146
147 // Write with stride
148 static int ncmpi_put_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
149 const MPI_Offset stride[], const MLong* buf) {
150 return ncmpi_put_vars_long_all(ncid, varid, start, count, stride, buf);
151 }
152
153 // Read contiguously
154 static int ncmpi_get_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
155 MLong* buf) {
156 return ncmpi_get_vara_long_all(ncid, varid, start, count, buf);
157 }
158
159 // Read with stride
160 static int ncmpi_get_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
161 const MPI_Offset stride[], MLong* buf) {
162 return ncmpi_get_vars_long_all(ncid, varid, start, count, stride, buf);
163 }
164
165 // Set attribute
166 static int ncmpi_put_att_type(int ncid, int varid, const char* name, nc_type xtype, MPI_Offset nelems,
167 const MLong* buf) {
168 return ncmpi_put_att_long(ncid, varid, name, xtype, nelems, buf);
169 }
170
171 // Get attribute
172 static int ncmpi_get_att_type(int ncid, int varid, const char* name, MLong* buf) {
173 return ncmpi_get_att_long(ncid, varid, name, buf);
174 }
175};
176
177// MChar traits
178template <>
179struct pnetcdf_traits<MChar> {
180 // Corresponding NetCDF data type
181 static nc_type type() { return NC_CHAR; }
182
183 // Write contiguously
184 static int ncmpi_put_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
185 const MChar* buf) {
186 return ncmpi_put_vara_text_all(ncid, varid, start, count, buf);
187 }
188
189 // Write with stride
190 static int ncmpi_put_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
191 const MPI_Offset stride[], const MChar* buf) {
192 return ncmpi_put_vars_text_all(ncid, varid, start, count, stride, buf);
193 }
194
195 // Read contiguously
196 static int ncmpi_get_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
197 MChar* buf) {
198 return ncmpi_get_vara_text_all(ncid, varid, start, count, buf);
199 }
200
201 // Read with stride
202 static int ncmpi_get_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
203 const MPI_Offset stride[], MChar* buf) {
204 return ncmpi_get_vars_text_all(ncid, varid, start, count, stride, buf);
205 }
206
207 // Set attribute
208 static int ncmpi_put_att_type(int ncid, int varid, const char* name, nc_type NotUsed(xtype), MPI_Offset nelems,
209 const MChar* buf) {
210 // nc_type remains unused for _text API
211 return ncmpi_put_att_text(ncid, varid, name, nelems, buf);
212 }
213
214 // Get attribute
215 static int ncmpi_get_att_type(int ncid, int varid, const char* name, MChar* buf) {
216 return ncmpi_get_att_text(ncid, varid, name, buf);
217 }
218};
219
220// MUchar traits
221template <>
222struct pnetcdf_traits<MUchar> {
223 // Corresponding NetCDF data type
224 static nc_type type() { return NC_UBYTE; }
225
226 // Write contiguously
227 static int ncmpi_put_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
228 const MUchar* buf) {
229 return ncmpi_put_vara_uchar_all(ncid, varid, start, count, buf);
230 }
231
232 // Write with stride
233 static int ncmpi_put_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
234 const MPI_Offset stride[], const MUchar* buf) {
235 return ncmpi_put_vars_uchar_all(ncid, varid, start, count, stride, buf);
236 }
237
238 // Read contiguously
239 static int ncmpi_get_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
240 MUchar* buf) {
241 return ncmpi_get_vara_uchar_all(ncid, varid, start, count, buf);
242 }
243
244 // Read with stride
245 static int ncmpi_get_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
246 const MPI_Offset stride[], MUchar* buf) {
247 return ncmpi_get_vars_uchar_all(ncid, varid, start, count, stride, buf);
248 }
249
250 // Set attribute
251 static int ncmpi_put_att_type(int ncid, int varid, const char* name, nc_type xtype, MPI_Offset nelems,
252 const MUchar* buf) {
253 return ncmpi_put_att_uchar(ncid, varid, name, xtype, nelems, buf);
254 }
255
256 // Get attribute
257 static int ncmpi_get_att_type(int ncid, int varid, const char* name, MUchar* buf) {
258 return ncmpi_get_att_uchar(ncid, varid, name, buf);
259 }
260};
261
262// MUlong traits
263template <>
264struct pnetcdf_traits<MUlong> {
265 // Corresponding NetCDF data type
266 static nc_type type() { return NC_UINT64; }
267
268 // Write contiguously
269 static int ncmpi_put_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
270 const MUlong* buf) {
271 return ncmpi_put_vara_ulonglong_all(ncid, varid, start, count, (const long long unsigned int*)(buf));
272 }
273
274 // Write with stride
275 static int ncmpi_put_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
276 const MPI_Offset stride[], const MUlong* buf) {
277 return ncmpi_put_vars_ulonglong_all(ncid, varid, start, count, stride, (const long long unsigned int*)(buf));
278 }
279
280 // Read contiguously
281 static int ncmpi_get_vara_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
282 MUlong* buf) {
283 return ncmpi_get_vara_ulonglong_all(ncid, varid, start, count, (long long unsigned int*)(buf));
284 }
285
286 // Read with stride
287 static int ncmpi_get_vars_type_all(int ncid, int varid, const MPI_Offset start[], const MPI_Offset count[],
288 const MPI_Offset stride[], MUlong* buf) {
289 return ncmpi_get_vars_ulonglong_all(ncid, varid, start, count, stride, (long long unsigned int*)(buf));
290 }
291
292 // Set attribute
293 static int ncmpi_put_att_type(int ncid, int varid, const char* name, nc_type xtype, MPI_Offset nelems,
294 const MUlong* buf) {
295 return ncmpi_put_att_ulonglong(ncid, varid, name, xtype, nelems, (const long long unsigned int*)(buf));
296 }
297
298 // Get attribute
299 static int ncmpi_get_att_type(int ncid, int varid, const char* name, MUlong* buf) {
300 return ncmpi_get_att_ulonglong(ncid, varid, name, (long long unsigned int*)(buf));
301 }
302};
303
304} // namespace
305
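// A minimal, self-contained sketch of the traits-dispatch pattern used above: one templated
// writer picks the matching typed C call at compile time. The put_traits/writeAll names and the
// printf stand-ins are illustrative only and are not part of the PnetCDF API.
#if 0 // illustrative sketch
#include <cstdio>

template <class DataType>
struct put_traits {};

template <>
struct put_traits<double> {
  // Stands in for ncmpi_put_vara_double_all
  static int put(const double* buf, int n) {
    for(int i = 0; i < n; i++) std::printf("%f\n", buf[i]);
    return 0;
  }
};

template <>
struct put_traits<int> {
  // Stands in for ncmpi_put_vara_int_all
  static int put(const int* buf, int n) {
    for(int i = 0; i < n; i++) std::printf("%d\n", buf[i]);
    return 0;
  }
};

// One generic implementation; the traits select the correct typed call
template <class T>
int writeAll(const T* buf, int n) {
  return put_traits<T>::put(buf, n);
}

int main() {
  const double d[2] = {1.0, 2.0};
  const int i[2] = {1, 2};
  writeAll(d, 2);
  writeAll(i, 2);
  return 0;
}
#endif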
306//
307//------------------------------------------------------------------------------
308// Static file system-related methods
309//------------------------------------------------------------------------------
322MBool ParallelIoPNetcdf::b_isValidFile(const MString& name, const MPI_Comm& mpiComm) {
323 TRACE();
324
325 MBool returnValue;
326
327 MInt status, fileId;
328 status = ncmpi_open(mpiComm, name.c_str(), NC_NOWRITE, globalMpiInfo(), &fileId);
329
330 if(status == NC_NOERR) {
331 returnValue = true;
332 status = ncmpi_close(fileId);
333 b_error(status, name, AT_);
334 } else {
335 returnValue = false;
336 }
337
338 return returnValue;
339}
340
341
349MString ParallelIoPNetcdf::b_fileExt() {
350 TRACE();
351
352 return ".Netcdf";
353}
354
355
356//------------------------------------------------------------------------------
357// Constructor & Destructor
358//------------------------------------------------------------------------------
372ParallelIoPNetcdf::ParallelIoPNetcdf(const MString& fileName, MInt fileMode, const MPI_Comm& mpiComm)
373 : ParallelIoBase<ParallelIoPNetcdf>(fileName, fileMode, mpiComm) {
374 TRACE();
375
376#ifdef DISABLE_OUTPUT
377 if(m_fileMode != PIO_READ) return;
378#endif
379
380 switch(m_fileMode) {
381 case PIO_CREATE: {
382 // Create a new file (do not overwrite existing)
383 MInt status =
384 ncmpi_create(m_mpiComm, m_fileName.c_str(), NC_NOCLOBBER | MAIA_NCMPI_FILE_TYPE, globalMpiInfo(), &b_ncId);
385 b_error(status, m_fileName, AT_);
386 } break;
387
388 case PIO_REPLACE: {
389 // Create a new file (overwrite existing)
390 MInt status =
391 ncmpi_create(m_mpiComm, m_fileName.c_str(), NC_CLOBBER | MAIA_NCMPI_FILE_TYPE, globalMpiInfo(), &b_ncId);
392 b_error(status, m_fileName, AT_);
393 } break;
394
395 case PIO_APPEND: {
396 // Attempt to open an existing file to append data
397 MInt status =
398 ncmpi_open(m_mpiComm, m_fileName.c_str(), NC_WRITE | MAIA_NCMPI_FILE_TYPE, globalMpiInfo(), &b_ncId);
399 b_error(status, m_fileName, AT_);
400
401 // Set correct data mode status
402 b_ncDataMode = true;
403
404 // Read in all existing dimensions and populate dimensions map
405 int noDims;
406 status = ncmpi_inq_ndims(b_ncId, &noDims);
407 b_error(status, m_fileName, AT_);
408 for(int dimId = 0; dimId < noDims; dimId++) {
409 MPI_Offset dimLength;
410 status = ncmpi_inq_dimlen(b_ncId, dimId, &dimLength);
411 b_error(status, m_fileName, AT_);
412 if(b_ncDimensions.count(dimLength) == 0u) {
413 b_ncDimensions[dimLength] = NcDimProxy();
414 b_ncDimensions[dimLength].dimNo = dimLength;
415 b_ncDimensions[dimLength].dimId = dimId;
416 }
417 }
418
419 // Enter definition mode so that new attributes/variables may be added
420 b_ncRedef();
421 } break;
422
423 case PIO_READ: {
424 // Open file for reading
425 MInt status = ncmpi_open(m_mpiComm, m_fileName.c_str(), NC_NOWRITE, globalMpiInfo(), &b_ncId);
426 b_error(status, m_fileName, AT_);
427
428 // Set correct data mode status
429 b_ncDataMode = true;
430 } break;
431
432 default: {
433 mTerm(1, AT_, "Unsupported file mode.");
434 } break;
435 }
436
437#ifdef MAIA_NCMPI_PRINT_FILE_HINTS
438 if(m_domainId == 0) {
439 std::cerr << std::endl << "Created/replaced/opened file: " << m_fileName << std::endl;
441 }
442#endif
443}
444
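// The modes above wrap the plain PnetCDF lifecycle: create or open a file, define dimensions and
// variables in define mode, switch to data mode for collective I/O, then close. A minimal
// standalone sketch under those assumptions (file name and sizes are made up, error checks omitted):
#if 0 // illustrative sketch
#include <mpi.h>
#include <pnetcdf.h>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // PIO_CREATE corresponds to NC_NOCLOBBER (fail if the file exists), PIO_REPLACE to NC_CLOBBER
  int ncid;
  ncmpi_create(MPI_COMM_WORLD, "example.Netcdf", NC_NOCLOBBER | NC_64BIT_DATA, MPI_INFO_NULL, &ncid);

  // Define mode: dimensions, variables and attributes may be added
  int dimId, varId;
  ncmpi_def_dim(ncid, "dim0", 4 * size, &dimId);
  ncmpi_def_var(ncid, "data", NC_DOUBLE, 1, &dimId, &varId);

  // Data mode: collective reads/writes are allowed
  ncmpi_enddef(ncid);
  double buf[4] = {0.0, 1.0, 2.0, 3.0};
  MPI_Offset start[1] = {4 * rank}, count[1] = {4};
  ncmpi_put_vara_double_all(ncid, varId, start, count, buf);

  ncmpi_close(ncid);
  MPI_Finalize();
  return 0;
}
#endif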
445
452ParallelIoPNetcdf::~ParallelIoPNetcdf() {
453 TRACE();
454
455 // Check if file is already closed
456 if(b_ncId != -1) {
457 close();
458 }
459}
460
461
464// the file MPI communicator is destroyed before the file object goes out of scope).
465void ParallelIoPNetcdf::close() {
466 TRACE();
467#ifdef MAIA_NCMPI_PRINT_FILE_HINTS
468 if(m_domainId == 0) {
469 std::cerr << std::endl << "Closing file: " << m_fileName << std::endl;
471 }
472#endif
473
474 MInt status = ncmpi_close(b_ncId);
475 b_error(status, m_fileName, AT_);
476 b_ncId = -1; // Reset NetCDF file id; calling any other function after close() should result in an
477 // invalid-file-id NetCDF error
478
479#ifdef MAIA_EXTRA_DEBUG
480 for(set<MString>::iterator it = m_unwrittenArrays.begin(); it != m_unwrittenArrays.end(); ++it) {
481 cerr << "Warning: array '" << *it << "' in file '" << m_fileName << "' was defined but never written. "
482 << "Make sure that this is the intended behavior." << endl;
483 }
484 for(set<MString>::iterator it = m_unwrittenScalars.begin(); it != m_unwrittenScalars.end(); ++it) {
485 cerr << "Warning: scalar '" << *it << "' in file '" << m_fileName << "' was defined but never written. "
486 << "Make sure that this is the intended behavior." << endl;
487 }
488#endif
489}
490
491//------------------------------------------------------------------------------
492// File methods
493//------------------------------------------------------------------------------
494
495
502void ParallelIoPNetcdf::b_ncEndDef() {
503 TRACE();
504
505 if(!b_ncDataMode) {
506 MInt status = ncmpi_enddef(b_ncId);
507 b_error(status, m_fileName, AT_);
508 b_ncDataMode = true;
509 }
510}
511
512
519void ParallelIoPNetcdf::b_ncRedef() {
520 TRACE();
521
522 if(b_ncDataMode) {
523 MInt status = ncmpi_redef(b_ncId);
524 b_error(status, m_fileName, AT_);
525 b_ncDataMode = false;
526 }
527}
528
529
539 TRACE();
540
543}
544
545
546// Forward declaration of specialization for use in b_addAdditionalHeader
547template <>
548void ParallelIoPNetcdf::b_setAttribute(const MString* value,
549 const MString& name,
550 const MString& datasetName,
551 const size_type totalCount);
552
553
561void ParallelIoPNetcdf::b_addAdditionalHeader() {
562 TRACE();
563
564 b_ncRedef();
565
566 // For a newly created or modified file, add some meta information
567 if(m_fileMode != PIO_READ) {
568 const MInt maxNoChars = 256;
569
570 // Get all meta-data on root process & communicate, since some of the C
571 // functions might not be thread-safe (at least getpwuid() is not)
572 MChar user[maxNoChars];
573 MChar host[maxNoChars];
574 MChar dir[maxNoChars];
575 MChar exec[maxNoChars];
576 MChar date[maxNoChars];
577
578 // Create object & initialize data
579 fill(user, user + maxNoChars, '\0');
580 fill(host, host + maxNoChars, '\0');
581 fill(dir, dir + maxNoChars, '\0');
582 fill(exec, exec + maxNoChars, '\0');
583 fill(date, date + maxNoChars, '\0');
584
585 if(m_domainId == 0) {
586 // Gets the current username
587 passwd* p;
588 p = getpwuid(getuid());
589 if(p) {
590 strncpy(user, p->pw_name, maxNoChars - 1);
591 } else {
592 strncpy(user, "n/a", maxNoChars - 1);
593 }
594
595 // Gets the current hostname
596 gethostname(host, maxNoChars - 1);
597 strcpy(&host[strlen(host)], " (");
598 strcpy(&host[strlen(host)], XSTRINGIFY(MAIA_HOST_STRING));
599 strcpy(&host[strlen(host)], ")");
600
601// Gets the current directory
602#ifdef MAIA_GCC_COMPILER
603#pragma GCC diagnostic push
604#pragma GCC diagnostic ignored "-Wunused-result"
605#endif
606
607 getcwd(dir, maxNoChars - 1);
608
609 readlink("/proc/self/exe", exec, maxNoChars - 1);
610
611#ifdef MAIA_GCC_COMPILER
612#pragma GCC diagnostic pop
613#endif
614
615 // Get the current time and write it to rawTime
616 time_t rawTime;
617 time(&rawTime);
618
619 // Convert to time struct
620 tm* timeInfo;
621 timeInfo = localtime(&rawTime);
622
623 // Format time to string and save to buffer
624 strftime(date, maxNoChars, "%Y-%m-%d %H:%M:%S", timeInfo);
625 }
626
627 // Pack data
628 const MInt noItems = 5;
629 MChar buffer[noItems * maxNoChars];
630 memcpy(buffer + 0 * maxNoChars, user, maxNoChars);
631 memcpy(buffer + 1 * maxNoChars, host, maxNoChars);
632 memcpy(buffer + 2 * maxNoChars, dir, maxNoChars);
633 memcpy(buffer + 3 * maxNoChars, exec, maxNoChars);
634 memcpy(buffer + 4 * maxNoChars, date, maxNoChars);
635
636 // Broadcast the metadata from rank 0 to ensure that every rank has the same
637 // information
638 MPI_Bcast(&buffer, noItems * maxNoChars, MPI_CHAR, 0, m_mpiComm, AT_, "buffer");
639
640 // Unpack data
641 memcpy(user, buffer + 0 * maxNoChars, maxNoChars);
642 memcpy(host, buffer + 1 * maxNoChars, maxNoChars);
643 memcpy(dir, buffer + 2 * maxNoChars, maxNoChars);
644 memcpy(exec, buffer + 3 * maxNoChars, maxNoChars);
645 memcpy(date, buffer + 4 * maxNoChars, maxNoChars);
646 MString version = MString(XSTRINGIFY(MAIA_VERSION_STRING));
647 MString build = MString(XSTRINGIFY(MAIA_COMPILER_STRING)) + " " + MString(XSTRINGIFY(MAIA_BUILD_TYPE_STRING)) + " ("
648 + MString(XSTRINGIFY(MAIA_COMPILER_VERSION_STRING)) + ")";
649
650 if(m_fileMode == PIO_CREATE || m_fileMode == PIO_REPLACE) {
651 // Add file attributes only needed for creation
652 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(user), "_meta_creation_user");
653 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(host), "_meta_creation_host");
654 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(dir), "_meta_creation_directory");
655 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(exec), "_meta_creation_executable");
656 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(m_noDomains, "_meta_creation_noDomains");
657 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(date), "_meta_creation_date");
658 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(version, "_meta_creation_revision");
659 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(build, "_meta_creation_build");
660 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(ncmpi_inq_libvers(), "_meta_creation_pnetcdf_version");
661 } else if(m_fileMode == PIO_APPEND) {
662 // Add file attributes that should be set at each modification
663 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(user), "_meta_lastModified_user");
664 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(host), "_meta_lastModified_host");
665 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(dir), "_meta_lastModified_directory");
666 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(exec), "_meta_lastModified_executable");
667 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(m_noDomains, "_meta_lastModified_noDomains");
668 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(MString(date), "_meta_lastModified_date");
669 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(version, "_meta_lastModified_revision");
670 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(build, "_meta_lastModified_build");
671 ParallelIoBase<ParallelIoPNetcdf>::setAttribute(ncmpi_inq_libvers(), "_meta_lastModified_pnetcdf_version");
672 }
673 }
674}
675
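// The metadata above is gathered on rank 0 only and distributed with a single broadcast of
// fixed-width character slots, so every rank writes identical attribute values. A standalone
// sketch of that pack/broadcast pattern (plain MPI_Bcast; the wrapper used above additionally
// takes a location tag and a name, and the slot contents here are made up):
#if 0 // illustrative sketch
#include <cstring>
#include <iostream>
#include <mpi.h>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int maxNoChars = 256;
  const int noItems = 2;
  char buffer[noItems * maxNoChars] = {};

  // Only rank 0 fills the slots (e.g. user name and date)
  if(rank == 0) {
    std::strncpy(buffer + 0 * maxNoChars, "someUser", maxNoChars - 1);
    std::strncpy(buffer + 1 * maxNoChars, "2024-01-01 00:00:00", maxNoChars - 1);
  }

  // One broadcast distributes all slots at once
  MPI_Bcast(buffer, noItems * maxNoChars, MPI_CHAR, 0, MPI_COMM_WORLD);

  std::cout << "rank " << rank << " sees user '" << buffer << "'" << std::endl;
  MPI_Finalize();
  return 0;
}
#endif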
676
684 TRACE();
685
686 // At the moment, nothing happens here
687}
688
689void ParallelIoPNetcdf::b_defineArray(maiabd_type type, const MString& name, const MString& path,
690 const size_type noDims, const size_type* totalCount) {
691 TRACE();
692 (void)type;
693 (void)name;
694 (void)path;
695 (void)noDims;
696 (void)totalCount;
697 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
698}
699
700
701//------------------------------------------------------------------------------
702// Define mode methods
703//------------------------------------------------------------------------------
717void ParallelIoPNetcdf::b_defineArray(maiabd_type type, const MString& name, size_type noDims, size_type* totalCount) {
718 TRACE();
719
720 b_ncRedef();
721
722 // Fill values
723 MFloat fillValueFloat = MFloatNaN;
724 MInt fillValueInt = std::numeric_limits<MInt>::max();
725 MLong fillValueLong = std::numeric_limits<MLong>::max();
726 MUlong fillValueUlong = std::numeric_limits<MUlong>::max();
727 MUchar fillValueChar = std::numeric_limits<MChar>::max();
728 MUchar fillValueUchar = std::numeric_limits<MUchar>::max();
729
730 // Determine NC data type
731 nc_type dataType;
732 [[maybe_unused]] void* fillValue = nullptr;
733 if(type == PIO_FLOAT) {
734 dataType = NC_DOUBLE;
735 fillValue = &fillValueFloat;
736 } else if(type == PIO_INT) {
737 dataType = NC_INT;
738 fillValue = &fillValueInt;
739 } else if(type == PIO_LONG) {
740 dataType = NC_INT64;
741 fillValue = &fillValueLong;
742 } else if(type == PIO_STRING) {
743 dataType = NC_CHAR;
744 fillValue = &fillValueChar;
745 } else if(type == PIO_UCHAR) {
746 dataType = NC_UBYTE;
747 fillValue = &fillValueUchar;
748 } else if(type == PIO_ULONGLONG) {
749 dataType = NC_UINT64;
750 fillValue = &fillValueUlong;
751 } else {
752 TERMM(1, "Invalid ParallelIo data type!");
753 }
754
755 // Determine whether (a) new dimension(s) needs to be created and if yes,
756 // create it
757 MInt dimId, status, varId;
758 MIntScratchSpace dimIds(noDims, FUN_, "dimIds");
759 for(size_type dId = 0; dId < noDims; dId++) {
760 if(b_ncDimensions.count(totalCount[dId]) != 0u) {
761 // Dimension was found, now get its id
762 dimId = b_ncDimensions.find(totalCount[dId])->second.dimId;
763 } else {
764 // Get next dimension name
765 MInt maxUsedDimensionNo = -1;
766 for(NcDimMap::const_iterator it = b_ncDimensions.begin(); it != b_ncDimensions.end(); it++) {
767 maxUsedDimensionNo = max(maxUsedDimensionNo, it->second.dimNo);
768 }
769
770 // Set dimension name according to found dimension numbers
771 const MInt dimNo = maxUsedDimensionNo + 1;
772 const MString dimensionName = "dim" + to_string(dimNo);
773
774 // Define new dimension in the file and store its information
775 status = ncmpi_def_dim(b_ncId, dimensionName.c_str(), totalCount[dId], &dimId);
776 b_error(status, dimensionName, AT_);
777
778 // Add dimension to map
779 b_ncDimensions[totalCount[dId]] = NcDimProxy();
780 b_ncDimensions[totalCount[dId]].dimNo = dimNo;
781 b_ncDimensions[totalCount[dId]].dimId = dimId;
782 }
783 dimIds[dId] = dimId;
784 }
785
786 // Define dimension and variable in the file
787 status = ncmpi_def_var(b_ncId, name.c_str(), dataType, noDims, &dimIds[0], &varId);
788 b_error(status, name, AT_);
789
790#if MAIA_NCMPI_FILL_VARIABLES
791 // NOTE: set fill mode and default fill value to detect unwritten variables/detect IO errors
792 status = ncmpi_def_var_fill(b_ncId, varId, 0, fillValue);
793 b_error(status, name, AT_);
794#endif
795}
796
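// b_defineArray() above shares NetCDF dimensions between variables by keying them on their length
// and only defines a new "dimN" when no dimension of that length exists yet. A standalone sketch
// of that reuse-or-create bookkeeping (DimProxy and getOrCreateDim are illustrative names; the
// print statement stands in for ncmpi_def_dim):
#if 0 // illustrative sketch
#include <algorithm>
#include <iostream>
#include <map>
#include <string>

struct DimProxy {
  int dimNo;
  int dimId;
};

int getOrCreateDim(std::map<long long, DimProxy>& dims, long long length, int& nextDimId) {
  auto it = dims.find(length);
  if(it != dims.end()) {
    return it->second.dimId; // reuse the existing dimension of the same length
  }

  // Name the new dimension after the highest dimension number already in use
  int maxUsedDimensionNo = -1;
  for(const auto& d : dims) {
    maxUsedDimensionNo = std::max(maxUsedDimensionNo, d.second.dimNo);
  }
  const int dimNo = maxUsedDimensionNo + 1;
  const std::string name = "dim" + std::to_string(dimNo);
  std::cout << "defining " << name << " with length " << length << std::endl;

  DimProxy proxy;
  proxy.dimNo = dimNo;
  proxy.dimId = nextDimId;
  dims[length] = proxy;
  return nextDimId++;
}

int main() {
  std::map<long long, DimProxy> dims;
  int nextDimId = 0;
  getOrCreateDim(dims, 128, nextDimId); // defines dim0
  getOrCreateDim(dims, 64, nextDimId);  // defines dim1
  getOrCreateDim(dims, 128, nextDimId); // reuses dim0
  return 0;
}
#endif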
797
808void ParallelIoPNetcdf::b_defineScalar(maiabd_type type, const MString& name) {
809 TRACE();
810
811 // Enter define mode before adding the scalar variable
812 b_ncRedef();
813
814 // Fill values
815 MFloat fillValueFloat = MFloatNaN;
816 MInt fillValueInt = std::numeric_limits<MInt>::max();
817 MLong fillValueLong = std::numeric_limits<MLong>::max();
818 MUlong fillValueUlong = std::numeric_limits<MUlong>::max();
819 MUchar fillValueChar = std::numeric_limits<MChar>::max();
820 MUchar fillValueUchar = std::numeric_limits<MUchar>::max();
821
822 // Determine NC data type
823 nc_type dataType;
824 [[maybe_unused]] void* fillValue = nullptr;
825 if(type == PIO_FLOAT) {
826 dataType = NC_DOUBLE;
827 fillValue = &fillValueFloat;
828 } else if(type == PIO_INT) {
829 dataType = NC_INT;
830 fillValue = &fillValueInt;
831 } else if(type == PIO_LONG) {
832 dataType = NC_INT64;
833 fillValue = &fillValueLong;
834 } else if(type == PIO_STRING) {
835 dataType = NC_CHAR;
836 fillValue = &fillValueChar;
837 } else if(type == PIO_UCHAR) {
838 dataType = NC_UBYTE;
839 fillValue = &fillValueUchar;
840 } else if(type == PIO_ULONGLONG) {
841 dataType = NC_UINT64;
842 fillValue = &fillValueUlong;
843 } else {
844 TERMM(1, "Invalid ParallelIo data type!");
845 }
846
847 // For a scalar, only the variable needs to be defined
848 MInt varId;
849 MInt status = ncmpi_def_var(b_ncId, name.c_str(), dataType, 0, nullptr, &varId);
850 b_error(status, name, AT_);
851
852#if MAIA_NCMPI_FILL_VARIABLES
853 // NOTE: set fill mode and default fill value to detect unwritten variables/detect IO errors
854 status = ncmpi_def_var_fill(b_ncId, varId, 0, fillValue);
855 b_error(status, name, AT_);
856#endif
857}
858
859
860//------------------------------------------------------------------------------
861// Inquiry methods
862//------------------------------------------------------------------------------
878MBool ParallelIoPNetcdf::b_hasDataset(const MString& name, const size_type noDimensions) {
879 TRACE();
880
881 // Get number of variables in file
882 MInt noVars;
883 MInt status = ncmpi_inq_nvars(b_ncId, &noVars);
884 b_error(status, m_fileName, AT_);
885
886 // Check for each variable whether
887 // - the names match
888 // - it has the given number of dimensions (or any, if noDimensions is -1, the default)
889 MBool varExists = false;
890 for(MInt i = 0; i < noVars; i++) {
891 MChar varname[NC_MAX_NAME + 1];
892 status = ncmpi_inq_varname(b_ncId, i, varname);
893 b_error(status, m_fileName, AT_);
894
895 MInt nDims;
896 status = ncmpi_inq_varndims(b_ncId, i, &nDims);
897 b_error(status, varname, AT_);
898
899 if(name == varname && (nDims == noDimensions || noDimensions == -1)) {
900 varExists = true;
901 break;
902 }
903 }
904
905 return varExists;
906}
907
909 TRACE();
910
911 // pnetcdf doesn't support groups (hdf5 feature), therefore this
912 // function can only check for datasets
913 return b_hasDataset(path, -1);
914}
915
917 TRACE();
918
919 (void)path;
920 (void)name;
921 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
922 return false;
923}
924
925void ParallelIoPNetcdf::b_getDatasetNames(std::vector<MString>& names, const MString& path) {
926 TRACE();
927
928 (void)path;
929 (void)names;
930 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
931}
932
933void ParallelIoPNetcdf::b_getGroupNames(std::vector<MString>& names, const MString& path) {
934 TRACE();
935
936 (void)path;
937 (void)names;
938 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
939}
940
941ParallelIo::size_type ParallelIoPNetcdf::b_getDatasetNoDims(const MString& path, const MString& name) {
942 TRACE();
943
944 (void)path;
945 (void)name;
946 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
947 return -1;
948}
949
950void ParallelIoPNetcdf::b_getDatasetSize(const MString& name, const MString& path, size_type noDims, size_type* data) {
951 TRACE();
952
953 (void)path;
954 (void)name;
955 (void)noDims;
956 (void)data;
957 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
958}
959
960
973 TRACE();
974
975 // Get variable id
976 MInt varId;
977 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
978 b_error(status, name, AT_);
979
980 // Get variable type
981 nc_type ncType;
982 status = ncmpi_inq_vartype(b_ncId, varId, &ncType);
983 b_error(status, name, AT_);
984
985 // Translate NC type to ParallelIo type
986 MInt typeId;
987 if(ncType == NC_INT) {
988 typeId = PIO_INT;
989 } else if(ncType == NC_DOUBLE) {
990 typeId = PIO_FLOAT;
991 } else if(ncType == NC_INT64) {
992 typeId = PIO_LONG;
993 } else if(ncType == NC_CHAR) {
994 typeId = PIO_STRING;
995 } else if(ncType == NC_UBYTE) {
996 typeId = PIO_UCHAR;
997 } else if(ncType == NC_UINT64) {
998 typeId = PIO_ULONGLONG;
999 } else {
1000 typeId = PIO_UNKNOWN_TYPE;
1001 }
1002
1003 return typeId;
1004}
1005
1006
1020void ParallelIoPNetcdf::b_getDatasetNames(vector<MString>& names, const size_type dimension) {
1021 TRACE();
1022
1023 // Erase vector contents
1024 vector<MString>().swap(names);
1025
1026 MInt status;
1027
1028 // Get number of variables in file
1029 MInt noVars;
1030 status = ncmpi_inq_nvars(b_ncId, &noVars);
1031 b_error(status, m_fileName, AT_);
1032
1033 // Check for each variable whether it
1034 // - has the specified number of dimensions, or include all variables if dimension == -1
1035 // (the default)
1036 for(MInt i = 0; i < noVars; i++) {
1037 MInt noDimensions;
1038 status = ncmpi_inq_varndims(b_ncId, i, &noDimensions);
1039 b_error(status, m_fileName, AT_);
1040
1041 if(noDimensions == dimension || dimension == -1) {
1042 // If this is a data file, get data file-specific name, otherwise just get
1043 // the variable name
1044 MChar varname[NC_MAX_NAME + 1];
1045 status = ncmpi_inq_varname(b_ncId, i, varname);
1046 b_error(status, m_fileName, AT_);
1047 names.emplace_back(varname);
1048 }
1049 }
1050}
1051
1052
1066ParallelIo::size_type ParallelIoPNetcdf::b_getDatasetNoDims(const MString& name) {
1067 TRACE();
1068
1069 // Get variable id
1070 MInt varId;
1071 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1072 b_error(status, name, AT_);
1073
1074 // Get number of variable dimensions
1075 MInt noDims;
1076 status = ncmpi_inq_varndims(b_ncId, varId, &noDims);
1077 b_error(status, name, AT_);
1078
1079 return noDims;
1080}
1081
1082
1094ParallelIo::size_type ParallelIoPNetcdf::b_getDatasetSize(const MString& name, const size_type dimensionId) {
1095 TRACE();
1096
1097 // Get variable id
1098 MInt varId;
1099 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1100 b_error(status, name, AT_);
1101
1102 // Get number of array dimensions
1103 size_type noDims = b_getDatasetNoDims(name);
1104
1105 // Get variable dimension
1106 MIntScratchSpace dimId(noDims, FUN_, "dimId");
1107 status = ncmpi_inq_vardimid(b_ncId, varId, &dimId[0]);
1108 b_error(status, name, AT_);
1109
1110 // Get variable size
1111 MPI_Offset arraySize;
1112 status = ncmpi_inq_dimlen(b_ncId, dimId[dimensionId], &arraySize);
1113 b_error(status, name, AT_);
1114
1115 return static_cast<size_type>(arraySize);
1116}
1117
1118
1131MBool ParallelIoPNetcdf::b_hasAttribute(const MString& name, const MString& datasetName) {
1132 TRACE();
1133
1134 // Determine variable id
1135 MInt varId;
1136 if(datasetName.empty()) {
1137 varId = NC_GLOBAL;
1138 } else {
1139 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
1140 b_error(status, datasetName, AT_);
1141 }
1142
1143 // Determine if attribute exists
1144 MInt attId;
1145 MInt status = ncmpi_inq_attid(b_ncId, varId, name.c_str(), &attId);
1146
1147 // Generate return value
1148 MBool attributeExists;
1149 if(status == NC_NOERR) {
1150 attributeExists = true;
1151 } else if(status == NC_ENOTATT) {
1152 attributeExists = false;
1153 } else {
1154 attributeExists = false;
1155 b_error(status, name, AT_);
1156 }
1157
1158 return attributeExists;
1159}
1160
1161
1175 TRACE();
1176
1177 // Determine variable id
1178 MInt varId;
1179 if(datasetName.empty()) {
1180 varId = NC_GLOBAL;
1181 } else {
1182 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
1183 b_error(status, datasetName, AT_);
1184 }
1185
1186 // Get attribute type
1187 nc_type ncType;
1188 MInt status = ncmpi_inq_atttype(b_ncId, varId, name.c_str(), &ncType);
1189 b_error(status, name, AT_);
1190
1191 // Translate NC type to ParallelIo type
1192 MInt typeId;
1193 if(ncType == NC_INT) {
1194 typeId = PIO_INT;
1195 } else if(ncType == NC_DOUBLE) {
1196 typeId = PIO_FLOAT;
1197 } else if(ncType == NC_INT64) {
1198 typeId = PIO_LONG;
1199 } else if(ncType == NC_CHAR) {
1200 typeId = PIO_STRING;
1201 } else if(ncType == NC_UBYTE) {
1202 typeId = PIO_UCHAR;
1203 } else if(ncType == NC_UINT64) {
1204 typeId = PIO_ULONGLONG;
1205 } else {
1206 typeId = PIO_UNKNOWN_TYPE;
1207 }
1208
1209 return typeId;
1210}
1211
1212
1219 TRACE();
1220
1221 char value[MPI_MAX_INFO_VAL];
1222 MInt status, len, flag;
1223 MPI_Offset header_size, header_extent;
1224 MPI_Offset h_align = -1, v_align = -1, h_chunk = -1;
1225 MPI_Info info_used;
1226
1227 // Get header size
1228 status = ncmpi_inq_header_size(b_ncId, &header_size);
1229 b_error(status, m_fileName, AT_);
1230
1231 // Get maximum size of header (without the need to move data)
1232 status = ncmpi_inq_header_extent(b_ncId, &header_extent);
1233 b_error(status, m_fileName, AT_);
1234
1235 // Get file MPI information
1236 status = ncmpi_inq_file_info(b_ncId, &info_used);
1237 b_error(status, m_fileName, AT_);
1238
1239 // Get header align size (in bytes)
1240 MPI_Info_get_valuelen(info_used, "nc_header_align_size", &len, &flag, AT_);
1241 if(flag != 0) {
1242 MPI_Info_get(info_used, "nc_header_align_size", len + 1, value, &flag, AT_);
1243 h_align = strtoll(value, nullptr, 10);
1244 }
1245
1246 // Get variable align size (in bytes)
1247 MPI_Info_get_valuelen(info_used, "nc_var_align_size", &len, &flag, AT_);
1248 if(flag != 0) {
1249 MPI_Info_get(info_used, "nc_var_align_size", len + 1, value, &flag, AT_);
1250 v_align = strtoll(value, nullptr, 10);
1251 }
1252
1253 // Get header read chunk size (in bytes)
1254 MPI_Info_get_valuelen(info_used, "nc_header_read_chunk_size", &len, &flag, AT_);
1255 if(flag != 0) {
1256 MPI_Info_get(info_used, "nc_header_read_chunk_size", len + 1, value, &flag, AT_);
1257 h_chunk = strtoll(value, nullptr, 10);
1258 }
1259
1260 MPI_Info_free(&info_used, AT_);
1261
1262 // Output file hint information
1263 std::cerr << "##### PNetCDF file hints #####" << std::endl;
1264
1265 if(h_align == -1) {
1266 std::cerr << "nc_header_align_size is NOT set" << std::endl;
1267 } else {
1268 std::cerr << "nc_header_align_size set to = " << h_align << std::endl;
1269 }
1270
1271 if(v_align == -1) {
1272 std::cerr << "nc_var_align_size is NOT set" << std::endl;
1273 } else {
1274 std::cerr << "nc_var_align_size set to = " << v_align << std::endl;
1275 }
1276 if(h_chunk == -1) {
1277 std::cerr << "nc_header_read_chunk_size is NOT set" << std::endl;
1278 } else {
1279 std::cerr << "nc_header_read_chunk_size set to = " << h_chunk << std::endl;
1280 }
1281
1282 std::cerr << "header size = " << header_size << std::endl;
1283 std::cerr << "header extent = " << header_extent << std::endl;
1284
1285 // Check free header space in append mode
1286 if(m_fileMode == PIO_APPEND) {
1287 const MPI_Offset header_free = header_extent - header_size;
1288 std::cerr << "header free space (append mode) = " << header_free << std::endl;
1289 if(header_free < 1024) {
1290 std::cerr << "WARNING: ParallelIoPNetcdf file in append mode has less than 1KB of free header "
1291 "space. If this space is used up by adding new data to the header the entire data "
1292 "file has to be moved to make room for the new definitions which may cause MPI I/O "
1293 "errors."
1294 << std::endl;
1295 }
1296 }
1297
1298 std::cerr << "##### PNetCDF file hints #####" << std::endl;
1299}
1300
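// The hint queries above follow the usual MPI_Info two-step: probe the value length, then fetch
// the value and parse it. A standalone sketch of that pattern (plain MPI calls without the AT_
// argument of the wrappers used above; the hint value is made up):
#if 0 // illustrative sketch
#include <cstdlib>
#include <iostream>
#include <mpi.h>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  MPI_Info info;
  MPI_Info_create(&info);
  MPI_Info_set(info, "nc_header_align_size", "4096");

  int len = 0, flag = 0;
  long long h_align = -1;
  char value[MPI_MAX_INFO_VAL];
  MPI_Info_get_valuelen(info, "nc_header_align_size", &len, &flag);
  if(flag != 0) {
    MPI_Info_get(info, "nc_header_align_size", len + 1, value, &flag);
    h_align = std::strtoll(value, nullptr, 10);
  }
  std::cout << "nc_header_align_size = " << h_align << std::endl;

  MPI_Info_free(&info);
  MPI_Finalize();
  return 0;
}
#endif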
1301
1302template <class T>
1303void ParallelIoPNetcdf::b_writeArray(const T* array, const MString& path, const MString& name, const size_type noDims,
1304 const size_type* start, const size_type* count, const size_type* ghost) {
1305 (void)array;
1306 (void)path;
1307 (void)name;
1308 (void)noDims;
1309 (void)start;
1310 (void)count;
1311 (void)ghost;
1312 TRACE();
1313 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
1314}
1315
1316
1317//------------------------------------------------------------------------------
1318// Data mode methods
1319//------------------------------------------------------------------------------
1335template <class T>
1336void ParallelIoPNetcdf::b_writeArray(const T* const array, const MString& name, const size_type noDims,
1337 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
1338 const size_type noChunks, MPI_Offset diskStride) {
1339 TRACE();
1340
1341 b_ncEndDef();
1342
1343 // Get variable id
1344 MInt varId;
1345 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1346 b_error(status, name, AT_);
1347
1348 // Determine total data count
1349 size_type totalCount = 1;
1350 for(size_type d = 0; d < noDims; d++) {
1351 totalCount *= count[d];
1352 }
1353
1354 // Create temporary storage space if needed and set data pointers
1355 MInt tmpScratchSize = (memoryStride == 1) ? 1 : totalCount;
1356 ScratchSpace<T> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1357
1358 // Pack strided data
1359 const T* data = 0;
1360 if(memoryStride == 1) {
1361 data = array;
1362 } else {
1363 for(MPI_Offset i = 0; i < totalCount; i++) {
1364 tmpScratch[i] = array[memoryStride * i];
1365 }
1366 data = tmpScratch.data();
1367 }
1368
1369 // labels:IO this is a bugfix for the case when the last process in the communicator
1370 // has zero elements to write, which resulted in the error:
1371 //"NetCDF returns status -40: Index exceeds dimension bound"
1372 // solution taken from here:
1373 // http://lists.mcs.anl.gov/pipermail/parallel-netcdf/2004-December/000388.html
1374 if(count[0] == 0) {
1375 start[0] = 0;
1376 }
1377
1378 // Write array
1379 if(noChunks == 1) {
1380 // If number of chunks is one, write everything at once
1381 if(diskStride == 1) {
1382 status = pnetcdf_traits<T>::ncmpi_put_vara_type_all(b_ncId, varId, start, count, data);
1383 } else {
1384 status = pnetcdf_traits<T>::ncmpi_put_vars_type_all(b_ncId, varId, start, count, &diskStride, data);
1385 }
1386 b_error(status, name, AT_);
1387 } else {
1388 // Write in chunks
1389 MPI_Offset chunkSize = count[0] / noChunks;
1390 if(count[0] % noChunks > 0) {
1391 chunkSize += 1;
1392 }
1393
1394 // Determine number of entries for a fixed first dimension index
1395 size_type nDSize = 1;
1396 for(size_type d = 1; d < noDims; d++) {
1397 nDSize *= count[d];
1398 }
1399
1400 ScratchSpace<MPI_Offset> start_(noDims, FUN_, "start_");
1401 ScratchSpace<MPI_Offset> count_(noDims, FUN_, "count_");
1402
1403 std::copy(start, start + noDims, &start_[0]);
1404 std::copy(count, count + noDims, &count_[0]);
1405
1406 for(size_type i = 0; i < noChunks; i++) {
1407 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1408 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1409 const T* data_ = data + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1410 if(diskStride == 1) {
1411 status = pnetcdf_traits<T>::ncmpi_put_vara_type_all(b_ncId, varId, &start_[0], &count_[0], data_);
1412 } else {
1413 status = pnetcdf_traits<T>::ncmpi_put_vars_type_all(b_ncId, varId, &start_[0], &count_[0], &diskStride, data_);
1414 }
1415 b_error(status, name, AT_);
1416 }
1417 }
1418}
1419
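// The chunked path above splits the slowest dimension into noChunks collective calls of
// ceil(count[0] / noChunks) rows each, clamping the last chunk. A standalone sketch of just that
// chunk arithmetic (diskStride and the start clamping of the real code are omitted; the numbers
// are made up):
#if 0 // illustrative sketch
#include <algorithm>
#include <iostream>

int main() {
  const long long count0 = 10;   // rows this rank writes
  const long long start0 = 100;  // global offset of the first row
  const long long noChunks = 3;

  long long chunkSize = count0 / noChunks;
  if(count0 % noChunks > 0) {
    chunkSize += 1; // round up so noChunks chunks always cover count0 rows
  }

  for(long long i = 0; i < noChunks; i++) {
    const long long chunkStart = start0 + i * chunkSize;
    const long long chunkCount = std::max(std::min(chunkSize, count0 - i * chunkSize), 0ll);
    std::cout << "chunk " << i << ": start=" << chunkStart << " count=" << chunkCount << std::endl;
  }
  // Prints (100,4), (104,4), (108,2); ranks with fewer rows still post every collective call,
  // possibly with count 0, since all ranks must participate the same number of times.
  return 0;
}
#endif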
1420
1435template <>
1436void ParallelIoPNetcdf::b_writeArray(const MString* array, const MString& name, const size_type noDims,
1437 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
1438 const size_type noChunks, MPI_Offset diskStride) {
1439 TRACE();
1440
1441 b_ncEndDef();
1442
1443 // Get variable id
1444 MInt varId;
1445 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1446 b_error(status, name, AT_);
1447
1448 // Determine total number of strings (last dimension is string length)
1449 size_type totalCount = 1;
1450 for(size_type d = 0; d < noDims - 1; d++) {
1451 totalCount *= count[d];
1452 }
1453
1454 // Determine length of one string
1455 size_type strLen = count[noDims - 1];
1456
1457 // Create temporary storage space if needed and set data pointers
1458 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1459 ScratchSpace<MString> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1460
1461 // Pack strided data
1462 const MString* data = 0;
1463 if(memoryStride == 1) {
1464 data = array;
1465 } else {
1466 for(MPI_Offset i = 0; i < totalCount; i++) {
1467 tmpScratch[i] = array[memoryStride * i];
1468 }
1469 data = tmpScratch.data();
1470 }
1471
1472 // labels:IO this is a bugfix for the case when the last process in the communicator
1473 // has zero elements to write, which resulted in the error:
1474 //"NetCDF returns status -40: Index exceeds dimension bound"
1475 // solution taken from here:
1476 // http://lists.mcs.anl.gov/pipermail/parallel-netcdf/2004-December/000388.html
1477 if(count[0] == 0) {
1478 start[0] = 0;
1479 }
1480
1481 // Create buffer for writing
1482 size_type nCount = totalCount * strLen;
1483 ScratchSpace<char> buf(nCount, FUN_, "buf");
1484
1485 // Copy data to buffer
1486 for(MPI_Offset i = 0; i < totalCount; i++) {
1487 data[i].copy(&buf[i * strLen], strLen, 0);
1488 }
1489
1490 // Write array
1491 if(noChunks == 1) {
1492 // If number of chunks is one, write everything at once
1493 if(diskStride == 1) {
1494 status = ncmpi_put_vara_text_all(b_ncId, varId, start, count, &buf[0]);
1495 } else {
1496 status = ncmpi_put_vars_text_all(b_ncId, varId, start, count, &diskStride, &buf[0]);
1497 }
1498 b_error(status, name, AT_);
1499 } else {
1500 // Write in chunks
1501 MPI_Offset chunkSize = count[0] / noChunks;
1502 if(count[0] % noChunks > 0) {
1503 chunkSize += 1;
1504 }
1505
1506 // Determine number of entries for a fixed first dimension index
1507 size_type nDSize = 1;
1508 for(size_type d = 1; d < noDims; d++) {
1509 nDSize *= count[d];
1510 }
1511
1512 ScratchSpace<MPI_Offset> start_(noDims, FUN_, "start_");
1513 ScratchSpace<MPI_Offset> count_(noDims, FUN_, "count_");
1514
1515 std::copy(start, start + noDims, &start_[0]);
1516 std::copy(count, count + noDims, &count_[0]);
1517
1518 for(size_type i = 0; i < noChunks; i++) {
1519 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1520 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1521 const char* buf_ = &buf[0] + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1522 if(diskStride == 1) {
1523 status = ncmpi_put_vara_text_all(b_ncId, varId, &start_[0], &count_[0], &buf_[0]);
1524 } else {
1525 status = ncmpi_put_vars_text_all(b_ncId, varId, &start_[0], &count_[0], &diskStride, &buf_[0]);
1526 }
1527 b_error(status, name, AT_);
1528 }
1529 }
1530}
1531
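// String arrays treat the last dimension as a fixed string length: each MString is packed into a
// strLen-wide char slot (shorter strings leave trailing '\0's) before the collective text write,
// and the read specialization below unpacks the same layout. A standalone sketch of that round
// trip with std::string (sizes and contents are made up):
#if 0 // illustrative sketch
#include <iostream>
#include <string>
#include <vector>

int main() {
  const std::size_t strLen = 8;
  const std::vector<std::string> data = {"alpha", "beta"};

  // Pack into one contiguous text buffer (what ncmpi_put_vara_text_all receives)
  std::vector<char> buf(data.size() * strLen, '\0');
  for(std::size_t i = 0; i < data.size(); i++) {
    data[i].copy(&buf[i * strLen], strLen, 0);
  }

  // Unpack again, slot by slot; c_str() stops at the first '\0' padding byte
  for(std::size_t i = 0; i < data.size(); i++) {
    const std::string tmp(&buf[i * strLen], strLen);
    std::cout << "slot " << i << ": '" << tmp.c_str() << "'" << std::endl;
  }
  return 0;
}
#endif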
1532
1542template <class T>
1543void ParallelIoPNetcdf::b_writeScalar(const T scalar, const MString& name) {
1544 TRACE();
1545
1546 b_ncEndDef();
1547 MInt status;
1548
1549 // Get variable id
1550 MInt varId;
1551 status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1552 b_error(status, name, AT_);
1553
1554 // Determine offsets
1555 MPI_Offset start = 0;
1556 MPI_Offset count = 1;
1557
1558 // Write scalar
1559 status = pnetcdf_traits<T>::ncmpi_put_vara_type_all(b_ncId, varId, &start, &count, &scalar);
1560 b_error(status, name, AT_);
1561}
1562
1563
1573template <>
1574void ParallelIoPNetcdf::b_writeScalar(const MString& scalar, const MString& name) {
1575 TRACE();
1576
1577 b_ncEndDef();
1578 MInt status;
1579
1580 // Get variable id
1581 MInt varId;
1582 status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1583 b_error(status, name, AT_);
1584
1585 // Determine offsets
1586 MPI_Offset start = 0;
1587 MPI_Offset count = 1;
1588
1589 // Write scalar
1590 status = ncmpi_put_vara_text_all(b_ncId, varId, &start, &count, scalar.c_str());
1591 b_error(status, name, AT_);
1592}
1593
1594template <class T>
1595void ParallelIoPNetcdf::b_readArray(T* array, const MString& path, const MString& name, const size_type noDims,
1596 const size_type* start, const size_type* count) {
1597 TRACE();
1598
1599 (void)array;
1600 (void)path;
1601 (void)name;
1602 (void)noDims;
1603 (void)start;
1604 (void)count;
1605 mTerm(1, AT_, "Group functionality not supported by PNetcdf backend");
1606}
1607
1608
1625template <class T>
1626void ParallelIoPNetcdf::b_readArray(T* array, const MString& name, const size_type noDims, MPI_Offset* start,
1627 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
1628 MPI_Offset diskStride) {
1629 TRACE();
1630
1631 b_ncEndDef();
1632
1633 // Get variable id
1634 MInt varId;
1635 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1636 b_error(status, name, AT_);
1637
1638 // Determine total data count
1639 size_type totalCount = 1;
1640 for(size_type d = 0; d < noDims; d++) {
1641 totalCount *= count[d];
1642 }
1643
1644 // Create temporary storage space if needed and set data pointers
1645 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1646 ScratchSpace<T> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1647 T* data = nullptr;
1648 if(memoryStride == 1) {
1649 data = array;
1650 } else {
1651 data = tmpScratch.data();
1652 }
1653
1654 // See b_writeArray(T*) for explanation
1655 if(count[0] == 0) {
1656 start[0] = 0;
1657 }
1658
1659 // Read array
1660 if(noChunks == 1) {
1661 // If number of chunks is one, read everything at once
1662 if(diskStride == 1) {
1663 status = pnetcdf_traits<T>::ncmpi_get_vara_type_all(b_ncId, varId, start, count, data);
1664 } else {
1665 status = pnetcdf_traits<T>::ncmpi_get_vars_type_all(b_ncId, varId, start, count, &diskStride, data);
1666 }
1667 b_error(status, name, AT_);
1668 } else {
1669 // Read in chunks
1670 MPI_Offset chunkSize = count[0] / noChunks;
1671 if(count[0] % noChunks > 0) {
1672 chunkSize += 1;
1673 }
1674
1675 // Determine number of entries for a fixed first dimension index
1676 size_type nDSize = 1;
1677 for(size_type d = 1; d < noDims; d++) {
1678 nDSize *= count[d];
1679 }
1680
1681 ScratchSpace<MPI_Offset> start_(noDims, FUN_, "start_");
1682 ScratchSpace<MPI_Offset> count_(noDims, FUN_, "count_");
1683
1684 std::copy(start, start + noDims, &start_[0]);
1685 std::copy(count, count + noDims, &count_[0]);
1686
1687 for(size_type i = 0; i < noChunks; i++) {
1688 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1689 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1690 T* data_ = data + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1691 if(diskStride == 1) {
1692 status = pnetcdf_traits<T>::ncmpi_get_vara_type_all(b_ncId, varId, &start_[0], &count_[0], data_);
1693 } else {
1694 status = pnetcdf_traits<T>::ncmpi_get_vars_type_all(b_ncId, varId, &start_[0], &count_[0], &diskStride, data_);
1695 }
1696 b_error(status, name, AT_);
1697 }
1698 }
1699
1700 // Unpack strided data if necessary
1701 if(memoryStride != 1) {
1702 for(MPI_Offset i = 0; i < totalCount; i++) {
1703 array[memoryStride * i] = tmpScratch[i];
1704 }
1705 }
1706}
1707
1708
1725template <>
1726void ParallelIoPNetcdf::b_readArray(MString* array, const MString& name, const size_type noDims, MPI_Offset* start,
1727 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
1728 MPI_Offset diskStride) {
1729 TRACE();
1730
1731 b_ncEndDef();
1732
1733 // Get variable id
1734 MInt varId;
1735 MInt status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1736 b_error(status, name, AT_);
1737
1738 // Determine total data count
1739 size_type totalCount = 1;
1740 for(size_type d = 0; d < noDims - 1; d++) {
1741 totalCount *= count[d];
1742 }
1743
1744 // Determine length of one string
1745 size_type strLen = count[noDims - 1];
1746
1747 // Create temporary storage space if needed and set data pointers
1748 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1749 ScratchSpace<MString> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1750 MString* data = nullptr;
1751 if(memoryStride == 1) {
1752 data = array;
1753 } else {
1754 data = tmpScratch.data();
1755 }
1756
1757 // Create buffer for reading
1758 size_type nCount = totalCount * strLen;
1759 ScratchSpace<char> buf(nCount, FUN_, "buf");
1760
1761 // Read array
1762 if(noChunks == 1) {
1763 // If number of chunks is one, read everything at once
1764 if(diskStride == 1) {
1765 status = ncmpi_get_vara_text_all(b_ncId, varId, start, count, &buf[0]);
1766 } else {
1767 status = ncmpi_get_vars_text_all(b_ncId, varId, start, count, &diskStride, &buf[0]);
1768 }
1769 b_error(status, name, AT_);
1770
1771 // Extract strings from buffer
1772 for(size_type i = 0; i < totalCount; i++) {
1773 MString tmp;
1774 tmp.append(&buf[i * strLen], strLen);
1775 data[i].append(tmp.c_str(), 0, strLen);
1776 }
1777 } else {
1778 // Read in chunks
1779 MPI_Offset chunkSize = count[0] / noChunks;
1780 if(count[0] % noChunks > 0) {
1781 chunkSize += 1;
1782 }
1783
1784 ScratchSpace<MPI_Offset> start_(noDims, FUN_, "start_");
1785 ScratchSpace<MPI_Offset> count_(noDims, FUN_, "count_");
1786
1787 std::copy(start, start + noDims, &start_[0]);
1788 std::copy(count, count + noDims, &count_[0]);
1789
1790 for(size_type i = 0; i < noChunks; i++) {
1791 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1792 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1793 if(diskStride == 1) {
1794 status = ncmpi_get_vara_text_all(b_ncId, varId, &start_[0], &count_[0], &buf[0]);
1795 } else {
1796 status = ncmpi_get_vars_text_all(b_ncId, varId, &start_[0], &count_[0], &diskStride, &buf[0]);
1797 }
1798
1799 b_error(status, name, AT_);
1800
1801 // Extract strings from buffer
1802 for(size_type j = start_[0]; j < totalCount; j++) {
1803 MString tmp;
1804 tmp.append(&buf[(j - start_[0]) * strLen], strLen);
1805 data[j].append(tmp, 0, strLen);
1806 }
1807 }
1808 }
1809
1810 // Unpack strided data if necessary
1811 if(memoryStride != 1) {
1812 for(MPI_Offset i = 0; i < totalCount; i++) {
1813 array[memoryStride * i] = tmpScratch[i];
1814 }
1815 }
1816}
1817
1818
1830template <class T>
1831void ParallelIoPNetcdf::b_readScalar(T* scalar, const MString& name) {
1832 TRACE();
1833
1834 b_ncEndDef();
1835 MInt status;
1836
1837 // Get variable id
1838 MInt varId;
1839 status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1840 b_error(status, name, AT_);
1841
1842 // Determine offsets
1843 MPI_Offset start = 0;
1844 MPI_Offset count = 1;
1845
1846 // Read scalar
1847 status = pnetcdf_traits<T>::ncmpi_get_vara_type_all(b_ncId, varId, &start, &count, scalar);
1848 b_error(status, name, AT_);
1849}
1850
1851
1863template <>
1865 TRACE();
1866
1867 b_ncEndDef();
1868 MInt status;
1869
1870 // Get variable id
1871 MInt varId;
1872 status = ncmpi_inq_varid(b_ncId, name.c_str(), &varId);
1873 b_error(status, name, AT_);
1874
1875 // Determine offsets
1876 MPI_Offset start = 0;
1877 MPI_Offset count = 1;
1878
1879 // Read scalar (one char)
1880 status = ncmpi_get_vara_text_all(b_ncId, varId, &start, &count, (char*)scalar);
1881 b_error(status, name, AT_);
1882}
1883
1884//------------------------------------------------------------------------------
1885// Attribute methods
1886//------------------------------------------------------------------------------
1887
1888
1900template <class T>
1901void ParallelIoPNetcdf::b_setAttribute(const T* value, const MString& name, const MString& datasetName,
1902 const size_type totalCount) {
1903 TRACE();
1904
1905 // Determine variable id
1906 MInt varId;
1907 if(datasetName.empty()) {
1908 varId = NC_GLOBAL;
1909 } else {
1910 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
1911 b_error(status, datasetName, AT_);
1912 }
1913
1914 // If attribute does not exist, go to define mode
1915 if(!b_hasAttribute(name, datasetName)) {
1916 b_ncRedef();
1917 }
1918
1919 // Write attribute
1920 MInt status =
1921 pnetcdf_traits<T>::ncmpi_put_att_type(b_ncId, varId, name.c_str(), pnetcdf_traits<T>::type(), totalCount, value);
1922 b_error(status, datasetName + "::" + name, AT_);
1923}
1924
1925
1937template <>
1938void ParallelIoPNetcdf::b_setAttribute(const MString* value, const MString& name, const MString& datasetName,
1939 const size_type totalCount) {
1940 TRACE();
1941
1942 if(totalCount > 1) {
1943 mTerm(1, AT_, "Array of strings attributes not yet supported.");
1944 }
1945
1946 // Determine variable id
1947 MInt varId;
1948 if(datasetName.empty()) {
1949 varId = NC_GLOBAL;
1950 } else {
1951 // If this is a data file, get data file-specific name
1952 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
1953 b_error(status, datasetName, AT_);
1954 }
1955
1956 // If attribute does not exist or is of greater size, go to define mode
1957 if(!b_hasAttribute(name, datasetName)) {
1958 b_ncRedef();
1959 } else {
1960 MPI_Offset length;
1961 MInt status = ncmpi_inq_attlen(b_ncId, varId, name.c_str(), &length);
1962 b_error(status, datasetName + "::" + name, AT_);
1963
1964 if(length < static_cast<MPI_Offset>(value->length())) {
1965 b_ncRedef();
1966 }
1967 }
1968
1969 // Write attribute
1970 MInt status = ncmpi_put_att_text(b_ncId, varId, name.c_str(), value->length(), (*value).c_str());
1971 b_error(status, datasetName + "::" + name, AT_);
1972}
1973
1974
1986template <class T>
1987void ParallelIoPNetcdf::b_getAttribute(T* const value, const MString& name, const MString& datasetName,
1988 const size_type totalCount) {
1989 TRACE();
1990
1991 // Determine variable id
1992 MInt varId;
1993 if(datasetName.empty()) {
1994 varId = NC_GLOBAL;
1995 } else {
1996 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
1997 b_error(status, datasetName, AT_);
1998 }
1999
2000 // Get attribute length
2001 MInt status;
2002 MPI_Offset length;
2003 status = ncmpi_inq_attlen(b_ncId, varId, name.c_str(), &length);
2004 b_error(status, datasetName + "::" + name, AT_);
2005
2006 if(length != (MPI_Offset)totalCount) {
2007 TERMM(1, "Requested attribute (" + name + ") has different number of elements (" + to_string(length)
2008 + ") than specified (" + to_string(totalCount)
2009 + "). Use getAttributeCount() to query number of elements first");
2010 }
2011
2012 // Read attribute
2013 status = pnetcdf_traits<T>::ncmpi_get_att_type(b_ncId, varId, name.c_str(), value);
2014 b_error(status, datasetName + "::" + name, AT_);
2015}
2016
2017
2029template <>
2030void ParallelIoPNetcdf::b_getAttribute(MString* const value, const MString& name, const MString& datasetName,
2031 const size_type totalCount) {
2032 TRACE();
2033
2034 if(totalCount > 1) {
2035 mTerm(1, AT_, "Array of strings attributes not yet supported.");
2036 }
2037
2038 // Determine variable id
2039 MInt varId;
2040 if(datasetName.empty()) {
2041 varId = NC_GLOBAL;
2042 } else {
2043 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
2044 b_error(status, datasetName, AT_);
2045 }
2046
2047 // Get attribute length
2048 MInt status;
2049 MPI_Offset length;
2050 status = ncmpi_inq_attlen(b_ncId, varId, name.c_str(), &length);
2051 b_error(status, datasetName + "::" + name, AT_);
2052
2053 // Read attribute
2054 ScratchSpace<MChar> tmpScratch(length, FUN_, "tmpScratch");
2055 status = ncmpi_get_att_text(b_ncId, varId, name.c_str(), tmpScratch.data());
2056 b_error(status, datasetName + "::" + name, AT_);
2057 value->assign(tmpScratch.data(), length);
2058}
2059
2060
2061void ParallelIoPNetcdf::b_getAttributeCount(const MString& name, size_type* totalCount, const MString& datasetName) {
2062 TRACE();
2063
2064 // Determine variable id
2065 MInt varId;
2066 if(datasetName.empty()) {
2067 varId = NC_GLOBAL;
2068 } else {
2069 MInt status = ncmpi_inq_varid(b_ncId, datasetName.c_str(), &varId);
2070 b_error(status, datasetName, AT_);
2071 }
2072
2073 // Get attribute length
2074 MInt status;
2075 MPI_Offset length;
2076 status = ncmpi_inq_attlen(b_ncId, varId, name.c_str(), &length);
2077 b_error(status, datasetName + "::" + name, AT_);
2078
2079 *totalCount = (size_type)length;
2080}
2081
2082
2083//------------------------------------------------------------------------------
2084// Auxiliary methods
2085//------------------------------------------------------------------------------
2100void ParallelIoPNetcdf::b_error(MInt status, const MString& name, const MString& location) {
2101  // TRACE(); // <- this function is called too often and is not really important.
2102 if(status != NC_NOERR) {
2103 cerr << endl;
2104 cerr << "*** ERROR in parallelio_pnetcdf ***" << endl;
2105 cerr << "NetCDF error in '" << location << "'" << endl;
2106 cerr << "NetCDF returns status " << status << ": " << ncmpi_strerror(status) << endl;
2107 cerr << "The file/variable/attribute in question was: " << name << endl;
2108 cerr << endl;
2109 TERMM(1, "NetCDF error in ParallelIo.");
2110 }
2111}
2112
2113
2114// Explicit instantiations for all supported types
2115// Write methods
2116template void ParallelIoPNetcdf::b_writeArray(const MFloat* const array, const MString& name, const size_type noDims,
2117 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2118 const size_type noChunks, MPI_Offset diskStride);
2119template void ParallelIoPNetcdf::b_writeArray(const MInt* const array, const MString& name, const size_type noDims,
2120 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2121 const size_type noChunks, MPI_Offset diskStride);
2122template void ParallelIoPNetcdf::b_writeArray(const MLong* const array, const MString& name, const size_type noDims,
2123 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2124 const size_type noChunks, MPI_Offset diskStride);
2125template void ParallelIoPNetcdf::b_writeArray(const MUchar* const array, const MString& name, const size_type noDims,
2126 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2127 const size_type noChunks, MPI_Offset diskStride);
2128template void ParallelIoPNetcdf::b_writeScalar(const MFloat scalar, const MString& name);
2129template void ParallelIoPNetcdf::b_writeScalar(const MInt scalar, const MString& name);
2130template void ParallelIoPNetcdf::b_writeScalar(const MLong scalar, const MString& name);
2131template void ParallelIoPNetcdf::b_writeScalar(const MUchar scalar, const MString& name);
2132// Read methods
2133template void ParallelIoPNetcdf::b_readArray(MFloat* const array, const MString& name, const size_type noDims,
2134 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2135 const size_type noChunks, MPI_Offset diskStride);
2136template void ParallelIoPNetcdf::b_readArray(MInt* const array, const MString& name, const size_type noDims,
2137 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2138 const size_type noChunks, MPI_Offset diskStride);
2139template void ParallelIoPNetcdf::b_readArray(MLong* const array, const MString& name, const size_type noDims,
2140 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2141 const size_type noChunks, MPI_Offset diskStride);
2142template void ParallelIoPNetcdf::b_readArray(MUchar* const array, const MString& name, const size_type noDims,
2143 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2144 const size_type noChunks, MPI_Offset diskStride);
2145template void ParallelIoPNetcdf::b_readScalar(MFloat* scalar, const MString& name);
2146template void ParallelIoPNetcdf::b_readScalar(MInt* scalar, const MString& name);
2147template void ParallelIoPNetcdf::b_readScalar(MLong* scalar, const MString& name);
2148template void ParallelIoPNetcdf::b_readScalar(MUchar* scalar, const MString& name);
2149// Attribute methods
2150template void ParallelIoPNetcdf::b_setAttribute(const MFloat* value, const MString& name, const MString& datasetName,
2151 const size_type totalCount);
2152template void ParallelIoPNetcdf::b_setAttribute(const MInt* value, const MString& name, const MString& datasetName,
2153 const size_type totalCount);
2154template void ParallelIoPNetcdf::b_setAttribute(const MLong* value, const MString& name, const MString& datasetName,
2155 const size_type totalCount);
2156template void ParallelIoPNetcdf::b_setAttribute(const MUchar* value, const MString& name, const MString& datasetName,
2157 const size_type totalCount);
2158template void ParallelIoPNetcdf::b_setAttribute(const MUlong* value, const MString& name, const MString& datasetName,
2159 const size_type totalCount);
2160template void ParallelIoPNetcdf::b_getAttribute(MFloat* const value, const MString& name, const MString& datasetName,
2161 const size_type totalCount);
2162template void ParallelIoPNetcdf::b_getAttribute(MInt* const value, const MString& name, const MString& datasetName,
2163 const size_type totalCount);
2164template void ParallelIoPNetcdf::b_getAttribute(MLong* const value, const MString& name, const MString& datasetName,
2165 const size_type totalCount);
2166template void ParallelIoPNetcdf::b_getAttribute(MUchar* const value, const MString& name, const MString& datasetName,
2167 const size_type totalCount);
2168template void ParallelIoPNetcdf::b_getAttribute(MUlong* const value, const MString& name, const MString& datasetName,
2169 const size_type totalCount);
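The templates b_writeArray(), b_readArray(), b_writeScalar(), b_readScalar(), b_setAttribute() and b_getAttribute() are defined in this translation unit, so the explicit instantiations above are what provide linkable symbols for every supported element type. A self-contained illustration of the idiom, with hypothetical names unrelated to m-AIA, is:

// The template definition is visible only in this .cpp file, so every
// element type that callers may use has to be instantiated explicitly.
template <class T>
void store(const T* data, int count) {
  // ... write `count` elements of `data` somewhere ...
  (void)data;
  (void)count;
}

// Explicit instantiations: without them, code in other translation units
// calling store<double>() or store<int>() would fail to link.
template void store<double>(const double*, int);
template void store<int>(const int*, int);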
This class is intended to do all the heavy lifting when it comes to reading and writing "big data" fi...
Definition: parallelio.h:101
std::set< MString > m_unwrittenScalars
Definition: parallelio.h:255
MLong size_type
Type used for all size- and offset-related values.
Definition: parallelio.h:123
std::set< MString > m_unwrittenArrays
Definition: parallelio.h:254
void setAttribute(const T &value, const MString &name, const MString &datasetName="")
Set a file or dataset attribute. [MPI]
Definition: parallelio.h:1438
MInt b_getDatasetType(const MString &name)
Returns the data type of a dataset in the file (can be array, multi-D array or scalar).
static const MString b_fileExt()
Returns backend-specific ending of filename (either ".Netcdf" or ".Hdf5")
void b_getDatasetNames(std::vector< MString > &names, const size_type dimension)
Returns a vector of all existing datasets with the given number of dimensions in the file (if any)....
void b_writeArray(const T *const array, const MString &name, const size_type noDims, MPI_Offset *start, MPI_Offset *count, MPI_Offset memoryStride, const size_type noChunks, MPI_Offset diskStride)
Writes array data to file (generic version). [MPI]
void b_saveHeader()
Adds all additional header information that are needed in the file.
~ParallelIoPNetcdf() override
Calls close(). [MPI]
void b_readScalar(T *scalar, const MString &name)
Read scalar data from file (generic version). [MPI]
void b_printFileHints()
Print PNetCDF file hints to cerr.
void b_writeAdditionalData()
Write additional data to file (NetCDF-specific). [MPI]
void b_getAttribute(T *const value, const MString &name, const MString &datasetName="", const size_type totalCount=1)
Retrieve an attribute from file (generic version).
void b_getAttributeCount(const MString &name, size_type *totalCount, const MString &datasetName="")
void b_getGroupNames(std::vector< MString > &names, const MString &path)
static MBool b_isValidFile(const MString &name, const MPI_Comm &mpiComm)
Check if the specified file is a valid NetCDF file (i.e. can be opened). [MPI]
void b_defineScalar(maiabd_type type, const MString &name)
Defines a scalar in the file. [MPI]
void b_defineArray(maiabd_type type, const MString &name, size_type noDims, size_type *totalCount)
Defines an array in the file. [MPI]
void b_setAttribute(const T *value, const MString &name, const MString &datasetName="", const size_type totalCount=1)
Set an attribute in the file (generic version).
size_type b_getDatasetNoDims(const MString &name)
Get number of dimensions of a dataset with the given name.
size_type b_getDatasetSize(const MString &name, const size_type dimensionId=0)
Get the length of one dimension of an arbitrary array in the file.
MBool b_hasObject(const MString &path)
MBool b_hasDataset(const MString &name, const size_type dimension)
Check if the file contains a dataset with the given name and number of dimensions.
MInt b_getAttributeType(const MString &name, const MString &datasetName="")
Returns the data type of an attribute in the file.
void b_addAdditionalHeader()
Write additional headers to file (e.g. grid file name, creation date etc.). [MPI]
void b_readArray(T *array, const MString &name, const size_type noDims, MPI_Offset *start, MPI_Offset *count, MPI_Offset memoryStride, const size_type noChunks, MPI_Offset diskStride)
Read array data from file (generic version). [MPI]
static void b_error(MInt status, const MString &name, const MString &location)
Check the status code of a PnetCDF operation and output a meaningful message.
void close()
Close the file (normally called by the destructor but needs to be explicitly called earlier in speci...
MBool b_hasAttribute(const MString &name, const MString &datasetName="")
Check if a given attribute exists in the file.
void b_ncEndDef()
Leave the define mode (NetCDF-specific). [MPI]
void b_writeScalar(const T scalar, const MString &name)
Writes scalar data to file (generic version). [MPI]
ParallelIoPNetcdf(const MString &fileName, MInt fileMode, const MPI_Comm &mpiComm)
Creates a new object to read and write big data files. [MPI]
void b_ncRedef()
Enter define mode (NetCDF-specific). [MPI]
This class is a ScratchSpace.
Definition: scratch.h:758
pointer data()
Definition: scratch.h:289
void mTerm(const MInt errorCode, const MString &location, const MString &message)
Definition: functions.cpp:29
const MString & location
Definition: functions.h:37
const MPI_Info & globalMpiInfo()
Return global MPI information.
int32_t MInt
Definition: maiatypes.h:62
unsigned char MUchar
Definition: maiatypes.h:57
std::basic_string< char > MString
Definition: maiatypes.h:55
double MFloat
Definition: maiatypes.h:52
int64_t MLong
Definition: maiatypes.h:64
bool MBool
Definition: maiatypes.h:58
char MChar
Definition: maiatypes.h:56
uint64_t MUlong
Definition: maiatypes.h:65
int MPI_Info_get(MPI_Info info, const char *key, int valuelen, char *value, int *flag, const MString &name)
same as MPI_Info_get
int MPI_Info_free(MPI_Info *info, const MString &name)
same as MPI_Info_free
int MPI_Info_get_valuelen(MPI_Info info, const char *key, int *valuelen, int *flag, const MString &name)
same as MPI_Info_get_valuelen
int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm, const MString &name, const MString &varname)
same as MPI_Bcast
const MInt PIO_UNKNOWN_TYPE
Definition: parallelio.h:44
const MInt PIO_REPLACE
Definition: parallelio.h:36
const MInt PIO_STRING
Definition: parallelio.h:52
const MInt PIO_ULONGLONG
Definition: parallelio.h:56
const MInt PIO_UCHAR
Definition: parallelio.h:54
const MInt PIO_INT
Definition: parallelio.h:48
const MInt PIO_LONG
Definition: parallelio.h:50
const MInt PIO_APPEND
Definition: parallelio.h:38
const MInt PIO_CREATE
File mode to create a new file. Aborts if file already exists
Definition: parallelio.h:34
const MInt PIO_FLOAT
Definition: parallelio.h:46
const MInt PIO_READ
Definition: parallelio.h:40
Namespace for auxiliary functions/classes.
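Putting the documented interface above together (constructor, setAttribute(), close() via the destructor, and the PIO_* file modes), a typical high-level use of this backend might look as follows. This is a hedged sketch: the file name, attribute names and values are made up, and the include path and namespace qualification of PIO_CREATE are assumptions rather than verified m-AIA usage.

#include <mpi.h>

#include "parallelio.h" // assumed include path

// Hedged usage sketch (not m-AIA code).
void writeRunMetadata(const MPI_Comm& comm) {
  // PIO_CREATE: create a new file, abort if it already exists.
  ParallelIoPNetcdf file("run_metadata.Netcdf", PIO_CREATE, comm);

  // File-level (global) attributes; datasetName defaults to "".
  file.setAttribute(MString("m-AIA"), "creator");
  file.setAttribute(MInt(42), "noTimesteps");
} // the destructor calls close()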