MAIA bb96820c
Multiphysics at AIA
parallelio_hdf5.cpp
1// Copyright (C) 2024 The m-AIA AUTHORS
2//
3// This file is part of m-AIA (https://git.rwth-aachen.de/aia/m-AIA/m-AIA)
4//
5// SPDX-License-Identifier: LGPL-3.0-only
6
7#include "parallelio.h"
8
9#if defined(WITH_HDF5)
10#include "parallelio_hdf5.h"
11
12// For consistency with NetCDF, the maximal name length for datasets is set
13// to NC_MAX_NAME
14#include "hdf5_hl.h"
15
16
17#include <cstdlib>
18#include <cstring>
19#include <numeric>
20#include <string.h>
21#include <sys/stat.h>
22#include "COMM/mpioverride.h"
23#include "MEMORY/scratch.h"
24#include "UTIL/debug.h"
25#include "UTIL/functions.h"
26#include "typetraits.h"
27
28#if not defined(MAIA_WINDOWS)
29#include <pwd.h>
30#include <unistd.h>
31#include "maiapnetcdf.h"
32#else
33#define NC_MAX_NAME 256
34#include <WinSock2.h>
35#include <direct.h>
36#endif
37
38using namespace maia;
39using namespace parallel_io;
40using namespace std;
41
42//------------------------------------------------------------------------------
43// HDF5-specific methods
44//------------------------------------------------------------------------------
45
46// Use unnamed namespace for some HDF5-specific magic (a.k.a. type traits)
47namespace {
48
49template <class DataType>
50struct hdf5_traits {};
51
52// MFloat traits
53template <>
54struct hdf5_traits<MFloat> {
55 // Corresponding HDF5 data type
56 static hid_t type() { return H5T_NATIVE_DOUBLE; }
57};
58
59// MInt traits
60template <>
61struct hdf5_traits<MInt> {
62 // Corresponding HDF5 data type
63 static hid_t type() { return H5T_NATIVE_INT; }
64};
65
66// MLong traits
67template <>
68struct hdf5_traits<MLong> {
69 // Corresponding HDF5 data type
70 static hid_t type() { return H5T_NATIVE_LONG; }
71};
72
73// MChar traits
74template <>
75struct hdf5_traits<MChar> {
76 // Corresponding HDF5 data type
77 static hid_t type() { return H5T_NATIVE_CHAR; }
78};
79
80// MUchar traits
81template <>
82struct hdf5_traits<MUchar> {
83 // Corresponding HDF5 data type
84 static hid_t type() { return H5T_NATIVE_UCHAR; }
85};
86
87// MUlong traits
88template <>
89struct hdf5_traits<MUlong> {
90 // Corresponding HDF5 data type
91 static hid_t type() { return H5T_NATIVE_ULLONG; }
92};
93
94} // namespace
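// Illustrative sketch (not part of the original source) of how the traits above are
// consumed: generic code can look up the native HDF5 type for a MAIA type at compile
// time instead of switching over maiabd_type by hand, e.g.
//
//   hid_t dtype_id = H5Tcopy(hdf5_traits<MFloat>::type()); // -> copy of H5T_NATIVE_DOUBLE
//   // ... create an attribute or dataset with dtype_id ...
//   herr_t status = H5Tclose(dtype_id);                    // close the copied type
//
// b_setAttribute<T> further below uses exactly this pattern.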
95
96/*
97// HDF5-specific I/O has not been tested with valgrind yet. "maiapnetcdf.h" handles
98// some issues regarding valgrind and errors of the third-party library Parallel
99// NetCDF (-> wiki (Version: Nov 2015)).
100// There is no corresponding wrapper header for HDF5. Additionally, some testcases
101// might suppress valgrind errors if you run them with Parallel NetCDF; these
102// testcases would probably also have to suppress the corresponding HDF5 errors.
103//
104// The HDF5 API defines aliases for data types which are used for specific
105// purposes. Examples are:
106// hid_t: type for "managing references to nodes" (identifiers). Each node
107// reference is represented as an integer. All identifiers have to be closed
108// after usage!
109// herr_t: type for handling error codes. Mainly used to signal whether a
110// function call was successful.
111// hsize_t: represents a native multiple-precision integer.
112*/
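// Minimal sketch of the identifier life cycle described above (illustrative only;
// "someDataset" and file_id are placeholders):
//
//   hid_t dset_id = H5Dopen2(file_id, "someDataset", H5P_DEFAULT); // hid_t handle
//   if(dset_id < 0) { /* negative hid_t/herr_t values signal an error */ }
//   herr_t status = H5Dclose(dset_id); // every opened identifier must be closed
//
// The b_error() helper near the end of this file wraps exactly this "status < 0" check.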
113
114//
115//------------------------------------------------------------------------------
116// Static file system-related methods
117//------------------------------------------------------------------------------
129MBool ParallelIoHdf5::b_isValidFile(const MString& name, const MPI_Comm& /*not used*/) {
130 TRACE();
131
132 // WARNING: This method is untested yet. It is implemented to be
133 // consistent with parallel NetCDF. However, it was not used by any of the
134 // testcases. If your code uses this part of the code, please make sure that
135 // the I/O still works as expected and then remove this warning as well as
136 // the subsequent TERMM().
137 MBool returnValue;
138
139 herr_t status;
140 hid_t fileId;
141 fileId = H5Fopen(name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
142 if(fileId > 0) {
143 returnValue = true;
144 status = H5Fclose(fileId);
145 b_error(status, name, AT_);
146 } else {
147 returnValue = false;
148 }
149
150 return returnValue;
151}
152
153
162 TRACE();
163
164 return ".Hdf5";
165}
166
167
168//------------------------------------------------------------------------------
169// Constructor & Destructor
170//------------------------------------------------------------------------------
185ParallelIoHdf5::ParallelIoHdf5(const MString& fileName, MInt fileMode, const MPI_Comm& mpiComm)
186 : ParallelIoBase<ParallelIoHdf5>(fileName, fileMode, mpiComm) {
187 TRACE();
188
189#ifdef DISABLE_OUTPUT
190 if(m_fileMode != PIO_READ) return;
191#endif
192
193 // Store MPI IO communicator
194 b_h5FileXferHandle = H5Pcreate(H5P_FILE_ACCESS);
195 herr_t status = H5Pset_fapl_mpio(b_h5FileXferHandle, m_mpiComm, m_mpiInfo);
196 b_error(status, m_fileName, AT_);
197
198 // Setting b_h5DatasetXferHandle to collective access
199 b_h5DatasetXferHandle = H5Pcreate(H5P_DATASET_XFER);
200
201 status = H5Pset_dxpl_mpio(b_h5DatasetXferHandle, H5FD_MPIO_COLLECTIVE);
202 b_error(status, m_fileName, AT_);
203
204 switch(m_fileMode) {
205 case PIO_CREATE: {
206 b_h5Id = H5Fcreate(m_fileName.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, b_h5FileXferHandle);
207 b_error(b_h5Id, m_fileName + " :: CREATE", AT_);
208 } break;
209
210 case PIO_REPLACE: {
211 b_h5Id = H5Fcreate(m_fileName.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, b_h5FileXferHandle);
212 b_error(b_h5Id, m_fileName + " :: REPLACE", AT_);
213 } break;
214
215 case PIO_APPEND: {
216 // Attempt to open an existing file to append data
217 b_h5Id = H5Fopen(m_fileName.c_str(), H5F_ACC_RDWR, b_h5FileXferHandle);
218 b_error(b_h5Id, m_fileName + " :: APPEND", AT_);
219 } break;
220
221 case PIO_READ: {
222 b_h5Id = H5Fopen(m_fileName.c_str(), H5F_ACC_RDONLY, b_h5FileXferHandle);
223 b_error(b_h5Id, m_fileName + " :: READ", AT_);
224 } break;
225
226 default: {
227 mTerm(1, AT_, "Unsupported file mode.");
228 } break;
229 }
230}
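// Summary of the file-mode mapping implemented by the constructor above, plus an
// illustrative call (the file name is a placeholder):
//
//   PIO_CREATE  -> H5Fcreate(..., H5F_ACC_EXCL, ...)    fails if the file already exists
//   PIO_REPLACE -> H5Fcreate(..., H5F_ACC_TRUNC, ...)   truncates an existing file
//   PIO_APPEND  -> H5Fopen(..., H5F_ACC_RDWR, ...)      opens an existing file read/write
//   PIO_READ    -> H5Fopen(..., H5F_ACC_RDONLY, ...)    opens an existing file read-only
//
//   ParallelIoHdf5 file("restart.Hdf5", PIO_READ, MPI_COMM_WORLD); // hypothetical usage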
231
238ParallelIoHdf5::~ParallelIoHdf5() {
239 TRACE();
240
241 // Close all identifiers used for collective writing
242 herr_t status = H5Pclose(b_h5DatasetXferHandle);
243 b_error(status, m_fileName, AT_);
244 status = H5Pclose(b_h5FileXferHandle);
245 b_error(status, m_fileName, AT_);
246 status = H5Fclose(b_h5Id);
247 b_error(status, m_fileName, AT_);
248
249#ifdef MAIA_EXTRA_DEBUG
250 for(set<MString>::iterator it = m_unwrittenArrays.begin(); it != m_unwrittenArrays.end(); ++it) {
251 cerr << "Warning: array '" << *it << "' in file '" << m_fileName << "' was defined but never written. "
252 << "Make sure that this is the intended behavior." << endl;
253 }
254 for(set<MString>::iterator it = m_unwrittenScalars.begin(); it != m_unwrittenScalars.end(); ++it) {
255 cerr << "Warning: scalar '" << *it << "' in file '" << m_fileName << "' was defined but never written. "
256 << "Make sure that this is the intended behavior." << endl;
257 }
258#endif
259}
260
261
262//------------------------------------------------------------------------------
263// File methods
264//------------------------------------------------------------------------------
274 TRACE();
275
278}
279
280
281// Forward declaration of specialization for use in b_addAdditionalHeader
282template <>
283void ParallelIoHdf5::b_setAttribute(const MString* value, const MString& name, const MString& datasetName,
284 const size_type totalCount);
285
286
294void ParallelIoHdf5::b_addAdditionalHeader() {
295 TRACE();
296
297 // For a newly created or modified file, add some meta information
298 if(m_fileMode != PIO_READ) {
299 const MInt maxNoChars = 256;
300
301 // Get all meta-data on root process & communicate, since some of the C
302 // functions might not be thread-safe (at least getpwuid() is not)
303 MChar user[maxNoChars];
304 MChar host[maxNoChars];
305 MChar dir[maxNoChars];
306 MChar date[maxNoChars];
307
308 // Create object & initialize data
309 fill(user, user + maxNoChars, '\0');
310 fill(host, host + maxNoChars, '\0');
311 fill(dir, dir + maxNoChars, '\0');
312 fill(date, date + maxNoChars, '\0');
313
314 if(m_domainId == 0) {
315 // Gets the current username
316 passwd* p;
317#if not defined(MAIA_WINDOWS)
318 p = getpwuid(getuid());
319 if(p) {
320 strncpy(user, p->pw_name, maxNoChars - 1);
321 } else {
322 strncpy(user, "n/a", maxNoChars - 1);
323 }
324#else
325 strncpy(user, "windows", maxNoChars - 1);
326#endif
327
328 // Gets the current hostname
329 gethostname(host, maxNoChars - 1);
330
331 // Gets the current directory
332#if defined(MAIA_WINDOWS)
333 _getcwd(dir, maxNoChars - 1);
334#else
335 char* retv = getcwd(dir, maxNoChars - 1);
336 ASSERT(retv != nullptr, "");
337#endif
338
339 // Get the current time and write it to rawTime
340 time_t rawTime;
341 time(&rawTime);
342
343 // Convert to time struct
344 tm* timeInfo;
345 timeInfo = localtime(&rawTime);
346
347 // Format time to string and save to buffer
348 strftime(date, maxNoChars, "%Y-%m-%d %H:%M:%S", timeInfo);
349 }
350
351 // Pack data
352 const MInt noItems = 4;
353 MChar buffer[noItems * maxNoChars];
354 memcpy(buffer + 0 * maxNoChars, user, maxNoChars);
355 memcpy(buffer + 1 * maxNoChars, host, maxNoChars);
356 memcpy(buffer + 2 * maxNoChars, dir, maxNoChars);
357 memcpy(buffer + 3 * maxNoChars, date, maxNoChars);
358
359 // Broadcast the gathered metadata from rank 0 to ensure that every rank has
360 // the same information
361 MPI_Bcast(&buffer, 4 * maxNoChars, MPI_CHAR, 0, m_mpiComm, AT_, "buffer");
362
363 // Unpack data
364 memcpy(user, buffer + 0 * maxNoChars, maxNoChars);
365 memcpy(host, buffer + 1 * maxNoChars, maxNoChars);
366 memcpy(dir, buffer + 2 * maxNoChars, maxNoChars);
367 memcpy(date, buffer + 3 * maxNoChars, maxNoChars);
368
369 MString version = MString(XSTRINGIFY(MAIA_VERSION_STRING));
370 MString build = MString(XSTRINGIFY(MAIA_COMPILER_STRING)) + " " + MString(XSTRINGIFY(MAIA_BUILD_TYPE_STRING)) + " ("
371 + MString(XSTRINGIFY(MAIA_COMPILER_VERSION_STRING)) + ")";
372
374 // Add file attributes only needed for creation
375 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(user), "meta_creation_user");
376 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(host), "meta_creation_host");
377 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(dir), "meta_creation_directory");
378 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(date), "meta_creation_date");
380 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(date), "_meta_creation_date");
381 ParallelIoBase<ParallelIoHdf5>::setAttribute(version, "_meta_creation_revision");
382 ParallelIoBase<ParallelIoHdf5>::setAttribute(build, "_meta_creation_build");
383 }
384
385 // Add file attributes that should be set at each modification
386 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(user), "meta_lastModified_user");
387 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(host), "meta_lastModified_host");
388 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(dir), "meta_lastModified_directory");
389 ParallelIoBase<ParallelIoHdf5>::setAttribute(MString(date), "meta_lastModified_date");
390 ParallelIoBase<ParallelIoHdf5>::setAttribute(m_noDomains, "meta_lastModified_noDomains");
391 ParallelIoBase<ParallelIoHdf5>::setAttribute(version, "_meta_lastModified_revision");
392 ParallelIoBase<ParallelIoHdf5>::setAttribute(build, "_meta_lastModified_build");
393 }
394}
395
396
404 TRACE();
405
406 // At the moment, nothing happens here
407}
408
409
410//------------------------------------------------------------------------------
411// Define mode methods
412//------------------------------------------------------------------------------
426void ParallelIoHdf5::b_defineArray(maiabd_type type, const MString& name, size_type noDims, size_type* totalCount) {
427 TRACE();
428
429 // Choose the HDF5 specific native datatype or a string of variable length
430 herr_t status = -1;
431 hid_t dtype_id;
432 switch(type) {
433 case PIO_FLOAT: {
434 dtype_id = H5T_NATIVE_DOUBLE;
435 break;
436 }
437 case PIO_INT: {
438 dtype_id = H5T_NATIVE_INT;
439 break;
440 }
441 case PIO_LONG: {
442 dtype_id = H5T_NATIVE_LONG;
443 break;
444 }
445 case PIO_STRING: {
446 dtype_id = H5Tcopy(H5T_C_S1);
447 b_error(dtype_id, name, AT_);
448 status = H5Tset_size(dtype_id, H5T_VARIABLE);
449 b_error(status, name, AT_);
450 break;
451 }
452 case PIO_UCHAR: {
453 dtype_id = H5T_NATIVE_UCHAR;
454 break;
455 }
456 case PIO_ULONGLONG: {
457 dtype_id = H5T_NATIVE_ULLONG;
458 break;
459 }
460
461 default: {
462 TERMM(1, "Invalid ParallelIo data type!");
463 }
464 }
465
466 // Allocate Dataspace as big as specified by totalCount and noDims
467 hid_t dspace_id = H5Screate_simple(noDims, (hsize_t*)(totalCount), nullptr);
468 b_error(dspace_id, name, AT_);
469 hid_t dset_id = H5Dcreate2(b_h5Id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
470 b_error(dset_id, name, AT_);
471
472 // Close all identifiers which have been created (except b_h5Id)
473 status = H5Dclose(dset_id);
474 b_error(status, name, AT_);
475 status = H5Sclose(dspace_id);
476 b_error(status, name, AT_);
477 if(type == PIO_STRING) {
478 status = H5Tclose(dtype_id);
479 b_error(status, name, AT_);
480 }
481}
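// Illustrative sketch (hypothetical values, not part of the original source) of what
// b_defineArray() creates: for type = PIO_FLOAT, noDims = 2 and totalCount = {100, 3},
// a contiguous 100x3 dataset of H5T_NATIVE_DOUBLE is laid out in the file:
//
//   hsize_t dims[2] = {100, 3};
//   hid_t dspace_id = H5Screate_simple(2, dims, nullptr);
//   hid_t dset_id = H5Dcreate2(b_h5Id, "coordinates", H5T_NATIVE_DOUBLE, dspace_id,
//                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
//   H5Dclose(dset_id);
//   H5Sclose(dspace_id);
//
// The data itself is written later by b_writeArray().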
482
483
494void ParallelIoHdf5::b_defineScalar(maiabd_type type, const MString& name) {
495 TRACE();
496
497 // Choose the HDF5 specific native datatype or a string of variable length
498 herr_t status = -1;
499 hid_t dtype_id;
500 switch(type) {
501 case PIO_FLOAT: {
502 dtype_id = H5T_NATIVE_DOUBLE;
503 break;
504 }
505 case PIO_INT: {
506 dtype_id = H5T_NATIVE_INT;
507 break;
508 }
509 case PIO_LONG: {
510 dtype_id = H5T_NATIVE_LONG;
511 break;
512 }
513 case PIO_STRING: {
514 dtype_id = H5Tcopy(H5T_C_S1);
515 b_error(dtype_id, name, AT_);
516 status = H5Tset_size(dtype_id, H5T_VARIABLE);
517 b_error(status, name, AT_);
518 break;
519 }
520 case PIO_UCHAR: {
521 dtype_id = H5T_NATIVE_UCHAR;
522 break;
523 }
524 case PIO_ULONGLONG: {
525 dtype_id = H5T_NATIVE_ULLONG;
526 break;
527 }
528 default: {
529 TERMM(1, "Invalid ParallelIo data type!");
530 }
531 }
532
533 // Create a dataspace and dataset for a scalar
534 hid_t dspace_id = H5Screate(H5S_SCALAR);
535 b_error(dspace_id, name, AT_);
536 hid_t dset_id = H5Dcreate2(b_h5Id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
537 b_error(dset_id, name, AT_);
538
539 // Close all identifiers which have been created (except b_h5Id)
540 status = H5Dclose(dset_id);
541 b_error(status, name, AT_);
542 status = H5Sclose(dspace_id);
543 b_error(status, name, AT_);
544 if(type == PIO_STRING) {
545 status = H5Tclose(dtype_id);
546 b_error(status, name, AT_);
547 }
548}
549
550
551//------------------------------------------------------------------------------
552// Inquiry methods
553//------------------------------------------------------------------------------
569MBool ParallelIoHdf5::b_hasDataset(const MString& name, const size_type noDimensions) {
570 TRACE();
571
572 MBool varExists = false;
573
574 herr_t status = -1;
575 hid_t dset_id = -1, dspace_id = -1;
576 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
577 b_error(link_id, name, AT_);
578 htri_t linkExists = H5Lexists(b_h5Id, name.c_str(), link_id);
579 b_error(linkExists, name, AT_);
580 if(linkExists != 0) {
581 dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
582 b_error(dset_id, name, AT_);
583 dspace_id = H5Dget_space(dset_id);
584 b_error(dspace_id, name, AT_);
585 int nDims = H5Sget_simple_extent_ndims(dspace_id);
586 b_error(nDims, name, AT_);
587 if(nDims == noDimensions || noDimensions == -1) {
588 varExists = true;
589 }
590 }
591
592 // Close all previously created identifiers (except b_h5Id)
593 if(dset_id > 0) {
594 status = H5Sclose(dspace_id);
595 b_error(status, name, AT_);
596 status = H5Dclose(dset_id);
597 b_error(status, name, AT_);
598 }
599 status = H5Pclose(link_id);
600 b_error(status, name, AT_);
601
602 return varExists;
603}
604
605
619 TRACE();
620
621 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
622 b_error(link_id, name, AT_);
623 // Handle whether or not path was given
624 MBool exists = false;
625 hid_t loc_id = 0;
626 MBool checkGroup = false;
627 if(path.empty()) {
628 loc_id = b_h5Id;
629 } else {
630 herr_t pathExists = H5Lexists(b_h5Id, path.c_str(), link_id);
631 if(pathExists != 0) {
632 // Group path exists, opening it
633 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
634 b_error(loc_id, path, AT_);
635 } else {
636 // Group path doesn't exist, so the object cannot exist either
637 H5Pclose(link_id);
638 return 0;
639 }
640
641 checkGroup = true;
642 }
643
644 // Check if object exists
645 herr_t status = H5Lexists(loc_id, name.c_str(), link_id);
646 if(status != 0) {
647 exists = true;
648 }
649
650 H5Pclose(link_id);
651 if(checkGroup) H5Oclose(loc_id);
652 return exists;
653}
654
666MInt ParallelIoHdf5::b_getDatasetType(const MString& name) {
667 TRACE();
668
669 // Determine variable id
670 hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
671 b_error(dset_id, name, AT_);
672 hid_t dtype_id = H5Dget_type(dset_id);
673 b_error(dtype_id, name, AT_);
674
675 MInt typeId;
676 if(H5Tequal(dtype_id, H5T_NATIVE_INT) > 0) {
677 typeId = PIO_INT;
678 } else if(H5Tequal(dtype_id, H5T_NATIVE_DOUBLE) > 0) {
679 typeId = PIO_FLOAT;
680 } else if(H5Tequal(dtype_id, H5T_NATIVE_LONG) > 0) {
681 typeId = PIO_LONG;
682 } else if(H5Tget_class(dtype_id) == H5T_STRING || H5Tequal(dtype_id, H5T_C_S1) > 0) {
683 typeId = PIO_STRING;
684 } else if(H5Tequal(dtype_id, H5T_NATIVE_UCHAR) > 0) {
685 typeId = PIO_UCHAR;
686 } else if(H5Tequal(dtype_id, H5T_NATIVE_ULLONG) > 0) {
687 typeId = PIO_ULONGLONG;
688 } else {
689 typeId = PIO_UNKNOWN_TYPE;
690 TERMM(1, "ERROR: Unknown type for dataset " + name + "!");
691 }
692
693 // Close all previously created identifiers (except b_h5Id)
694 herr_t status = H5Tclose(dtype_id);
695 b_error(status, name, AT_);
696 status = H5Dclose(dset_id);
697 b_error(status, name, AT_);
698
699 return typeId;
700}
701
702
716void ParallelIoHdf5::b_getDatasetNames(vector<MString>& names, const size_type dimension) {
717 TRACE();
718
719 vector<MString>().swap(names);
720
721 herr_t status = -1;
722
723 // Get access to the root group "/" (currently group functionality of HDF5 is
724 // completely ignored, thus it is the only existent group).
725 hid_t group_id = H5Gopen(b_h5Id, "/", H5P_DEFAULT);
726 b_error(group_id, m_fileName, AT_);
727 // Iterate through each object in the group.
728 hsize_t noObj = 0;
729 status = H5Gget_num_objs(group_id, &noObj);
730 b_error(status, m_fileName, AT_);
731 for(MInt idx = 0; idx < (signed)noObj; idx++) {
732 // Check if the current object is a dataset
733 int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
734 b_error(obj_type, m_fileName, AT_);
735 if(obj_type == H5G_DATASET) {
736 // Determine the name and the dimension of the dataset
737 // The maximal name length is NC_MAX_NAME for consistency with PnetCDF
738 char DatasetName[NC_MAX_NAME];
739 status = H5Gget_objname_by_idx(group_id, (hsize_t)idx, DatasetName, NC_MAX_NAME);
740 b_error(status, m_fileName, AT_);
741 hid_t dset_id = H5Dopen(b_h5Id, DatasetName, H5P_DEFAULT);
742 b_error(dset_id, m_fileName + " :: " + string(DatasetName), AT_);
743 hid_t dspace_id = H5Dget_space(dset_id);
744 b_error(dspace_id, m_fileName + " :: " + string(DatasetName), AT_);
745 int noDims = H5Sget_simple_extent_ndims(dspace_id);
746 b_error(noDims, m_fileName + " :: " + string(DatasetName), AT_);
747 if(noDims == dimension || dimension == -1) {
748 names.emplace_back(DatasetName);
749 }
750 // Close all previously created identifiers (except b_h5Id)
751 status = H5Sclose(dspace_id);
752 b_error(status, m_fileName + " :: " + string(DatasetName), AT_);
753 status = H5Dclose(dset_id);
754 b_error(status, m_fileName + " :: " + string(DatasetName), AT_);
755 }
756 }
757 status = H5Gclose(group_id);
758 b_error(status, m_fileName, AT_);
759}
760
761
775ParallelIo::size_type ParallelIoHdf5::b_getDatasetNoDims(const MString& name) {
776 TRACE();
777
778 // Get variable id
779 hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
780 b_error(dset_id, name, AT_);
781
782 // Get number of variable dimensions
783 hid_t dspace_id = H5Dget_space(dset_id);
784 b_error(dspace_id, name, AT_);
785 int noDims = H5Sget_simple_extent_ndims(dspace_id);
786 b_error(noDims, name, AT_);
787
788 // Close all previously created identifiers (except b_h5Id)
789 herr_t status = H5Sclose(dspace_id);
790 b_error(status, name, AT_);
791 status = H5Dclose(dset_id);
792 b_error(status, name, AT_);
793
794 return noDims;
795}
796
797
809ParallelIo::size_type ParallelIoHdf5::b_getDatasetSize(const MString& name, const size_type dimensionId) {
810 TRACE();
811
812 // Get variable id
813 hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
814 b_error(dset_id, name, AT_);
815
816 // Get number of array dimensions
817 size_type noDims = b_getDatasetNoDims(name);
818
819 // Get variable size for each dimension
820 hid_t dspace_id = H5Dget_space(dset_id);
821 b_error(dspace_id, name, AT_);
822
823 ScratchSpace<hsize_t> dimId(noDims, FUN_, "dimId");
824
825 herr_t status = H5Sget_simple_extent_dims(dspace_id, &dimId[0], nullptr);
826 b_error(status, name, AT_);
827 hid_t dtype_id = H5Dget_type(dset_id);
828 b_error(dtype_id, name, AT_);
829
830 // Close all previously created identifiers (except b_h5Id)
831 status = H5Sclose(dspace_id);
832 b_error(status, name, AT_);
833 status = H5Dclose(dset_id);
834 b_error(status, name, AT_);
835
836 return static_cast<size_type>(dimId[dimensionId]);
837}
838
839//------------------------------------------------------------------------------
840// Attribute methods
841//------------------------------------------------------------------------------
842
854template <>
855void ParallelIoHdf5::b_getAttribute(MString* value, const MString& name, const MString& datasetName,
856 const size_type totalCount) {
857 TRACE();
858
859 if(totalCount > 1) {
860 mTerm(1, AT_, "String array attributes not supported.");
861 }
862
863 // Open the attribute
864 hid_t attribute_id;
865 if(datasetName == "") { // Attribute is attached to the file
866 attribute_id = H5Aopen(b_h5Id, name.c_str(), H5P_DEFAULT);
867 b_error(attribute_id, name, AT_);
868 } else { // Attribute is attached to a dataset
869 attribute_id = H5Aopen_by_name(b_h5Id, datasetName.c_str(), name.c_str(), H5P_DEFAULT, H5P_DEFAULT);
870 b_error(attribute_id, name, AT_);
871 }
872 hid_t attribute_type_id = H5Aget_type(attribute_id);
873 b_error(attribute_type_id, name, AT_);
874 hsize_t length = H5Tget_size(attribute_type_id);
875
876 hid_t filetype = H5Tcopy(H5T_C_S1);
877 H5Tset_size(filetype, length);
878 ScratchSpace<char> buf(length, FUN_, "buf");
879
880 herr_t status = H5Aread(attribute_id, filetype, &buf[0]);
881 b_error(status, name, AT_);
882
883 // Extract strings from buffer
884 value[0].append(&buf[0], length);
885
886 // Close all identifiers which have been created (except b_h5Id)
887 status = H5Tclose(attribute_type_id);
888 b_error(status, name, AT_);
889 status = H5Aclose(attribute_id);
890 b_error(status, name, AT_);
891}
892
904template <class T>
905void ParallelIoHdf5::b_getAttribute(T* value, const MString& name, const MString& datasetName,
906 const size_type totalCount) {
907 TRACE();
908
909 // Open the attribute
910 hid_t attribute_id;
911 if(datasetName == "") { // Attribute is attached to the file
912 attribute_id = H5Aopen(b_h5Id, name.c_str(), H5P_DEFAULT);
913 b_error(attribute_id, name, AT_);
914 } else { // Attribute is attached to a dataset
915 attribute_id = H5Aopen_by_name(b_h5Id, datasetName.c_str(), name.c_str(), H5P_DEFAULT, H5P_DEFAULT);
916 b_error(attribute_id, name, AT_);
917 }
918
919 hid_t attribute_type_id = H5Aget_type(attribute_id);
920 b_error(attribute_type_id, name, AT_);
921
922 hsize_t length = H5Aget_storage_size(attribute_id) / H5Tget_size(attribute_type_id);
923 if(length != (hsize_t)totalCount) {
924 TERMM(1, "Requested attribute (" + name
925 + ") has different number of elements than given. Use getAttributeCount() to "
926 "query number of elements first");
927 }
928
929 herr_t status = H5Aread(attribute_id, attribute_type_id, value);
930 b_error(status, name, AT_);
931
932 // Close all identifiers which have been created (except b_h5Id)
933 status = H5Tclose(attribute_type_id);
934 b_error(status, name, AT_);
935 status = H5Aclose(attribute_id);
936 b_error(status, name, AT_);
937}
938
939
954 TRACE();
955
956 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
957 b_error(link_id, name, AT_);
958 // Handle whether or not path was given
959 hid_t loc_id = 0;
960 MBool checkGroup = false;
961 if(path.empty()) {
962 loc_id = b_h5Id;
963 } else {
964 herr_t pathExists = H5Lexists(b_h5Id, path.c_str(), link_id);
965 if(pathExists != 0) {
966 // Group path exists, opening it
967 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
968 b_error(loc_id, path, AT_);
969 } else {
970 // Group path doesn't exist, so the object cannot exist either
971 H5Pclose(link_id);
972 return 0;
973 }
974
975 checkGroup = true;
976 }
977
978 // Check if object exists
979 herr_t status = H5Lexists(loc_id, name.c_str(), link_id);
980 if(status == 0) { // Object does not exist, check if attribute exists
981 status = H5Aexists(loc_id, name.c_str());
982 }
983
984 H5Pclose(link_id);
985 if(checkGroup) H5Oclose(loc_id);
986 return status;
987}
988
990 TRACE();
991
992 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
993 b_error(link_id, path, AT_);
994 // Handle whether or not path was given
995 MBool exists = false;
996 if(path.empty()) {
997 exists = true;
998 } else {
999 herr_t pathExists = H5Lexists(b_h5Id, path.c_str(), link_id);
1000 if(pathExists != 0) {
1001 exists = true;
1002 } else {
1003 exists = false;
1004 }
1005 }
1006
1007 H5Pclose(link_id);
1008 return exists;
1009}
1010
1011
1012void ParallelIoHdf5::b_getAttributeCount(const MString& name, size_type* totalCount, const MString& datasetName) {
1013 TRACE();
1014
1015 // Open the attribute
1016 hid_t attribute_id;
1017 if(datasetName.empty()) { // Attribute is attached to the file
1018 attribute_id = H5Aopen(b_h5Id, name.c_str(), H5P_DEFAULT);
1019 b_error(attribute_id, name, AT_);
1020 } else { // Attribute is attached to a dataset
1021 attribute_id = H5Aopen_by_name(b_h5Id, datasetName.c_str(), name.c_str(), H5P_DEFAULT, H5P_DEFAULT);
1022 b_error(attribute_id, name, AT_);
1023 }
1024
1025 hid_t attribute_type_id = H5Aget_type(attribute_id);
1026 b_error(attribute_type_id, name, AT_);
1027
1028 hsize_t length = H5Aget_storage_size(attribute_id) / H5Tget_size(attribute_type_id);
1029
1030 *totalCount = (size_type)length;
1031
1032 // Close all identifiers which have been created (except b_h5Id)
1033 herr_t status = H5Tclose(attribute_type_id);
1034 b_error(status, name, AT_);
1035 status = H5Aclose(attribute_id);
1036 b_error(status, name, AT_);
1037}
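// Worked example for the element count above (illustrative, assuming an 8-byte MFloat):
// a file attribute holding three MFloat values has a storage size of 3 * 8 = 24 bytes
// and an element size of H5Tget_size(H5T_NATIVE_DOUBLE) = 8 bytes, so length = 24 / 8 = 3
// and *totalCount is set to 3.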
1038
1039
1052MInt ParallelIoHdf5::b_getAttributeType(const MString& name, const MString& datasetName) {
1053 TRACE();
1054
1055 // TERMM(1, "untested Hdf5 specific I/O method, please see comment for how"
1056 // " to proceed");
1057 // WARNING: This method is untested yet. It is implemented to be
1058 // consistent with parallel NetCDF. However, it was not used by any of the
1059 // testcases. If your code uses this part of the code, please make sure that
1060 // the I/O still works as expected and then remove this warning as well as
1061 // the subsequent TERMM().
1062 // Determine variable id
1063 hid_t varId;
1064 if(datasetName.empty()) { // File attribute
1065 varId = b_h5Id;
1066 } else { // Dataset attribute
1067 varId = H5Dopen2(b_h5Id, datasetName.c_str(), H5P_DEFAULT);
1068 b_error(varId, name, AT_);
1069 }
1070
1071 // Get attribute type
1072 hid_t attribute_id = H5Aopen(varId, name.c_str(), H5P_DEFAULT);
1073 b_error(attribute_id, name, AT_);
1074 hid_t attrType_id = H5Aget_type(attribute_id);
1075 b_error(attrType_id, name, AT_);
1076
1077 // Translate native HDF5 data type to ParallelIo type
1078 MInt typeId;
1079 if(H5Tequal(attrType_id, H5T_NATIVE_INT) > 0) {
1080 typeId = PIO_INT;
1081 } else if(H5Tequal(attrType_id, H5T_NATIVE_DOUBLE) > 0) {
1082 typeId = PIO_FLOAT;
1083 } else if(H5Tequal(attrType_id, H5T_NATIVE_LONG) > 0) {
1084 typeId = PIO_LONG;
1085 } else if(H5Tequal(attrType_id, H5T_C_S1) > 0) {
1086 typeId = PIO_STRING;
1087 } else if(H5Tequal(attrType_id, H5T_NATIVE_UCHAR) > 0) {
1088 typeId = PIO_UCHAR;
1089 } else if(H5Tequal(attrType_id, H5T_NATIVE_ULLONG) > 0) {
1090 typeId = PIO_ULONGLONG;
1091 } else {
1092 typeId = PIO_UNKNOWN_TYPE;
1093 }
1094
1095 // Close all previously created identifiers (except b_h5Id)
1096 herr_t status;
1097 if(varId != b_h5Id) {
1098 status = H5Dclose(varId);
1099 b_error(status, name, AT_);
1100 }
1101 status = H5Aclose(attribute_id);
1102 b_error(status, name, AT_);
1103 status = H5Tclose(attrType_id);
1104 b_error(status, name, AT_);
1105
1106 return typeId;
1107}
1108
1109//------------------------------------------------------------------------------
1110// Data mode methods
1111//------------------------------------------------------------------------------
1126template <>
1127void ParallelIoHdf5::b_writeArray(const MString* array, const MString& name, const size_type noDims, MPI_Offset* start,
1128 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
1129 MPI_Offset diskStride) {
1130 TRACE();
1131 TERMM(1, "untested Hdf5 specific I/O method, please see comment for how"
1132 " to proceed");
1133 // WARNING: This method is untested yet. It is implemented to be
1134 // consistent with parallel NetCDF. However, it was not used by any of the
1135 // testcases. If your code uses this part of the code, please make sure that
1136 // the I/O still works as expected and then remove this warning as well as
1137 // the subsequent TERMM().
1138
1139 // Get variable id
1140 hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
1141 b_error(dset_id, name, AT_);
1142 hid_t dspace_id = H5Dget_space(dset_id);
1143 b_error(dspace_id, name, AT_);
1144 hid_t dtype_id = H5Tcopy(H5T_C_S1);
1145 b_error(dtype_id, name, AT_);
1146 herr_t status = H5Tset_size(dtype_id, H5T_VARIABLE);
1147 b_error(status, name, AT_);
1148
1149 // Determine total number of strings (last dimension is string length)
1150 size_type totalCount = 1;
1151 for(size_type d = 0; d < noDims - 1; d++) {
1152 totalCount *= count[d];
1153 }
1154
1155 // Determine length of one string
1156 size_type strLen = count[noDims - 1];
1157
1158 // Create temporary storage space if needed and set data pointers
1159 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1160 ScratchSpace<MString> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1161
1162 // Pack strided data
1163 const MString* data = 0;
1164 if(memoryStride == 1) {
1165 data = array;
1166 } else {
1167 for(MPI_Offset i = 0; i < totalCount; i++) {
1168 tmpScratch[i] = array[memoryStride * i];
1169 }
1170 // data = tmpScratch.begin();
1171 data = &tmpScratch[0];
1172 }
1173
1174 // Create buffer for writing
1175 size_type nCount = totalCount * strLen;
1176 ScratchSpace<char> buf(nCount, FUN_, "buf");
1177
1178 // Copy data to buffer
1179 for(MPI_Offset i = 0; i < totalCount; i++) {
1180 data[i].copy(&buf[i * strLen], strLen, 0);
1181 }
1182
1183 hid_t memory_dspace_id = -1;
1184 // Write array
1185 if(noChunks == 1) {
1186 // If number of chunks is one, write everything at once
1187 memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
1188 b_error(memory_dspace_id, name, AT_);
1189 if(diskStride == 1) {
1190 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
1191 } else {
1192 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride, (hsize_t*)count,
1193 nullptr);
1194 }
1195 b_error(status, name, AT_);
1196 status = H5Dwrite(dset_id, dtype_id, memory_dspace_id, dspace_id, b_h5DatasetXferHandle, &buf[0]);
1197 b_error(status, name, AT_);
1198 } else {
1199 // Write in chunks
1200 MPI_Offset chunkSize = count[0] / noChunks;
1201 if(count[0] % noChunks > 0) {
1202 chunkSize += 1;
1203 }
1204
1205 // Determine number of entries for a fixed first dimension index
1206 size_type nDSize = 1;
1207 for(size_type d = 1; d < noDims; d++) {
1208 nDSize *= count[d];
1209 }
1210
1211 ScratchSpace<hsize_t> start_(noDims, FUN_, "start_");
1212 ScratchSpace<hsize_t> count_(noDims, FUN_, "count_");
1213
1214 std::copy(start, start + noDims, &start_[0]);
1215 std::copy(count, count + noDims, &count_[0]);
1216
1217 for(size_type i = 0; i < noChunks; i++) {
1218 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1219 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1220 const char* buf_ = &buf[0] + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1221 memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
1222 b_error(memory_dspace_id, name, AT_);
1223 if(diskStride == 1) {
1224 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
1225 } else {
1226 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], (hsize_t*)diskStride, &count_[0], nullptr);
1227 }
1228 b_error(status, name, AT_);
1229 status = H5Dwrite(dset_id, dtype_id, memory_dspace_id, dspace_id, b_h5DatasetXferHandle, &buf_[0]);
1230 b_error(status, name, AT_);
1231 }
1232 }
1233 // Close all identifiers which have been created (except b_h5Id)
1234 status = H5Sclose(memory_dspace_id);
1235 b_error(status, name, AT_);
1236 status = H5Sclose(dspace_id);
1237 b_error(status, name, AT_);
1238 status = H5Dclose(dset_id);
1239 b_error(status, name, AT_);
1240 status = H5Tclose(dtype_id);
1241 b_error(status, name, AT_);
1242}
1243
1244
1261template <>
1262void ParallelIoHdf5::b_readArray(MString* array, const MString& name, const size_type noDims, MPI_Offset* start,
1263 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
1264 MPI_Offset diskStride) {
1265 TRACE();
1266
1267 // Get variable id
1268 hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
1269 b_error(dset_id, name, AT_);
1270 hid_t dspace_id = H5Dget_space(dset_id);
1271 b_error(dspace_id, name, AT_);
1272 hid_t dtype_id = H5Dget_type(dset_id); // H5Tcopy(H5T_C_S1);
1273
1274
1275 int length = (int)H5Tget_size(dtype_id);
1276
1277 if(H5Tget_cset(dtype_id)) {
1278 dtype_id = H5Tcopy(H5T_C_S1);
1279 H5Tset_cset(dtype_id, H5T_CSET_UTF8);
1280 } else {
1281 dtype_id = H5Tcopy(H5T_C_S1);
1282 }
1283
1284 b_error(dtype_id, name, AT_);
1285 // herr_t status = H5Tset_size(dtype_id, 1);
1286 herr_t status = H5Tset_size(dtype_id, length);
1287 b_error(status, name, AT_);
1288
1289 // Determine total data count
1290 size_type totalCount = 1;
1291 for(size_type d = 0; d < noDims - 1; d++) {
1292 totalCount *= count[d];
1293 }
1294
1295 // Determine length of one string
1296 size_type strLen = length; // count[noDims - 1];
1297
1298 // Create temporary storage space if needed and set data pointers
1299 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1300 ScratchSpace<MString> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1301 MString* data = 0;
1302 if(memoryStride == 1) {
1303 data = array;
1304 } else {
1305 // data = tmpScratch.begin();
1306 data = &tmpScratch[0];
1307 }
1308
1309 // Create buffer for reading
1310 size_type nCount = totalCount * strLen;
1311 ScratchSpace<char> buf(nCount, FUN_, "buf");
1312
1313 hid_t memory_dspace_id = -1;
1314 // Read array
1315 if(noChunks == 1) {
1316 // If number of chunks is one, read everything at once
1317
1318 count[0] = 1;
1319
1320 memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
1321 b_error(memory_dspace_id, name, AT_);
1322 // if ( diskStride == 1 ) {
1323 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
1324 // }
1325 // else {
1326 // status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride,
1327 // (hsize_t*)count, nullptr);
1328 // }
1329 b_error(status, name, AT_);
1330 status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, &buf[0]);
1331 b_error(status, name, AT_);
1332
1333 // Extract strings from buffer
1334 for(size_type i = 0; i < totalCount; i++) {
1335 MString tmp;
1336 tmp.append(&buf[i * strLen], strLen);
1337 data[i].append(tmp.c_str(), 0, strLen);
1338 }
1339 } else {
1340 // Read in chunks
1341 MPI_Offset chunkSize = count[0] / noChunks;
1342 if(count[0] % noChunks > 0) {
1343 chunkSize += 1;
1344 }
1345
1346 // Determine number of entries for a fixed first dimension index
1347 size_type nDSize = 1;
1348 for(size_type d = 1; d < noDims; d++) {
1349 nDSize *= count[d];
1350 }
1351
1352 ScratchSpace<hsize_t> start_(noDims, FUN_, "start_");
1353 ScratchSpace<hsize_t> count_(noDims, FUN_, "count_");
1354
1355 std::copy(start, start + noDims, &start_[0]);
1356 std::copy(count, count + noDims, &count_[0]);
1357
1358 for(size_type i = 0; i < noChunks; i++) {
1359 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1360 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1361 memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
1362 b_error(memory_dspace_id, name, AT_);
1363 // if ( diskStride == 1 ) {
1364 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
1365 // }
1366 // else {
1367 // status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0],
1368 // (hsize_t*)diskStride, &count_[0], nullptr);
1369 // }
1370 b_error(status, name, AT_);
1371 status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, &buf[0]);
1372 b_error(status, name, AT_);
1373
1374 // Extract strings from buffer
1375 for(size_type j = start_[0]; j < totalCount; j++) {
1376 MString tmp;
1377 tmp.append(&buf[(j - start_[0]) * strLen], strLen);
1378 data[j].append(tmp.c_str(), 0, strLen);
1379 }
1380 }
1381 }
1382
1383 // Unpack strided data if necessary
1384 if(memoryStride != 1) {
1385 for(MPI_Offset i = 0; i < totalCount; i++) {
1386 array[memoryStride * i] = tmpScratch[i];
1387 }
1388 }
1389
1390 // Close all identifiers which have been created (except b_h5Id)
1391 status = H5Sclose(memory_dspace_id);
1392 b_error(status, name, AT_);
1393 status = H5Sclose(dspace_id);
1394 b_error(status, name, AT_);
1395 status = H5Dclose(dset_id);
1396 b_error(status, name, AT_);
1397 status = H5Tclose(dtype_id);
1398 b_error(status, name, AT_);
1399}
1400
1401
1413template <>
1414void ParallelIoHdf5::b_readScalar(MString* scalar, const MString& name) {
1415 TRACE();
1416
1417 TERMM(1, "untested Hdf5 specific I/O method, please see comment for how"
1418 " to proceed");
1419 // WARNING: This method is untested yet. It is implemented to be
1420 // consistent with parallel NetCDF. However, it was not used by any of the
1421 // testcases. If your code uses this part of the code, please make sure that
1422 // the I/O still works as expected and then remove this warning as well as
1423 // the subsequent TERMM().
1424 // Get variable id
1425 hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
1426 b_error(dset_id, name, AT_);
1427
1428 // Read scalar
1429 hid_t dtype_id = H5Tcopy(H5T_C_S1);
1430 b_error(dtype_id, name, AT_);
1431 herr_t status = H5Tset_size(dtype_id, H5T_VARIABLE);
1432 b_error(status, name, AT_);
1433 status = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, (char*)(scalar));
1434 b_error(status, name, AT_);
1435
1436 // Close all identifiers
1437 status = H5Dclose(dset_id);
1438 b_error(status, name, AT_);
1439 status = H5Tclose(dtype_id);
1440 b_error(status, name, AT_);
1441}
1442
1443
1444//------------------------------------------------------------------------------
1445// Attribute methods
1446//------------------------------------------------------------------------------
1447
1448
1460template <class T>
1461void ParallelIoHdf5::b_createAttribute(const T* value, const MString& name, const MString& datasetName, hid_t dtype_id,
1462 const size_type totalCount) {
1463 TRACE();
1464
1465 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
1466 b_error(link_id, name, AT_);
1467 // Allocate dataspace for a scalar
1468 const auto tmpCnt = (hsize_t)totalCount;
1469 hid_t dspace_id = (totalCount > 1) ? H5Screate_simple(1, &tmpCnt, nullptr) : H5Screate(H5S_SCALAR);
1470 b_error(dspace_id, name, AT_);
1471
1472 hid_t attribute_id = -1;
1473 herr_t status = -1;
1474
1475 hid_t loc_id = 0;
1476 if(!datasetName.empty()) {
1477 // Create the group if not existent
1478 status = H5Lexists(b_h5Id, datasetName.c_str(), link_id);
1479 if(status == 0) {
1480 hid_t group_id = H5Gcreate(b_h5Id, datasetName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1481 b_error(group_id, datasetName, AT_);
1482 H5Gclose(group_id);
1483 }
1484 // Open path
1485 loc_id = H5Oopen(b_h5Id, datasetName.c_str(), H5P_DEFAULT);
1486 b_error(loc_id, datasetName, AT_);
1487 } else {
1488 loc_id = b_h5Id;
1489 }
1490
1491 // If the attribute already exists, delete it.
1492 if(b_hasAttribute(name, datasetName)) {
1493 if(datasetName.empty()) { // The attribute is attached to the file
1494 status = H5Adelete(loc_id, name.c_str());
1495 b_error(status, name, AT_);
1496 } else { // The attribute is attached to a group
1497 status = H5Adelete_by_name(loc_id, datasetName.c_str(), name.c_str(), H5P_DEFAULT);
1498 b_error(status, datasetName, AT_);
1499 }
1500 }
1501 // Create a new attribute
1502 attribute_id = H5Acreate(loc_id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT);
1503 b_error(attribute_id, name, AT_);
1504
1505 // Write out the attribute
1506 status = H5Awrite(attribute_id, dtype_id, value);
1507 b_error(status, name, AT_);
1508
1509 // Close all identifiers which have been created (except b_h5Id)
1510 status = H5Aclose(attribute_id);
1511 b_error(status, name, AT_);
1512 status = H5Sclose(dspace_id);
1513 b_error(status, name, AT_);
1514 if(!datasetName.empty()) {
1515 status = H5Oclose(loc_id);
1516 b_error(status, name, AT_);
1517 }
1518}
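// Illustrative call chain (hypothetical attribute name, not part of the original
// source): a call like setAttribute(noCells, "noCells") with an MInt value is
// dispatched to b_setAttribute<MInt>, which copies H5T_NATIVE_INT via hdf5_traits and
// ends up in b_createAttribute() above with totalCount = 1; a scalar dataspace is
// created and any pre-existing attribute of the same name is deleted before writing.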
1519
1520
1532template <>
1533void ParallelIoHdf5::b_createAttribute(const MString* value, const MString& name, const MString& datasetName,
1534 hid_t dtype_id, const size_type totalCount) {
1535 TRACE();
1536
1537 // Allocate dataspace for a scalar
1538 if(totalCount > 1) {
1539 mTerm(1, AT_, "Array of strings attributes not yet supported.");
1540 }
1541
1542 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
1543 b_error(link_id, name, AT_);
1544
1545 hsize_t dims = 1;
1546 hid_t dspace_id = H5Screate_simple(1, &dims, nullptr);
1547 dtype_id = H5Tcopy(H5T_C_S1);
1548 MInt length = strlen(value->c_str());
1549 H5Tset_size(dtype_id, length + 1);
1550 b_error(dspace_id, name, AT_);
1551
1552 hid_t attribute_id = -1;
1553 herr_t status = -1;
1554
1555 hid_t loc_id = 0;
1556 if(!datasetName.empty()) {
1557 // Create the group if not existent
1558 status = H5Lexists(b_h5Id, datasetName.c_str(), link_id);
1559 if(status == 0) {
1560 hid_t group_id = H5Gcreate(b_h5Id, datasetName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1561 b_error(group_id, datasetName, AT_);
1562 H5Gclose(group_id);
1563 }
1564 // Open path
1565 loc_id = H5Oopen(b_h5Id, datasetName.c_str(), H5P_DEFAULT);
1566 b_error(loc_id, datasetName, AT_);
1567 } else {
1568 loc_id = b_h5Id;
1569 }
1570
1571 // If the attribute already exists, delete it.
1572 if(b_hasAttribute(name, datasetName)) {
1573 if(datasetName.empty()) { // The attribute is attached to the file
1574 status = H5Adelete(loc_id, name.c_str());
1575 b_error(status, name, AT_);
1576 } else { // The attribute is attached to a group
1577 status = H5Adelete_by_name(loc_id, datasetName.c_str(), name.c_str(), H5P_DEFAULT);
1578 b_error(status, datasetName, AT_);
1579 }
1580 }
1581 // Create a new attribute
1582 attribute_id = H5Acreate(loc_id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT);
1583 b_error(attribute_id, name, AT_);
1584
1585 // Write out the attribute
1586 status = H5Awrite(attribute_id, dtype_id, value->c_str());
1587 b_error(status, name, AT_);
1588
1589 // Close all identifiers which have been created (except b_h5Id)
1590 status = H5Aclose(attribute_id);
1591 b_error(status, name, AT_);
1592 status = H5Sclose(dspace_id);
1593 b_error(status, name, AT_);
1594 status = H5Tclose(dtype_id);
1595 b_error(status, name, AT_);
1596 if(!datasetName.empty()) {
1597 status = H5Oclose(loc_id);
1598 b_error(status, name, AT_);
1599 }
1600}
1601
1602
1614template <class T>
1615void ParallelIoHdf5::b_setAttribute(const T* value, const MString& name, const MString& datasetName,
1616 const size_type totalCount) {
1617 TRACE();
1618
1619 // Create the type and specify its length
1620 hid_t dtype_id = H5Tcopy(hdf5_traits<T>::type());
1621 b_error(dtype_id, name, AT_);
1622
1623 b_createAttribute(value, name, datasetName, dtype_id, totalCount);
1624}
1625
1626
1638template <>
1639void ParallelIoHdf5::b_setAttribute(const MString* value, const MString& name, const MString& datasetName,
1640 const size_type totalCount) {
1641 TRACE();
1642
1643 if(totalCount > 1) mTerm(1, AT_, "Array of strings attributes not yet supported.");
1644
1645 // Create the type and specify its length
1646 hid_t dtype_id = H5Tcopy(H5T_C_S1);
1647 b_error(dtype_id, name, AT_);
1648 herr_t status = H5Tset_size(dtype_id, H5T_VARIABLE);
1649 b_error(status, name, AT_);
1650 b_createAttribute(value, name, datasetName, dtype_id, 1);
1651}
1652
1653
1654//------------------------------------------------------------------------------
1655// Auxiliary methods
1656//------------------------------------------------------------------------------
1671void ParallelIoHdf5::b_error(MInt status, const MString& name, const MString& location) {
1672 TRACE();
1673 if(status < 0) {
1674 cerr << endl;
1675 cerr << "*** ERROR in parallelio_hdf5 ***" << endl;
1676 cerr << "HDF5 error in function " << location << endl;
1677 cerr << "HDF5 returns status " << status << endl;
1678 cerr << "The file/variable/attribute in question was: " << name << endl;
1679 cerr << endl;
1680 TERMM(1, "HDF5 error in ParallelIoHdf5.");
1681 }
1682}
1683
1684
1699void ParallelIoHdf5::b_warning(MInt status, const MString& name, const MString& location) {
1700 TRACE();
1701 if(status < 0) {
1702 cerr << endl;
1703 cerr << "*** HDF5 Warning ***" << endl;
1704 cerr << "HDF5 warning in function " << location << endl;
1705 cerr << "HDF5 returns status " << status << endl;
1706 cerr << "The file/variable/attribute in question was: " << name << endl;
1707 cerr << endl;
1708 }
1709}
1710
1711
1712//------------------------------------------------------------------------------
1713// Data mode methods
1714//------------------------------------------------------------------------------
1730template <class T>
1731void ParallelIoHdf5::b_writeArray(const T* array, const MString& name, const size_type noDims, MPI_Offset* start,
1732 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
1733 MPI_Offset diskStride) {
1734 TRACE();
1735
1736 // Get variable id
1737 hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
1738 b_error(dset_id, name, AT_);
1739 hid_t dspace_id = H5Dget_space(dset_id);
1740 b_error(dspace_id, name, AT_);
1741 hid_t dtype_id = H5Dget_type(dset_id);
1742 b_error(dtype_id, name, AT_);
1743
1744 // Determine total data count
1745 size_type totalCount = 1;
1746 for(size_type d = 0; d < noDims; d++) {
1747 totalCount *= count[d];
1748 }
1749
1750 // Create temporary storage space if needed and set data pointers
1751 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1752 ScratchSpace<T> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1753
1754 // Pack strided data
1755 const T* data = 0;
1756 if(memoryStride == 1) {
1757 data = array;
1758 } else {
1759 for(MPI_Offset i = 0; i < totalCount; i++) {
1760 tmpScratch[i] = array[memoryStride * i];
1761 }
1762 // data = tmpScratch.begin();
1763 data = &tmpScratch[0];
1764 }
1765
1766 hid_t memory_dspace_id;
1767 herr_t status = -1;
1768 // Write array
1769 if(noChunks == 1) {
1770 // If number of chunks is one, write everything at once
1771 memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
1772 b_error(memory_dspace_id, name, AT_);
1773 if(diskStride == 1) {
1774 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
1775 } else {
1776 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride, (hsize_t*)count,
1777 nullptr);
1778 }
1779 b_error(status, name, AT_);
1780 status = H5Dwrite(dset_id, dtype_id, memory_dspace_id, dspace_id, b_h5DatasetXferHandle, data);
1781 b_error(status, name, AT_);
1782 } else {
1783 TERMM(1, "untested Hdf5 specific I/O method, please see comment for how"
1784 " to proceed");
1785 // WARNING: This method is untested yet. It is implemented to be
1786 // consistent with parallel NetCDF. However, it was not used by any of the
1787 // testcases. If your code uses this part of the code, please make sure that
1788 // the I/O still works as expected and then remove this warning as well as
1789 // the subsequent TERMM().
1790 // Determine variable id
1791 // Write in chunks
1792 MPI_Offset chunkSize = count[0] / noChunks;
1793 if(count[0] % noChunks > 0) {
1794 chunkSize += 1;
1795 }
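      // Worked example of the ceiling division above (illustrative): for count[0] = 10
      // rows and noChunks = 3, chunkSize becomes 10 / 3 + 1 = 4, so the three chunks
      // cover 4, 4 and finally 2 rows (count_[0] is clamped to the remainder below).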
1796
1797 // Determine number of entries for a fixed first dimension index
1798 size_type nDSize = 1;
1799 for(size_type d = 1; d < noDims; d++) {
1800 nDSize *= count[d];
1801 }
1802
1803 ScratchSpace<hsize_t> start_(noDims, FUN_, "start_");
1804 ScratchSpace<hsize_t> count_(noDims, FUN_, "count_");
1805
1806 std::copy(start, start + noDims, &start_[0]);
1807 std::copy(count, count + noDims, &count_[0]);
1808
1809 for(size_type i = 0; i < noChunks; i++) {
1810 start_[0] = std::min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1811 count_[0] = std::max(std::min(chunkSize, count[0] - i * chunkSize), 0ll);
1812 const T* data_ = data + std::min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1813 memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
1814 b_error(memory_dspace_id, name, AT_);
1815 if(diskStride == 1) {
1816 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
1817 } else {
1818 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], (hsize_t*)diskStride, &count_[0], nullptr);
1819 }
1820 b_error(status, name, AT_);
1821 status = H5Dwrite(dset_id, dtype_id, memory_dspace_id, dspace_id, b_h5DatasetXferHandle, data_);
1822 b_error(status, name, AT_);
1823 }
1824 }
1825
1826 // Close all identifiers which have been created (except b_h5Id)
1827 status = H5Sclose(memory_dspace_id);
1828 b_error(status, name, AT_);
1829 status = H5Sclose(dspace_id);
1830 b_error(status, name, AT_);
1831 status = H5Tclose(dtype_id);
1832 b_error(status, name, AT_);
1833 status = H5Dclose(dset_id);
1834 b_error(status, name, AT_);
1835}
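// Minimal sketch of the collective write pattern used above (illustrative; assumes each
// rank owns a contiguous slice of the first dimension; localCount, localOffset and
// localData are placeholders):
//
//   hsize_t cnt[1] = {localCount};
//   hsize_t off[1] = {localOffset};
//   hid_t memSpace = H5Screate_simple(1, cnt, nullptr);   // shape of the local buffer
//   hid_t fileSpace = H5Dget_space(dset_id);              // layout of the file dataset
//   H5Sselect_hyperslab(fileSpace, H5S_SELECT_SET, off, nullptr, cnt, nullptr);
//   H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memSpace, fileSpace,
//            b_h5DatasetXferHandle, localData);           // collective MPI-IO transfer
//   H5Sclose(memSpace);
//   H5Sclose(fileSpace);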
1836
1837
1847template <class T>
1848void ParallelIoHdf5::b_writeScalar(T scalar, const MString& name) {
1849 TRACE();
1850
1851 // Get variable id
1852 hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
1853 b_error(dset_id, name, AT_);
1854 hid_t dtype_id = H5Dget_type(dset_id);
1855 b_error(dtype_id, name, AT_);
1856
1857 // Write scalar
1858 herr_t status = H5Dwrite(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &scalar);
1859 b_error(status, name, AT_);
1860
1861 // Close all identifiers which have been created (except b_h5Id)
1862 status = H5Dclose(dset_id);
1863 b_error(status, name, AT_);
1864}
1865
1866
1884template <class T>
1885void ParallelIoHdf5::b_readArray(T* array, const MString& name, const size_type noDims, MPI_Offset* start,
1886 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
1887 MPI_Offset diskStride) {
1888 TRACE();
1889
1890 // Get variable id
1891 hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
1892 b_error(dset_id, name, AT_);
1893 hid_t dspace_id = H5Dget_space(dset_id);
1894 b_error(dspace_id, name, AT_);
1895 hid_t dtype_id = H5Dget_type(dset_id);
1896 b_error(dtype_id, name, AT_);
1897
1898 // Determine total data count
1899 size_type totalCount = 1;
1900 for(size_type d = 0; d < noDims; d++) {
1901 totalCount *= count[d];
1902 }
1903
1904 // Create temporary storage space if needed and set data pointers
1905 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1906 ScratchSpace<T> tmpScratch(tmpScratchSize, FUN_, "tmpStorage");
1907 T* data = 0;
1908 if(memoryStride == 1) {
1909 data = array;
1910 } else {
1911 // data = tmpScratch.begin();
1912 data = &tmpScratch[0];
1913 }
1914
1915 hid_t memory_dspace_id;
1916 herr_t status = -1;
1917 // Read array
1918 if(noChunks == 1) {
1919 // If number of chunks is one, read everything at once
1920 memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
1921 b_error(memory_dspace_id, name, AT_);
1922 if(diskStride == 1) {
1923 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
1924 } else {
1925 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride, (hsize_t*)count,
1926 nullptr);
1927 }
1928 b_error(status, name, AT_);
1929 status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, data);
1930 b_error(status, name, AT_);
1931 } else {
1932 TERMM(1, "untested Hdf5 specific I/O method, please see comment for how"
1933 " to proceed");
1934 // WARNING: This method is untested yet. It is implemented to be
1935 // consistent with parallel NetCDF. However, it was not used by any of the
1936 // testcases. If your code uses this part of the code, please make sure that
1937 // the I/O still works as expected and then remove this warning as well as
1938 // the subsequent TERMM().
1939 // Determine variable id
1940 // Read in chunks
1941 MPI_Offset chunkSize = count[0] / noChunks;
1942 if(count[0] % noChunks > 0) {
1943 chunkSize += 1;
1944 }
1945
1946 // Determine number of entries for a fixed first dimension index
1947 size_type nDSize = 1;
1948 for(size_type d = 1; d < noDims; d++) {
1949 nDSize *= count[d];
1950 }
1951
1952 ScratchSpace<hsize_t> start_(noDims, FUN_, "start_");
1953 ScratchSpace<hsize_t> count_(noDims, FUN_, "count_");
1954
1955 std::copy(start, start + noDims, &start_[0]);
1956 std::copy(count, count + noDims, &count_[0]);
1957
1958 for(size_type i = 0; i < noChunks; i++) {
1959 start_[0] = std::min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1960 count_[0] = std::max(std::min(chunkSize, count[0] - i * chunkSize), 0ll);
1961 T* data_ = data + std::min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1962 memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
1963 b_error(memory_dspace_id, name, AT_);
1964 if(diskStride == 1) {
1965 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
1966 } else {
1967 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], (hsize_t*)diskStride, &count_[0], nullptr);
1968 }
1969 b_error(status, name, AT_);
1970 status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, data_);
1971 b_error(status, name, AT_);
1972 }
1973 }
1974
1975 // Unpack strided data if necessary
1976 if(memoryStride != 1) {
1977 for(MPI_Offset i = 0; i < totalCount; i++) {
1978 array[memoryStride * i] = tmpScratch[i];
1979 }
1980 }
1981
1982 // Close all identifiers which have been created (except b_h5Id)
1983 status = H5Sclose(memory_dspace_id);
1984 b_error(status, name, AT_);
1985 status = H5Sclose(dspace_id);
1986 b_error(status, name, AT_);
1987 status = H5Tclose(dtype_id);
1988 b_error(status, name, AT_);
1989 status = H5Dclose(dset_id);
1990 b_error(status, name, AT_);
1991}
1992
1993
2005template <class T>
2006void ParallelIoHdf5::b_readScalar(T* scalar, const MString& name) {
2007 TRACE();
2008
2009 // Get variable id
2010 hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
2011 b_error(dset_id, name, AT_);
2012 hid_t dtype_id = H5Dget_type(dset_id);
2013 b_error(dtype_id, name, AT_);
2014
2015 // Read scalar
2016 herr_t status = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, scalar);
2017 b_error(status, name, AT_);
2018
2019 // Close all identifiers
2020 status = H5Dclose(dset_id);
2021 b_error(status, name, AT_);
2022}
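// Illustrative round trip (hypothetical dataset name, not part of the original source):
// after a scalar has been defined and written via b_writeScalar(value, "noCells"), a
// later b_readScalar(&value, "noCells") fills value again through the H5Dread call
// above with H5S_ALL selections on both the memory and the file side.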
2023
2039template <class T>
2040void ParallelIoHdf5::b_readArray(T* array, const MString& path, const MString& name, const size_type noDims,
2041 const size_type* start, const size_type* count) {
2042 TRACE();
2043
2044 // Handle whether or not path was given
2045 hid_t loc_id = 0;
2046 MInt checkLoc_id = 0;
2047 if(path.empty()) {
2048 loc_id = b_h5Id;
2049 } else {
2050 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
2051 b_error(loc_id, path, AT_);
2052 checkLoc_id = 1;
2053 }
2054 // Get variable id
2055 hid_t dset_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
2056 b_error(dset_id, name, AT_);
2057 // Get file space
2058 hid_t fspace_id = H5Dget_space(dset_id);
2059 b_error(fspace_id, name, AT_);
2060
2061 hid_t dtype_id = H5Dget_type(dset_id);
2062 b_error(dtype_id, name, AT_);
2063
2064 // Prepare data
2065 ScratchSpace<hsize_t> start_(noDims, FUN_, "start_");
2066 ScratchSpace<hsize_t> count_(noDims, FUN_, "count_");
2067
2068 std::copy(start, start + noDims, &start_[0]);
2069 std::copy(count, count + noDims, &count_[0]);
2070
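 // Select the requested hyperslab in the file space and create a matching
 // contiguous memory space for the read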
2071 herr_t status = -1;
2072 status = H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, &start_[0], NULL, &count_[0], NULL);
2073 b_error(status, name, AT_);
2074 hid_t dspace_id = H5Screate_simple(noDims, &count_[0], NULL);
2075 b_error(dspace_id, name, AT_);
2076 status = H5Dread(dset_id, dtype_id, dspace_id, fspace_id, H5P_DEFAULT, array);
2077 b_error(status, name, AT_);
2078
2079 // Close all identifiers
2080 if(checkLoc_id) {
2081 status = H5Oclose(loc_id);
2082 b_error(status, name, AT_);
2083 }
2084 status = H5Dclose(dset_id);
2085 b_error(status, path, AT_);
2086 status = H5Sclose(fspace_id);
2087 b_error(status, name, AT_);
2088 status = H5Sclose(dspace_id);
2089 b_error(status, name, AT_);
2090}
2091
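 // Usage sketch (illustrative only; the handle "io", the group/dataset names and the
 // extents below are placeholders, not taken from this file): read a local block of a
 // dataset that resides inside a group,
 //   ParallelIo::size_type localStart = ..., localCount = ...;
 //   io.b_readArray(buffer, "/solution", "variables0", 1, &localStart, &localCount);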
2092
2110template <class T>
2111void ParallelIoHdf5::b_writeArray(const T* array, const MString& path, const MString& name, const size_type noDims,
2112 const size_type* start, const size_type* count, const size_type* ghost) {
2113 TRACE();
2114
2115 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
2116 b_error(link_id, name, AT_);
2117 // Handle whether or not path was given
2118 hid_t loc_id = b_h5Id;
2119 MInt checkLoc_id = 0;
2120 herr_t status = -1;
2121 if(path.empty()) {
2122 loc_id = b_h5Id;
2123 } else {
2124 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
2125 b_error(loc_id, path, AT_);
2126 checkLoc_id = 1;
2127 }
2128
2129 // Check if dataset exists
2130 status = H5Lexists(loc_id, name.c_str(), link_id);
2131 b_error(status, name, AT_);
2132 // Get variable id
2133 hid_t dset_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
2134 b_error(dset_id, name, AT_);
2135
2136 hid_t dtype_id = H5Dget_type(dset_id);
2137 b_error(dtype_id, name, AT_);
2138
2139 // Set memory information
2140 ScratchSpace<hsize_t> count_(noDims, FUN_, "count_");
2141 ScratchSpace<hsize_t> start_(noDims, FUN_, "start_");
2142 ScratchSpace<hsize_t> ghost_(noDims, FUN_, "ghost_");
2143
2144 for(size_type i = 0; i < noDims; i++) {
2145 count_[i] = count[i] + (2 * ghost[i]);
2146 }
2147 std::copy(start, start + noDims, &start_[0]);
2148 std::copy(ghost, ghost + noDims, &ghost_[0]);
2149
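 // The memory data space covers the full local array including the ghost layers;
 // only the interior region (offset ghost_, extent count) is selected for writing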
2150 hid_t dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
2151 b_error(dspace_id, name, AT_);
2152 // Reassign count values to the count_ array before selecting the hyperslab
2153 std::copy(count, count + noDims, &count_[0]);
2154 status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &ghost_[0], nullptr, &count_[0], nullptr);
2155 b_error(status, name, AT_);
2156 // Obtain space
2157 hid_t fspace_id = H5Dget_space(dset_id);
2158 b_error(fspace_id, name, AT_);
2159 // Select hyperslab
2160 status = H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
2161 b_error(status, name, AT_);
2162 status = H5Dwrite(dset_id, dtype_id, dspace_id, fspace_id, b_h5DatasetXferHandle, array);
2163
2164#if !defined(HOST_Hawk)
2165 // HDF5 installation on Hawk produces strange warnings/errors, although files are ok
2166 b_error(status, name, AT_);
2167#endif
2168
2169 status = H5Pclose(link_id);
2170 b_error(status, name, AT_);
2171 if(checkLoc_id) {
2172 status = H5Oclose(loc_id);
2173 b_error(status, name, AT_);
2174 }
2175 status = H5Dclose(dset_id);
2176 b_error(status, path, AT_);
2177 status = H5Sclose(dspace_id);
2178 b_error(status, name, AT_);
2179 status = H5Sclose(fspace_id);
2180 b_error(status, name, AT_);
2181}
2182
2183
2199void ParallelIoHdf5::b_defineArray(maiabd_type dsetType, const MString& path, const MString& name,
2200 const size_type noDims, const size_type* count) {
2201 TRACE();
2202
2203 hid_t dtype_id;
2204 switch(dsetType) {
2205 case PIO_FLOAT: {
2206 dtype_id = H5T_NATIVE_DOUBLE;
2207 break;
2208 }
2209 case PIO_INT: {
2210 dtype_id = H5T_NATIVE_INT;
2211 break;
2212 }
2213 case PIO_LONG: {
2214 dtype_id = H5T_NATIVE_LONG;
2215 break;
2216 }
2217 case PIO_STRING: {
2218 TERMM(1, "Not yet implemented");
2219 break;
2220 }
2221 case PIO_UCHAR: {
2222 dtype_id = H5T_NATIVE_UCHAR;
2223 break;
2224 }
2225 case PIO_ULONGLONG: {
2226 dtype_id = H5T_NATIVE_ULLONG;
2227 break;
2228 }
2229
2230 default: {
2231 TERMM(1, "Invalid ParallelIo data type!");
2232 }
2233 }
2234
2235 ScratchSpace<hsize_t> datcount(noDims, FUN_, "count_");
2236 std::copy(count, count + noDims, &datcount[0]);
2237
2238 hid_t datspace = H5Screate_simple(noDims, &datcount[0], NULL);
2239 b_error(datspace, name, AT_);
2240 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
2241 b_error(link_id, name, AT_);
2242
2243 // Handle whether or not path was given
2244 hid_t loc_id = b_h5Id;
2245 MInt checkLoc_id = 0;
2246 herr_t status = -1;
2247 if(path.empty()) {
2248 loc_id = b_h5Id;
2249 } else {
2250 // Create the group if not existent
2251 status = H5Lexists(b_h5Id, path.c_str(), link_id);
2252 if(status == 0) {
2253 // b_createGroup(path);
2254 hid_t group_id = H5Gcreate(b_h5Id, path.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 b_error(group_id, path, AT_);
2255 status = H5Gclose(group_id);
2256 b_error(status, path, AT_);
2257 }
2258 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
2259 b_error(loc_id, name, AT_);
2260
2261 checkLoc_id = 1;
2262 }
2263
2264 status = H5Lexists(loc_id, name.c_str(), link_id);
2265 b_error(status, path, AT_);
2266
2267 hid_t dset_id = H5Dcreate2(loc_id, name.c_str(), dtype_id, datspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
2268 b_error(dset_id, name, AT_);
2269
2270 status = H5Dclose(dset_id);
2271 b_error(status, path, AT_);
2272
2273 // Close all identifiers
2274 if(checkLoc_id) {
2275 status = H5Oclose(loc_id);
2276 b_error(status, name, AT_);
2277 }
2278
2279 status = H5Pclose(link_id);
2280 b_error(status, name, AT_);
2281 status = H5Sclose(datspace);
2282 b_error(status, name, AT_);
2283}
2284
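 // Usage sketch (illustrative only; "io", the names and the extents are placeholders,
 // not taken from this file): a dataset is first defined with its global size and then
 // written with local hyperslab offsets,
 //   ParallelIo::size_type totalCount = ..., localStart = ..., localCount = ..., noGhost = 0;
 //   io.b_defineArray(PIO_FLOAT, "/solution", "variables0", 1, &totalCount);
 //   io.b_writeArray(localData, "/solution", "variables0", 1, &localStart, &localCount, &noGhost);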
2285
2300void ParallelIoHdf5::b_getDatasetSize(const MString& name, const MString& path, size_type noDims, size_type* data) {
2301 TRACE();
2302
2303 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
2304 b_error(link_id, name, AT_);
2305 // Handle whether or not path was given
2306 hid_t loc_id = 0;
2307 MInt checkLoc_id = 0;
2308 if(path.empty()) {
2309 loc_id = b_h5Id;
2310 } else {
2311 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
2312 b_error(loc_id, path, AT_);
2313 checkLoc_id = 1;
2314 }
2315
2316 // Check if object exists
2317 herr_t status = H5Lexists(loc_id, name.c_str(), link_id);
2318 if(status == 0) { // Object does not exist
2319 m_log << "WARNING: Dataset " << name << " does not exist at path " << path << std::endl;
2320 } else {
2321 hid_t data_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
2322 // 1) Get space information
2323 hid_t filespace = H5Dget_space(data_id);
2324 ScratchSpace<hsize_t> count(noDims, FUN_, "count");
2325 ScratchSpace<hsize_t> maxcount(noDims, FUN_, "maxcount");
2326 H5Sget_simple_extent_dims(filespace, &count[0], &maxcount[0]);
2327 // 2) Get the size
2328 for(size_type i = 0; i < noDims; i++) {
2329 data[i] = count[i];
2330 }
2331 H5Sclose(filespace);
2332 H5Oclose(data_id);
2333 }
2334 H5Pclose(link_id);
2335 if(checkLoc_id) H5Oclose(loc_id);
2336}
2337
2338
2352ParallelIo::size_type ParallelIoHdf5::b_getDatasetNoDims(const MString& name, const MString& path) {
2353 TRACE();
2354
2355 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
2356 b_error(link_id, name, AT_);
2357 // Handle whether or not path was given
2358 hid_t loc_id = 0;
2359 MBool checkLoc_id = false;
2360 if(path.empty()) {
2361 loc_id = b_h5Id;
2362 } else {
2363 loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
2364 b_error(loc_id, path, AT_);
2365 checkLoc_id = true;
2366 }
2367
2368 hid_t data_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
2369 b_error(data_id, name, AT_);
2370
2371 // 1) Get space information
2372 hid_t dspace_id = H5Dget_space(data_id);
2373 b_error(dspace_id, name, AT_);
2374
2375 size_type noDims = H5Sget_simple_extent_ndims(dspace_id);
2376 b_error(noDims, name, AT_);
2377
2378 herr_t status = H5Sclose(dspace_id);
2379 b_error(status, name, AT_);
2380
2381 status = H5Oclose(data_id);
2382 b_error(status, name, AT_);
2383
2384 status = H5Pclose(link_id);
2385 b_error(status, name, AT_);
2386
2387 if(checkLoc_id) {
2388 status = H5Oclose(loc_id);
2389 b_error(status, name, AT_);
2390 }
2391
2392 return noDims;
2393}
2394
2395
void ParallelIoHdf5::b_createGroup(const MString& path) {
2408 TRACE();
2409
2410 hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
2411 b_error(link_id, path, AT_);
2412
2413 // Get path tokens
2414 std::vector<MString> p;
2415 tokenize(path, p, "/", false);
2416
2417 MString parentpath = "/";
2418 MString objectpath = "/";
2419 // Get the number of groups, datasets, etc. in the path
2420 MInt num = distance(p.begin(), p.end()) - 1;
2421
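 // Walk the path token by token and create every intermediate group level that does
 // not exist yet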
2422 for(MInt i = 0; i < num; ++i) {
2423 if(p[i] == "") {
2424 continue;
2425 }
2426
2427 if(i != 0) {
2428 objectpath.append("/");
2429 parentpath.append(p[i - 1]);
2430 parentpath.append("/");
2431 }
2432 objectpath.append(p[i]);
2433 herr_t status = H5Lexists(b_h5Id, objectpath.c_str(), link_id);
2434 b_error(status, path, AT_);
2435
2436 if(status == 0) { // Path does not exist
2437 if(i == 0) {
2438 hid_t group_id = H5Gcreate(b_h5Id, p[i].c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
2439 b_error(group_id, path, AT_);
2440 status = H5Gclose(group_id);
2441 b_error(status, path, AT_);
2442 } else { // We are in another group
2443 hid_t group_id = H5Gopen(b_h5Id, parentpath.c_str(), H5P_DEFAULT);
2444 b_error(group_id, path, AT_);
2445 hid_t new_group_id = H5Gcreate(group_id, p[i].c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
2446 b_error(new_group_id, path, AT_);
2447
2448 // Close current groups
2449 status = H5Gclose(new_group_id);
2450 b_error(status, path, AT_);
2451 status = H5Gclose(group_id);
2452 b_error(status, path, AT_);
2453 }
2454 }
2455 }
2456
2457 herr_t status = H5Pclose(link_id);
2458 b_error(status, path, AT_);
2459}
2460
2461
2473void ParallelIoHdf5::b_getGroupNames(vector<MString>& names, const MString& path) {
2474 TRACE();
2475 if(b_getNoGroups(path) <= 0) {
2476 return;
2477 }
2478
2479 MString pathStr = "/";
2480 if(!path.empty()) {
2481 pathStr = path;
2482 }
2483
2484 hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
2485 b_error(group_id, m_fileName, AT_);
2486
2487 // Iterate through each object in the group.
2488 hsize_t noObj = 0;
2489 herr_t status = H5Gget_num_objs(group_id, &noObj);
2490 for(MInt idx = 0; idx < (signed)noObj; idx++) {
2491 int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
2492 b_error(obj_type, m_fileName, AT_);
2493 if(obj_type == H5G_GROUP) {
2494 char GroupName[NC_MAX_NAME];
2495 status = H5Gget_objname_by_idx(group_id, (hsize_t)idx, GroupName, NC_MAX_NAME);
2496 b_error(status, m_fileName, AT_);
2497 names.emplace_back(GroupName);
2498 }
2499 }
2500
2501 status = H5Gclose(group_id);
2502 b_error(status, m_fileName, AT_);
2503}
2504
2516ParallelIo::size_type ParallelIoHdf5::b_getNoGroups(const MString& path) {
2517 TRACE();
2518
2519 MString pathStr = "/";
2520 if(!path.empty()) {
2521 pathStr = path;
2522 }
2523
2524 hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
2525 b_error(group_id, m_fileName, AT_);
2526
2527 // Iterate through each object in the group.
2528 hsize_t noObj = 0;
2529 ParallelIo::size_type noGroups = 0;
2530
2531 herr_t status = H5Gget_num_objs(group_id, &noObj);
2532 b_error(status, m_fileName, AT_);
2533 for(MInt idx = 0; idx < (signed)noObj; idx++) {
2534 int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
2535 b_error(obj_type, m_fileName, AT_);
2536 if(obj_type == H5G_GROUP) {
2537 noGroups++;
2538 }
2539 }
2540
2541 status = H5Gclose(group_id);
2542 b_error(status, m_fileName, AT_);
2543
2544 return noGroups;
2545}
2546
2547
2559void ParallelIoHdf5::b_getDatasetNames(vector<MString>& names, const MString& path) {
2560 TRACE();
2561 if(b_getNoDatasets(path) <= 0) {
2562 return;
2563 }
2564
2565 MString pathStr = "/";
2566 if(!path.empty()) {
2567 pathStr = path;
2568 }
2569
2570 hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
2571 b_error(group_id, m_fileName, AT_);
2572
2573 // Iterate through each object in the group.
2574 hsize_t noObj = 0;
2575 herr_t status = H5Gget_num_objs(group_id, &noObj);
2576 for(MInt idx = 0; idx < (signed)noObj; idx++) {
2577 int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
2578 b_error(obj_type, m_fileName, AT_);
2579 if(obj_type == H5G_DATASET) {
2580 char DatasetName[NC_MAX_NAME];
2581 status = H5Gget_objname_by_idx(group_id, (hsize_t)idx, DatasetName, NC_MAX_NAME);
2582 b_error(status, m_fileName, AT_);
2583 names.emplace_back(DatasetName);
2584 }
2585 }
2586
2587 status = H5Gclose(group_id);
2588 b_error(status, m_fileName, AT_);
2589}
2590
2602ParallelIo::size_type ParallelIoHdf5::b_getNoDatasets(const MString& path) {
2603 TRACE();
2604
2605 MString pathStr = "/";
2606 if(!path.empty()) {
2607 pathStr = path;
2608 }
2609
2610 hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
2611 b_error(group_id, m_fileName, AT_);
2612
2613 // Iterate through each object in the group.
2614 hsize_t noObj = 0;
2615 ParallelIo::size_type noDatasets = 0;
2616
2617 herr_t status = H5Gget_num_objs(group_id, &noObj);
2618 b_error(status, m_fileName, AT_);
2619 for(MInt idx = 0; idx < (signed)noObj; idx++) {
2620 int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
2621 b_error(obj_type, m_fileName, AT_);
2622 if(obj_type == H5G_DATASET) {
2623 noDatasets++;
2624 }
2625 }
2626
2627 status = H5Gclose(group_id);
2628 b_error(status, m_fileName, AT_);
2629
2630 return noDatasets;
2631}
2632
2633// Explicit instantiations for all supported types
2634template void ParallelIoHdf5::b_setAttribute(const MFloat* value, const MString& name, const MString& datasetName,
2635 const size_type totalCount);
2636template void ParallelIoHdf5::b_setAttribute(const MInt* value, const MString& name, const MString& datasetName,
2637 const size_type totalCount);
2638template void ParallelIoHdf5::b_setAttribute(const MLong* value, const MString& name, const MString& datasetName,
2639 const size_type totalCount);
2640template void ParallelIoHdf5::b_setAttribute(const MUchar* value, const MString& name, const MString& datasetName,
2641 const size_type totalCount);
2642template void ParallelIoHdf5::b_setAttribute(const MUlong* value, const MString& name, const MString& datasetName,
2643 const size_type totalCount);
2644
2645// Explicit instantiations for all supported types
2646template void ParallelIoHdf5::b_getAttribute(MFloat* value, const MString& name, const MString& datasetName,
2647 const size_type totalCount);
2648template void ParallelIoHdf5::b_getAttribute(MInt* value, const MString& name, const MString& datasetName,
2649 const size_type totalCount);
2650template void ParallelIoHdf5::b_getAttribute(MLong* value, const MString& name, const MString& datasetName,
2651 const size_type totalCount);
2652template void ParallelIoHdf5::b_getAttribute(MUchar* value, const MString& name, const MString& datasetName,
2653 const size_type totalCount);
2654template void ParallelIoHdf5::b_getAttribute(MChar* value, const MString& name, const MString& datasetName,
2655 const size_type totalCount);
2656template void ParallelIoHdf5::b_getAttribute(MUlong* value, const MString& name, const MString& datasetName,
2657 const size_type totalCount);
2658
2659template void ParallelIoHdf5::b_writeArray(const MFloat* array, const MString& name, const size_type noDims,
2660 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2661 const size_type noChunks, MPI_Offset diskStride);
2662template void ParallelIoHdf5::b_writeArray(const MInt* array, const MString& name, const size_type noDims,
2663 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2664 const size_type noChunks, MPI_Offset diskStride);
2665template void ParallelIoHdf5::b_writeArray(const MLong* array, const MString& name, const size_type noDims,
2666 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2667 const size_type noChunks, MPI_Offset diskStride);
2668template void ParallelIoHdf5::b_writeArray(const MUchar* array, const MString& name, const size_type noDims,
2669 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2670 const size_type noChunks, MPI_Offset diskStride);
2671template void ParallelIoHdf5::b_writeArray(const MUlong* array, const MString& name, const size_type noDims,
2672 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2673 const size_type noChunks, MPI_Offset diskStride);
2674
2675template void ParallelIoHdf5::b_writeArray(const MFloat* array, const MString& path, const MString& name,
2676 const size_type noDims, const size_type* start, const size_type* count,
2677 const size_type* ghost);
2678template void ParallelIoHdf5::b_writeArray(const MInt* array, const MString& path, const MString& name,
2679 const size_type noDims, const size_type* start, const size_type* count,
2680 const size_type* ghost);
2681
2682
2683template void ParallelIoHdf5::b_readArray(MFloat* array, const MString& name, const size_type noDims, MPI_Offset* start,
2684 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
2685 MPI_Offset diskStride);
2686template void ParallelIoHdf5::b_readArray(MInt* array, const MString& name, const size_type noDims, MPI_Offset* start,
2687 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
2688 MPI_Offset diskStride);
2689template void ParallelIoHdf5::b_readArray(MLong* array, const MString& name, const size_type noDims, MPI_Offset* start,
2690 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
2691 MPI_Offset diskStride);
2692template void ParallelIoHdf5::b_readArray(MUchar* array, const MString& name, const size_type noDims, MPI_Offset* start,
2693 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
2694 MPI_Offset diskStride);
2695template void ParallelIoHdf5::b_readArray(MUlong* array, const MString& name, const size_type noDims, MPI_Offset* start,
2696 MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
2697 MPI_Offset diskStride);
2698
2699template void ParallelIoHdf5::b_readArray(MFloat* array, const MString& path, const MString& name, size_type noDims,
2700 const size_type* start, const size_type* count);
2701template void ParallelIoHdf5::b_readArray(MInt* array, const MString& path, const MString& name, size_type noDims,
2702 const size_type* start, const size_type* count);
2703
2704template void ParallelIoHdf5::b_readScalar(MFloat* scalar, const MString& name);
2705template void ParallelIoHdf5::b_readScalar(MInt* scalar, const MString& name);
2706template void ParallelIoHdf5::b_readScalar(MLong* scalar, const MString& name);
2707template void ParallelIoHdf5::b_readScalar(MUchar* scalar, const MString& name);
2708template void ParallelIoHdf5::b_readScalar(MUlong* scalar, const MString& name);
2709
2710template void ParallelIoHdf5::b_writeScalar(MFloat scalar, const MString& name);
2711template void ParallelIoHdf5::b_writeScalar(MInt scalar, const MString& name);
2712template void ParallelIoHdf5::b_writeScalar(MLong scalar, const MString& name);
2713template void ParallelIoHdf5::b_writeScalar(MUchar scalar, const MString& name);
2714template void ParallelIoHdf5::b_writeScalar(MUlong scalar, const MString& name);
2715
2716//#ifdef MAIA_MS_COMPILER
2717// template void ParallelIoHdf5::b_getAttribute(double* value, const MString& name, const MString& datasetName,
2718// const size_type totalCount);
2719// template void ParallelIoHdf5::b_getAttribute(int* value, const MString& name,
2720// const MString& datasetName,
2721// const size_type totalCount);
2722// template void ParallelIoHdf5::b_getAttribute(__int64* value, const MString& name,
2723// const MString& datasetName,
2724// const size_type totalCount);
2725// template void ParallelIoHdf5::b_getAttribute(unsigned __int64* value, const MString& name,
2726// const MString& datasetName,
2727// const size_type totalCount);
2728
2729// template void ParallelIoHdf5::b_readScalar(__int64* scalar, const MString& name);
2730// template void ParallelIoHdf5::b_readScalar(int* scalar, const MString& name);
2731// template void ParallelIoHdf5::b_readScalar(long* scalar, const MString& name);
2732// template void ParallelIoHdf5::b_readScalar(double* scalar, const MString& name);
2733//
2734// template void ParallelIoHdf5::b_writeScalar(long scalar, const MString& name);
2735// template void ParallelIoHdf5::b_writeScalar(__int64 scalar, const MString& name);
2736// template void ParallelIoHdf5::b_writeScalar(int scalar, const MString& name);
2737// template void ParallelIoHdf5::b_writeScalar(double scalar, const MString& name);
2738
2739// template void ParallelIoHdf5::b_writeArray(const int* array, const MString& name, const size_type noDims,
2740// MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2741// const size_type noChunks, MPI_Offset diskStride);
2742//
2743// template void ParallelIoHdf5::b_writeArray(const __int64* array, const MString& name,
2744// const size_type noDims, MPI_Offset* start,
2745// MPI_Offset* count, MPI_Offset memoryStride,
2746// const size_type noChunks, MPI_Offset diskStride);
2747//
2748// template void ParallelIoHdf5::b_writeArray(const double* array, const MString& name,
2749// const size_type noDims, MPI_Offset* start,
2750// MPI_Offset* count, MPI_Offset memoryStride,
2751// const size_type noChunks, MPI_Offset diskStride);
2752//
2753// template void ParallelIoHdf5::b_writeArray(const unsigned char* array, const MString& name,
2754// const size_type noDims, MPI_Offset* start,
2755// MPI_Offset* count, MPI_Offset memoryStride,
2756// const size_type noChunks, MPI_Offset diskStride);
2757
2758// template void ParallelIoHdf5::b_readArray(int* array, const MString& name, const size_type noDims,
2759// MPI_Offset* start, MPI_Offset* count,
2760// MPI_Offset memoryStride, const size_type noChunks,
2761// MPI_Offset diskStride);
2762//
2763// template void ParallelIoHdf5::b_readArray(__int64* array, const MString& name,
2764// const size_type noDims,
2765// MPI_Offset* start, MPI_Offset* count,
2766// MPI_Offset memoryStride, const size_type noChunks,
2767// MPI_Offset diskStride);
2768//
2769// template void ParallelIoHdf5::b_readArray(double* array, const MString& name,
2770// const size_type noDims, MPI_Offset* start,
2771// MPI_Offset* count, MPI_Offset memoryStride,
2772// const size_type noChunks, MPI_Offset diskStride);
2773//
2774// template void ParallelIoHdf5::b_readArray(unsigned char* array, const MString& name,
2775// const size_type noDims, MPI_Offset* start,
2776// MPI_Offset* count, MPI_Offset memoryStride,
2777// const size_type noChunks, MPI_Offset diskStride);
2778//#endif
2779
2780#endif