#if not defined(MAIA_WINDOWS)
#define NC_MAX_NAME 256
using namespace parallel_io;
// Map MAIA types to native HDF5 datatypes
template <class DataType>
struct hdf5_traits; // primary template, specialized below for each supported type

template <>
struct hdf5_traits<MFloat> {
  static hid_t type() { return H5T_NATIVE_DOUBLE; }
};

template <>
struct hdf5_traits<MInt> {
  static hid_t type() { return H5T_NATIVE_INT; }
};

template <>
struct hdf5_traits<MLong> {
  static hid_t type() { return H5T_NATIVE_LONG; }
};

template <>
struct hdf5_traits<MChar> {
  static hid_t type() { return H5T_NATIVE_CHAR; }
};

template <>
struct hdf5_traits<MUchar> {
  static hid_t type() { return H5T_NATIVE_UCHAR; }
};

template <>
struct hdf5_traits<MUlong> {
  static hid_t type() { return H5T_NATIVE_ULLONG; }
};
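// The traits above let templated I/O code pick the matching HDF5 datatype at
// compile time. A minimal sketch of how such a mapping is typically used
// (illustrative only; writeFull() and the dataset name "/example" are
// assumptions, not part of this file; requires <hdf5.h>):
template <class DataType>
void writeFull(hid_t fileId, const DataType* data, hsize_t n) {
  const hsize_t dims[1] = {n};
  hid_t space = H5Screate_simple(1, dims, nullptr);
  hid_t dset = H5Dcreate2(fileId, "/example", hdf5_traits<DataType>::type(), space,
                          H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  H5Dwrite(dset, hdf5_traits<DataType>::type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
  H5Dclose(dset);
  H5Sclose(space);
}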
// b_isValidFile(): a file is considered valid if it can be opened read-only
fileId = H5Fopen(name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
status = H5Fclose(fileId);
// Constructor: abort on an unknown file mode
mTerm(1, AT_, "Unsupported file mode.");

// Destructor: close the file and (in debug builds) warn about variables that
// were defined but never written
status = H5Fclose(b_h5Id);
#ifdef MAIA_EXTRA_DEBUG
cerr << "Warning: array '" << *it << "' in file '" << m_fileName << "' was defined but never written. "
     << "Make sure that this is the intended behavior." << endl;
cerr << "Warning: scalar '" << *it << "' in file '" << m_fileName << "' was defined but never written. "
     << "Make sure that this is the intended behavior." << endl;
#endif
const size_type totalCount);
// Gather meta information for the file header: user, host, working directory,
// and creation date
const MInt maxNoChars = 256;
MChar user[maxNoChars];
MChar host[maxNoChars];
MChar dir[maxNoChars];
MChar date[maxNoChars];

fill(user, user + maxNoChars, '\0');
fill(host, host + maxNoChars, '\0');
fill(dir, dir + maxNoChars, '\0');
fill(date, date + maxNoChars, '\0');

// Determine the user name
#if not defined(MAIA_WINDOWS)
p = getpwuid(getuid());
if(p) {
  strncpy(user, p->pw_name, maxNoChars - 1);
} else {
  strncpy(user, "n/a", maxNoChars - 1);
}
#else
strncpy(user, "windows", maxNoChars - 1);
#endif

// Determine host name and current working directory
gethostname(host, maxNoChars - 1);
#if defined(MAIA_WINDOWS)
_getcwd(dir, maxNoChars - 1);
#else
char* retv = getcwd(dir, maxNoChars - 1);
ASSERT(retv != nullptr, "");
#endif

// Determine and format the current local time
timeInfo = localtime(&rawTime);
strftime(date, maxNoChars, "%Y-%m-%d %H:%M:%S", timeInfo);

// Pack all fields into one buffer so that a single broadcast suffices
const MInt noItems = 4;
MChar buffer[noItems * maxNoChars];
memcpy(buffer + 0 * maxNoChars, user, maxNoChars);
memcpy(buffer + 1 * maxNoChars, host, maxNoChars);
memcpy(buffer + 2 * maxNoChars, dir, maxNoChars);
memcpy(buffer + 3 * maxNoChars, date, maxNoChars);

// ... buffer is broadcast, then unpacked on every rank
memcpy(user, buffer + 0 * maxNoChars, maxNoChars);
memcpy(host, buffer + 1 * maxNoChars, maxNoChars);
memcpy(dir, buffer + 2 * maxNoChars, maxNoChars);
memcpy(date, buffer + 3 * maxNoChars, maxNoChars);

// Assemble the compiler/build information string
MString build = MString(XSTRINGIFY(MAIA_COMPILER_STRING)) + " " + MString(XSTRINGIFY(MAIA_BUILD_TYPE_STRING)) + " ("
                + MString(XSTRINGIFY(MAIA_COMPILER_VERSION_STRING)) + ")";
// b_defineArray(): translate the ParallelIo type to an HDF5 datatype
// (alternative branches of the type switch)
dtype_id = H5T_NATIVE_DOUBLE;                 // float
dtype_id = H5T_NATIVE_INT;                    // int
dtype_id = H5T_NATIVE_LONG;                   // long
dtype_id = H5Tcopy(H5T_C_S1);                 // string:
status = H5Tset_size(dtype_id, H5T_VARIABLE); //   variable-length C string
dtype_id = H5T_NATIVE_UCHAR;                  // unsigned char
dtype_id = H5T_NATIVE_ULLONG;                 // unsigned long long
TERMM(1, "Invalid ParallelIo data type!");    // unknown type

// Create the dataspace and the (still empty) dataset, then release the handles
hid_t dspace_id = H5Screate_simple(noDims, (hsize_t*)(totalCount), nullptr);
hid_t dset_id = H5Dcreate2(b_h5Id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
status = H5Dclose(dset_id);
status = H5Sclose(dspace_id);
status = H5Tclose(dtype_id);
// b_defineScalar(): same type mapping, but with a scalar dataspace
dtype_id = H5T_NATIVE_DOUBLE;
dtype_id = H5T_NATIVE_INT;
dtype_id = H5T_NATIVE_LONG;
dtype_id = H5Tcopy(H5T_C_S1);
status = H5Tset_size(dtype_id, H5T_VARIABLE);
dtype_id = H5T_NATIVE_UCHAR;
dtype_id = H5T_NATIVE_ULLONG;
TERMM(1, "Invalid ParallelIo data type!");

hid_t dspace_id = H5Screate(H5S_SCALAR);
hid_t dset_id = H5Dcreate2(b_h5Id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
status = H5Dclose(dset_id);
status = H5Sclose(dspace_id);
status = H5Tclose(dtype_id);
// b_hasDataset(): check whether a dataset with the given name (and, if
// requested, number of dimensions) exists
MBool varExists = false;
hid_t dset_id = -1, dspace_id = -1;
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
htri_t linkExists = H5Lexists(b_h5Id, name.c_str(), link_id);
b_error(linkExists, name, AT_);
if(linkExists != 0) {
  dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
  dspace_id = H5Dget_space(dset_id);
  int nDims = H5Sget_simple_extent_ndims(dspace_id);
  if(nDims == noDimensions || noDimensions == -1) {
    varExists = true;
  }
  status = H5Sclose(dspace_id);
  status = H5Dclose(dset_id);
}
status = H5Pclose(link_id);
// Check whether an object with the given name exists below the given path
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
MBool exists = false;
MBool checkGroup = false;
herr_t pathExists = H5Lexists(b_h5Id, path.c_str(), link_id);
if(pathExists != 0) {
  loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
}
herr_t status = H5Lexists(loc_id, name.c_str(), link_id);
if(checkGroup) H5Oclose(loc_id);
// b_getDatasetType(): determine the ParallelIo type of an existing dataset
hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
hid_t dtype_id = H5Dget_type(dset_id);
if(H5Tequal(dtype_id, H5T_NATIVE_INT) > 0) {
  // integer dataset
} else if(H5Tequal(dtype_id, H5T_NATIVE_DOUBLE) > 0) {
  // floating-point dataset
} else if(H5Tequal(dtype_id, H5T_NATIVE_LONG) > 0) {
  // long integer dataset
} else if(H5Tget_class(dtype_id) == H5T_STRING || H5Tequal(dtype_id, H5T_C_S1) > 0) {
  // string dataset
} else if(H5Tequal(dtype_id, H5T_NATIVE_UCHAR) > 0) {
  // unsigned char dataset
} else if(H5Tequal(dtype_id, H5T_NATIVE_ULLONG) > 0) {
  // unsigned long long dataset
} else {
  TERMM(1, "ERROR: Unknown type for dataset " + name + "!");
}
herr_t status = H5Tclose(dtype_id);
status = H5Dclose(dset_id);
// b_getDatasetNames(): collect all datasets in the root group, optionally
// restricted to a given number of dimensions
vector<MString>().swap(names);
hid_t group_id = H5Gopen(b_h5Id, "/", H5P_DEFAULT);
status = H5Gget_num_objs(group_id, &noObj);
for(MInt idx = 0; idx < (signed)noObj; idx++) {
  int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
  if(obj_type == H5G_DATASET) {
    char DatasetName[NC_MAX_NAME];
    status = H5Gget_objname_by_idx(group_id, (hsize_t)idx, DatasetName, NC_MAX_NAME);
    hid_t dset_id = H5Dopen(b_h5Id, DatasetName, H5P_DEFAULT);
    hid_t dspace_id = H5Dget_space(dset_id);
    int noDims = H5Sget_simple_extent_ndims(dspace_id);
    if(noDims == dimension || dimension == -1) {
      names.emplace_back(DatasetName);
    }
    status = H5Sclose(dspace_id);
    status = H5Dclose(dset_id);
  }
}
status = H5Gclose(group_id);
// b_getDatasetNoDims(): number of dimensions of a dataset
hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
hid_t dspace_id = H5Dget_space(dset_id);
int noDims = H5Sget_simple_extent_ndims(dspace_id);
herr_t status = H5Sclose(dspace_id);
status = H5Dclose(dset_id);
// b_getDatasetSize(): length of one dimension of a dataset
hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
hid_t dspace_id = H5Dget_space(dset_id);
herr_t status = H5Sget_simple_extent_dims(dspace_id, &dimId[0], nullptr);
hid_t dtype_id = H5Dget_type(dset_id);
status = H5Sclose(dspace_id);
status = H5Dclose(dset_id);
return static_cast<size_type>(dimId[dimensionId]);
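// Querying extents in plain HDF5 always follows the same open / get_space /
// get_dims chain. A minimal standalone sketch that returns the size of one
// dimension (H5S_MAX_RANK is the HDF5 rank limit; all names illustrative):
hsize_t datasetSize(hid_t fileId, const char* name, int dimensionId) {
  hid_t dset = H5Dopen2(fileId, name, H5P_DEFAULT);
  hid_t space = H5Dget_space(dset);
  hsize_t dims[H5S_MAX_RANK] = {0};
  H5Sget_simple_extent_dims(space, dims, nullptr); // fills one entry per dimension
  const hsize_t result = dims[dimensionId];
  H5Sclose(space);
  H5Dclose(dset);
  return result;
}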
// b_getAttribute(), string version: read a (fixed-size) string attribute
                const size_type totalCount) {
  mTerm(1, AT_, "String array attributes not supported.");
  if(datasetName == "") {
    attribute_id = H5Aopen(b_h5Id, name.c_str(), H5P_DEFAULT);
    b_error(attribute_id, name, AT_);
  } else {
    attribute_id = H5Aopen_by_name(b_h5Id, datasetName.c_str(), name.c_str(), H5P_DEFAULT, H5P_DEFAULT);
    b_error(attribute_id, name, AT_);
  }
  hid_t attribute_type_id = H5Aget_type(attribute_id);
  b_error(attribute_type_id, name, AT_);
  hsize_t length = H5Tget_size(attribute_type_id);

  // Read the raw characters with a matching fixed-size string type
  hid_t filetype = H5Tcopy(H5T_C_S1);
  H5Tset_size(filetype, length);
  herr_t status = H5Aread(attribute_id, filetype, &buf[0]);
  value[0].append(&buf[0], length);

  status = H5Tclose(attribute_type_id);
  status = H5Aclose(attribute_id);
// b_getAttribute(), generic version: read a numeric attribute with totalCount elements
                const size_type totalCount) {
  if(datasetName == "") {
    attribute_id = H5Aopen(b_h5Id, name.c_str(), H5P_DEFAULT);
    b_error(attribute_id, name, AT_);
  } else {
    attribute_id = H5Aopen_by_name(b_h5Id, datasetName.c_str(), name.c_str(), H5P_DEFAULT, H5P_DEFAULT);
    b_error(attribute_id, name, AT_);
  }
  hid_t attribute_type_id = H5Aget_type(attribute_id);
  b_error(attribute_type_id, name, AT_);

  // Number of elements = storage size / element size
  hsize_t length = H5Aget_storage_size(attribute_id) / H5Tget_size(attribute_type_id);
  if(length != (hsize_t)totalCount) {
    TERMM(1, "Requested attribute (" + name
             + ") has different number of elements than given. Use getAttributeCount() to "
               "query number of elements first");
  }
  herr_t status = H5Aread(attribute_id, attribute_type_id, value);

  status = H5Tclose(attribute_type_id);
  status = H5Aclose(attribute_id);
// b_hasAttribute(): check whether an attribute exists (at an optional path)
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
MBool checkGroup = false;
herr_t pathExists = H5Lexists(b_h5Id, path.c_str(), link_id);
if(pathExists != 0) {
  loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
}
herr_t status = H5Lexists(loc_id, name.c_str(), link_id);
status = H5Aexists(loc_id, name.c_str());
if(checkGroup) H5Oclose(loc_id);
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
MBool exists = false;
herr_t pathExists = H5Lexists(b_h5Id, path.c_str(), link_id);
if(pathExists != 0) {
  // ...
}
// b_getAttributeCount(): number of elements of an existing attribute
if(datasetName.empty()) {
  attribute_id = H5Aopen(b_h5Id, name.c_str(), H5P_DEFAULT);
  b_error(attribute_id, name, AT_);
} else {
  attribute_id = H5Aopen_by_name(b_h5Id, datasetName.c_str(), name.c_str(), H5P_DEFAULT, H5P_DEFAULT);
  b_error(attribute_id, name, AT_);
}
hid_t attribute_type_id = H5Aget_type(attribute_id);
b_error(attribute_type_id, name, AT_);
hsize_t length = H5Aget_storage_size(attribute_id) / H5Tget_size(attribute_type_id);
herr_t status = H5Tclose(attribute_type_id);
status = H5Aclose(attribute_id);
// b_getAttributeType(): determine the type of an attribute on the file or on a dataset
if(datasetName.empty()) {
  // attribute is attached to the file itself
} else {
  varId = H5Dopen2(b_h5Id, datasetName.c_str(), H5P_DEFAULT);
}
hid_t attribute_id = H5Aopen(varId, name.c_str(), H5P_DEFAULT);
b_error(attribute_id, name, AT_);
hid_t attrType_id = H5Aget_type(attribute_id);
b_error(attrType_id, name, AT_);
if(H5Tequal(attrType_id, H5T_NATIVE_INT) > 0) {
  // integer attribute
} else if(H5Tequal(attrType_id, H5T_NATIVE_DOUBLE) > 0) {
  // floating-point attribute
} else if(H5Tequal(attrType_id, H5T_NATIVE_LONG) > 0) {
  // long integer attribute
} else if(H5Tequal(attrType_id, H5T_C_S1) > 0) {
  // string attribute
} else if(H5Tequal(attrType_id, H5T_NATIVE_UCHAR) > 0) {
  // unsigned char attribute
} else if(H5Tequal(attrType_id, H5T_NATIVE_ULLONG) > 0) {
  // unsigned long long attribute
}
status = H5Dclose(varId);
status = H5Aclose(attribute_id);
status = H5Tclose(attrType_id);
// b_writeArray(), string version: write MString data as a variable-length
// string dataset, optionally strided in memory/on disk and split into chunks
                MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
                MPI_Offset diskStride) {
  TERMM(1, "untested Hdf5 specific I/O method, please see comment for how" /* message continues */);

  hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
  hid_t dspace_id = H5Dget_space(dset_id);
  b_error(dspace_id, name, AT_);
  hid_t dtype_id = H5Tcopy(H5T_C_S1);
  herr_t status = H5Tset_size(dtype_id, H5T_VARIABLE);

  // Total number of elements (product over all but the last dimension)
  for(size_type d = 0; d < noDims - 1; d++) {
    totalCount *= count[d];
  }

  // If the data is strided in memory, gather it into a contiguous scratch buffer first
  MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
  if(memoryStride == 1) {
    // use the array directly
  } else {
    for(MPI_Offset i = 0; i < totalCount; i++) {
      tmpScratch[i] = array[memoryStride * i];
    }
    data = &tmpScratch[0];
  }

  // Serialize the strings into a flat character buffer
  for(MPI_Offset i = 0; i < totalCount; i++) {
    data[i].copy(&buf[i * strLen], strLen, 0);
  }

  hid_t memory_dspace_id = -1;
  // Single write: one hyperslab selection covering the whole local block
  memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
  b_error(memory_dspace_id, name, AT_);
  if(diskStride == 1) {
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
  } else {
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride, (hsize_t*)count,
                                 nullptr);
  }

  // Chunked write: split the first dimension into noChunks pieces
  MPI_Offset chunkSize = count[0] / noChunks;
  if(count[0] % noChunks > 0) {
    chunkSize++; // round up
  }
  std::copy(start, start + noDims, &start_[0]);
  std::copy(count, count + noDims, &count_[0]);
  for(size_type i = 0; i < noChunks; i++) {
    start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
    count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
    const char* buf_ = &buf[0] + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
    memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
    b_error(memory_dspace_id, name, AT_);
    if(diskStride == 1) {
      status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
    } else {
      status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], (hsize_t*)diskStride, &count_[0],
                                   nullptr);
    }
  }

  status = H5Sclose(memory_dspace_id);
  status = H5Sclose(dspace_id);
  status = H5Dclose(dset_id);
  status = H5Tclose(dtype_id);
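// For reference, the core of a variable-length string write in plain HDF5 is
// small: the memory type is a C string of size H5T_VARIABLE and the buffer is
// an array of char pointers. A minimal sketch that assumes an already created
// 1-D dataset named "/strings" (all names illustrative; requires <hdf5.h>,
// <vector>, <string>):
void writeVlenStrings(hid_t fileId, const std::vector<std::string>& values) {
  hid_t dtype = H5Tcopy(H5T_C_S1);
  H5Tset_size(dtype, H5T_VARIABLE);
  std::vector<const char*> ptrs(values.size());
  for(size_t i = 0; i < values.size(); i++) {
    ptrs[i] = values[i].c_str(); // HDF5 expects char* elements for VL strings
  }
  hid_t dset = H5Dopen2(fileId, "/strings", H5P_DEFAULT);
  H5Dwrite(dset, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, ptrs.data());
  H5Dclose(dset);
  H5Tclose(dtype);
}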
// b_readArray(), string version: read fixed-length string data back into MStrings
                MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
                MPI_Offset diskStride) {
  hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
  hid_t dspace_id = H5Dget_space(dset_id);
  b_error(dspace_id, name, AT_);
  hid_t dtype_id = H5Dget_type(dset_id);
  int length = (int)H5Tget_size(dtype_id);

  // Recreate a matching in-memory string type (UTF-8 if the file type uses it)
  if(H5Tget_cset(dtype_id)) {
    dtype_id = H5Tcopy(H5T_C_S1);
    H5Tset_cset(dtype_id, H5T_CSET_UTF8);
  } else {
    dtype_id = H5Tcopy(H5T_C_S1);
  }
  herr_t status = H5Tset_size(dtype_id, length);

  // Total number of elements (product over all but the last dimension)
  for(size_type d = 0; d < noDims - 1; d++) {
    totalCount *= count[d];
  }

  // Scratch buffer for strided memory access
  MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
  if(memoryStride == 1) {
    // read directly into the target array
  } else {
    data = &tmpScratch[0];
  }

  hid_t memory_dspace_id = -1;
  // Single read: select the whole hyperslab and read it in one call
  memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
  b_error(memory_dspace_id, name, AT_);
  status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
  status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, &buf[0]);
  for(size_type i = 0; i < totalCount; i++) {
    tmp.append(&buf[i * strLen], strLen);
    data[i].append(tmp.c_str(), 0, strLen);
  }

  // Chunked read: split the first dimension into noChunks pieces
  MPI_Offset chunkSize = count[0] / noChunks;
  if(count[0] % noChunks > 0) {
    chunkSize++; // round up
  }
  std::copy(start, start + noDims, &start_[0]);
  std::copy(count, count + noDims, &count_[0]);
  for(size_type i = 0; i < noChunks; i++) {
    start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
    count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
    memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
    b_error(memory_dspace_id, name, AT_);
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
    status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, &buf[0]);
    for(size_type j = start_[0]; j < totalCount; j++) {
      tmp.append(&buf[(j - start_[0]) * strLen], strLen);
      data[j].append(tmp.c_str(), 0, strLen);
    }
  }

  // Scatter the scratch buffer back into the strided target array if necessary
  if(memoryStride != 1) {
    for(MPI_Offset i = 0; i < totalCount; i++) {
      array[memoryStride * i] = tmpScratch[i];
    }
  }

  status = H5Sclose(memory_dspace_id);
  status = H5Sclose(dspace_id);
  status = H5Dclose(dset_id);
  status = H5Tclose(dtype_id);
// b_readScalar(), string version: read a variable-length string scalar
TERMM(1, "untested Hdf5 specific I/O method, please see comment for how" /* message continues */);

hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
hid_t dtype_id = H5Tcopy(H5T_C_S1);
herr_t status = H5Tset_size(dtype_id, H5T_VARIABLE);
status = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, (char*)(scalar));
status = H5Dclose(dset_id);
status = H5Tclose(dtype_id);
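// When HDF5 reads a variable-length string, the library allocates the
// character buffer itself and the caller has to release it again. A minimal
// sketch of a standalone scalar read that frees the buffer afterwards (plain
// HDF5 API; the dataset name "/title" is illustrative; requires <hdf5.h>,
// <string>):
std::string readVlenStringScalar(hid_t fileId) {
  hid_t dset = H5Dopen2(fileId, "/title", H5P_DEFAULT);
  hid_t dtype = H5Tcopy(H5T_C_S1);
  H5Tset_size(dtype, H5T_VARIABLE);
  char* raw = nullptr;
  H5Dread(dset, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, &raw); // HDF5 allocates 'raw'
  std::string result = (raw != nullptr) ? std::string(raw) : std::string();
  H5free_memory(raw); // release the library-allocated buffer
  H5Tclose(dtype);
  H5Dclose(dset);
  return result;
}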
// b_createAttribute(), generic version: (re)create an attribute on the file
// or on a dataset/group and write its value
                const size_type totalCount) {
  hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);

  // Scalar attributes get a scalar dataspace, everything else a 1-D simple one
  const auto tmpCnt = (hsize_t)totalCount;
  hid_t dspace_id = (totalCount > 1) ? H5Screate_simple(1, &tmpCnt, nullptr) : H5Screate(H5S_SCALAR);
  b_error(dspace_id, name, AT_);

  hid_t attribute_id = -1;
  if(!datasetName.empty()) {
    // Make sure the target object exists (create it as a group if necessary), then open it
    status = H5Lexists(b_h5Id, datasetName.c_str(), link_id);
    hid_t group_id = H5Gcreate(b_h5Id, datasetName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    b_error(group_id, datasetName, AT_);
    loc_id = H5Oopen(b_h5Id, datasetName.c_str(), H5P_DEFAULT);
    b_error(loc_id, datasetName, AT_);
  }

  // Remove a pre-existing attribute of the same name before re-creating it
  if(datasetName.empty()) {
    status = H5Adelete(loc_id, name.c_str());
  } else {
    status = H5Adelete_by_name(loc_id, datasetName.c_str(), name.c_str(), H5P_DEFAULT);
    b_error(status, datasetName, AT_);
  }

  attribute_id = H5Acreate(loc_id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT);
  b_error(attribute_id, name, AT_);
  status = H5Awrite(attribute_id, dtype_id, value);

  status = H5Aclose(attribute_id);
  status = H5Sclose(dspace_id);
  if(!datasetName.empty()) {
    status = H5Oclose(loc_id);
  }
// b_createAttribute(), string version: strings are written as fixed-size
// C strings of the given length
                hid_t dtype_id, const size_type totalCount) {
  if(totalCount > 1) {
    mTerm(1, AT_, "Array of strings attributes not yet supported.");
  }
  hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);

  hid_t dspace_id = H5Screate_simple(1, &dims, nullptr);
  dtype_id = H5Tcopy(H5T_C_S1);
  MInt length = strlen(value->c_str());
  H5Tset_size(dtype_id, length + 1);
  b_error(dspace_id, name, AT_);

  hid_t attribute_id = -1;
  if(!datasetName.empty()) {
    status = H5Lexists(b_h5Id, datasetName.c_str(), link_id);
    hid_t group_id = H5Gcreate(b_h5Id, datasetName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    b_error(group_id, datasetName, AT_);
    loc_id = H5Oopen(b_h5Id, datasetName.c_str(), H5P_DEFAULT);
    b_error(loc_id, datasetName, AT_);
  }

  // Remove a pre-existing attribute of the same name, then create and write
  if(datasetName.empty()) {
    status = H5Adelete(loc_id, name.c_str());
  } else {
    status = H5Adelete_by_name(loc_id, datasetName.c_str(), name.c_str(), H5P_DEFAULT);
    b_error(status, datasetName, AT_);
  }
  attribute_id = H5Acreate(loc_id, name.c_str(), dtype_id, dspace_id, H5P_DEFAULT, H5P_DEFAULT);
  b_error(attribute_id, name, AT_);
  status = H5Awrite(attribute_id, dtype_id, value->c_str());

  status = H5Aclose(attribute_id);
  status = H5Sclose(dspace_id);
  status = H5Tclose(dtype_id);
  if(!datasetName.empty()) {
    status = H5Oclose(loc_id);
  }
// b_setAttribute(), generic version: pick the matching HDF5 type via hdf5_traits
                const size_type totalCount) {
  hid_t dtype_id = H5Tcopy(hdf5_traits<T>::type());
// b_setAttribute(), string version: use a variable-length C string type
                const size_type totalCount) {
  if(totalCount > 1) mTerm(1, AT_, "Array of strings attributes not yet supported.");
  hid_t dtype_id = H5Tcopy(H5T_C_S1);
  herr_t status = H5Tset_size(dtype_id, H5T_VARIABLE);
// b_error(): print diagnostic information and abort on an HDF5 error
cerr << "*** ERROR in parallelio_hdf5 ***" << endl;
cerr << "HDF5 error in function " << location << endl;
cerr << "HDF5 returns status " << status << endl;
cerr << "The file/variable/attribute in question was: " << name << endl;
TERMM(1, "HDF5 error in ParallelIoHdf5.");
// b_warning(): same diagnostics as b_error(), but without terminating
cerr << "*** HDF5 Warning ***" << endl;
cerr << "HDF5 warning in function " << location << endl;
cerr << "HDF5 returns status " << status << endl;
cerr << "The file/variable/attribute in question was: " << name << endl;
// b_writeArray(), generic version: write numeric array data, optionally with
// memory/disk strides and split into chunks
                MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
                MPI_Offset diskStride) {
  hid_t dset_id = H5Dopen2(b_h5Id, name.c_str(), H5P_DEFAULT);
  hid_t dspace_id = H5Dget_space(dset_id);
  b_error(dspace_id, name, AT_);
  hid_t dtype_id = H5Dget_type(dset_id);

  totalCount *= count[d]; // accumulate the total number of elements

  // Gather strided data into a contiguous scratch buffer if necessary
  MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
  if(memoryStride == 1) {
    // write directly from 'array'
  } else {
    for(MPI_Offset i = 0; i < totalCount; i++) {
      tmpScratch[i] = array[memoryStride * i];
    }
    data = &tmpScratch[0];
  }

  hid_t memory_dspace_id;
  // Single write: one hyperslab selection covering the whole local block
  memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
  b_error(memory_dspace_id, name, AT_);
  if(diskStride == 1) {
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
  } else {
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride, (hsize_t*)count,
                                 nullptr);
  }

  // Chunked write (untested code path)
  TERMM(1, "untested Hdf5 specific I/O method, please see comment for how" /* message continues */);
  MPI_Offset chunkSize = count[0] / noChunks;
  if(count[0] % noChunks > 0) {
    chunkSize++; // round up
  }
  std::copy(start, start + noDims, &start_[0]);
  std::copy(count, count + noDims, &count_[0]);
  for(size_type i = 0; i < noChunks; i++) {
    start_[0] = std::min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
    count_[0] = std::max(std::min(chunkSize, count[0] - i * chunkSize), 0ll);
    const T* data_ = data + std::min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
    memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
    b_error(memory_dspace_id, name, AT_);
    if(diskStride == 1) {
      status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
    } else {
      status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], (hsize_t*)diskStride, &count_[0],
                                   nullptr);
    }
  }

  status = H5Sclose(memory_dspace_id);
  status = H5Sclose(dspace_id);
  status = H5Tclose(dtype_id);
  status = H5Dclose(dset_id);
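// The pattern above, a memory dataspace describing the local buffer plus a
// hyperslab selection in the file dataspace, is the standard HDF5 way to
// write one process's portion of a global array. A minimal contiguous sketch
// without striding or chunking (plain HDF5; names illustrative):
void writeLocalBlock(hid_t fileId, const char* dsetName, const double* localData,
                     hsize_t globalOffset, hsize_t localCount) {
  hid_t dset = H5Dopen2(fileId, dsetName, H5P_DEFAULT);
  hid_t fileSpace = H5Dget_space(dset);
  // Select this rank's slice [globalOffset, globalOffset + localCount) in the file
  H5Sselect_hyperslab(fileSpace, H5S_SELECT_SET, &globalOffset, nullptr, &localCount, nullptr);
  // The memory dataspace simply describes the contiguous local buffer
  hid_t memSpace = H5Screate_simple(1, &localCount, nullptr);
  H5Dwrite(dset, H5T_NATIVE_DOUBLE, memSpace, fileSpace, H5P_DEFAULT, localData);
  H5Sclose(memSpace);
  H5Sclose(fileSpace);
  H5Dclose(dset);
}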
// b_writeScalar(), generic version
hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
hid_t dtype_id = H5Dget_type(dset_id);
herr_t status = H5Dwrite(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &scalar);
status = H5Dclose(dset_id);
// b_readArray(), generic version: read numeric array data, optionally with
// memory/disk strides and split into chunks
                MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks,
                MPI_Offset diskStride) {
  hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
  hid_t dspace_id = H5Dget_space(dset_id);
  b_error(dspace_id, name, AT_);
  hid_t dtype_id = H5Dget_type(dset_id);

  totalCount *= count[d]; // accumulate the total number of elements

  // Read into a scratch buffer if the target array is strided in memory
  MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
  if(memoryStride == 1) {
    // read directly into 'array'
  } else {
    data = &tmpScratch[0];
  }

  hid_t memory_dspace_id;
  // Single read: one hyperslab selection covering the whole local block
  memory_dspace_id = H5Screate_simple(noDims, (hsize_t*)count, nullptr);
  b_error(memory_dspace_id, name, AT_);
  if(diskStride == 1) {
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, nullptr, (hsize_t*)count, nullptr);
  } else {
    status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, (hsize_t*)start, (hsize_t*)diskStride, (hsize_t*)count,
                                 nullptr);
  }
  status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, data);

  // Chunked read (untested code path)
  TERMM(1, "untested Hdf5 specific I/O method, please see comment for how" /* message continues */);
  MPI_Offset chunkSize = count[0] / noChunks;
  if(count[0] % noChunks > 0) {
    chunkSize++; // round up
  }
  std::copy(start, start + noDims, &start_[0]);
  std::copy(count, count + noDims, &count_[0]);
  for(size_type i = 0; i < noChunks; i++) {
    start_[0] = std::min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
    count_[0] = std::max(std::min(chunkSize, count[0] - i * chunkSize), 0ll);
    T* data_ = data + std::min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
    memory_dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
    b_error(memory_dspace_id, name, AT_);
    if(diskStride == 1) {
      status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);
    } else {
      status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &start_[0], (hsize_t*)diskStride, &count_[0],
                                   nullptr);
    }
    status = H5Dread(dset_id, dtype_id, memory_dspace_id, dspace_id, H5P_DEFAULT, data_);
  }

  // Scatter the scratch buffer back into the strided target array
  if(memoryStride != 1) {
    for(MPI_Offset i = 0; i < totalCount; i++) {
      array[memoryStride * i] = tmpScratch[i];
    }
  }

  status = H5Sclose(memory_dspace_id);
  status = H5Sclose(dspace_id);
  status = H5Tclose(dtype_id);
  status = H5Dclose(dset_id);
// b_readScalar(), generic version
hid_t dset_id = H5Dopen(b_h5Id, name.c_str(), H5P_DEFAULT);
hid_t dtype_id = H5Dget_type(dset_id);
herr_t status = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, scalar);
status = H5Dclose(dset_id);
// Read a hyperslab of a dataset that lives below an arbitrary group path
                const size_type* start, const size_type* count) {
  MInt checkLoc_id = 0;
  loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);

  hid_t dset_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
  hid_t fspace_id = H5Dget_space(dset_id);
  b_error(fspace_id, name, AT_);
  hid_t dtype_id = H5Dget_type(dset_id);

  std::copy(start, start + noDims, &start_[0]);
  std::copy(count, count + noDims, &count_[0]);
  status = H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, &start_[0], NULL, &count_[0], NULL);
  hid_t dspace_id = H5Screate_simple(noDims, &count_[0], NULL);
  b_error(dspace_id, name, AT_);
  status = H5Dread(dset_id, dtype_id, dspace_id, fspace_id, H5P_DEFAULT, array);

  status = H5Oclose(loc_id);
  status = H5Dclose(dset_id);
  status = H5Sclose(fspace_id);
  status = H5Sclose(dspace_id);
// Read a dataset into an array that is padded with ghost layers: the memory
// dataspace is enlarged by 2*ghost in every dimension and only its interior
// (offset by ghost) is selected as the target of the read
                const size_type* start, const size_type* count, const size_type* ghost) {
  hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
  MInt checkLoc_id = 0;
  loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
  status = H5Lexists(loc_id, name.c_str(), link_id);
  hid_t dset_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
  hid_t dtype_id = H5Dget_type(dset_id);

  // Memory extent including ghost cells
  count_[i] = count[i] + (2 * ghost[i]);
  std::copy(start, start + noDims, &start_[0]);
  std::copy(ghost, ghost + noDims, &ghost_[0]);
  hid_t dspace_id = H5Screate_simple(noDims, &count_[0], nullptr);
  b_error(dspace_id, name, AT_);

  // Select the interior of the memory buffer ...
  std::copy(count, count + noDims, &count_[0]);
  status = H5Sselect_hyperslab(dspace_id, H5S_SELECT_SET, &ghost_[0], nullptr, &count_[0], nullptr);

  // ... and the corresponding block in the file
  hid_t fspace_id = H5Dget_space(dset_id);
  b_error(fspace_id, name, AT_);
  status = H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, &start_[0], nullptr, &count_[0], nullptr);

#if !defined(HOST_Hawk)
  // ...
#endif

  status = H5Pclose(link_id);
  status = H5Oclose(loc_id);
  status = H5Dclose(dset_id);
  status = H5Sclose(dspace_id);
  status = H5Sclose(fspace_id);
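// Reading directly into a ghost-padded buffer avoids a separate interior
// copy: the memory dataspace describes the full padded block while only its
// interior is selected. A minimal 1-D sketch under that assumption (plain
// HDF5; all names illustrative):
void readIntoPaddedBuffer(hid_t fileId, const char* dsetName, double* padded,
                          hsize_t start, hsize_t count, hsize_t ghost) {
  hid_t dset = H5Dopen2(fileId, dsetName, H5P_DEFAULT);
  // Memory space: interior of a buffer of extent count + 2*ghost, offset by ghost
  const hsize_t memExtent = count + 2 * ghost;
  hid_t memSpace = H5Screate_simple(1, &memExtent, nullptr);
  H5Sselect_hyperslab(memSpace, H5S_SELECT_SET, &ghost, nullptr, &count, nullptr);
  // File space: the matching block of the global dataset
  hid_t fileSpace = H5Dget_space(dset);
  H5Sselect_hyperslab(fileSpace, H5S_SELECT_SET, &start, nullptr, &count, nullptr);
  H5Dread(dset, H5T_NATIVE_DOUBLE, memSpace, fileSpace, H5P_DEFAULT, padded);
  H5Sclose(fileSpace);
  H5Sclose(memSpace);
  H5Dclose(dset);
}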
// Create a dataset below an arbitrary group path, creating the group first if
// it does not exist yet
                const size_type noDims, const size_type* count) {
  // Type switch (alternative branches)
  dtype_id = H5T_NATIVE_DOUBLE;
  dtype_id = H5T_NATIVE_INT;
  dtype_id = H5T_NATIVE_LONG;
  TERMM(1, "Not yet implemented");           // string
  dtype_id = H5T_NATIVE_UCHAR;
  dtype_id = H5T_NATIVE_ULLONG;
  TERMM(1, "Invalid ParallelIo data type!"); // unknown type

  std::copy(count, count + noDims, &datcount[0]);
  hid_t datspace = H5Screate_simple(noDims, &datcount[0], NULL);
  hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
  MInt checkLoc_id = 0;

  // Create the group if the path does not exist yet, then open it
  status = H5Lexists(b_h5Id, path.c_str(), link_id);
  loc_id = H5Gcreate(b_h5Id, path.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  loc_id = H5Gclose(loc_id);
  loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);

  status = H5Lexists(loc_id, name.c_str(), link_id);
  hid_t dset_id = H5Dcreate2(loc_id, name.c_str(), dtype_id, datspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Dclose(dset_id);
  status = H5Oclose(loc_id);
  status = H5Pclose(link_id);
  status = H5Sclose(datspace);
// Query the extents of a dataset below a group path
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
MInt checkLoc_id = 0;
loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
herr_t status = H5Lexists(loc_id, name.c_str(), link_id);
if(!status) {
  m_log << "WARNING: Dataset " << name << " does not exist at path " << path << std::endl;
}
hid_t data_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
hid_t filespace = H5Dget_space(data_id);
H5Sget_simple_extent_dims(filespace, &count[0], &maxcount[0]);
H5Sclose(filespace);
if(checkLoc_id) H5Oclose(loc_id);
// Number of dimensions of a dataset below a group path
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
MBool checkLoc_id = false;
loc_id = H5Oopen(b_h5Id, path.c_str(), H5P_DEFAULT);
hid_t data_id = H5Oopen(loc_id, name.c_str(), H5P_DEFAULT);
hid_t dspace_id = H5Dget_space(data_id);
b_error(dspace_id, name, AT_);
size_type noDims = H5Sget_simple_extent_ndims(dspace_id);
herr_t status = H5Sclose(dspace_id);
status = H5Oclose(data_id);
status = H5Pclose(link_id);
status = H5Oclose(loc_id);
// b_createGroup(): create all missing groups along the given path
hid_t link_id = H5Pcreate(H5P_LINK_ACCESS);
std::vector<MString> p; // path components (filled by tokenize())
MInt num = distance(p.begin(), p.end()) - 1;
for(MInt i = 0; i < num; ++i) {
  objectpath.append("/");
  parentpath.append(p[i - 1]);
  parentpath.append("/");
  objectpath.append(p[i]);
  herr_t status = H5Lexists(b_h5Id, objectpath.c_str(), link_id);
  // top-level component: create it directly below the file root (alternative branch)
  hid_t group_id = H5Gcreate(b_h5Id, p[i].c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Gclose(group_id);
  // nested component: open the parent group and create the child inside it (alternative branch)
  hid_t group_id = H5Gopen(b_h5Id, parentpath.c_str(), H5P_DEFAULT);
  hid_t new_group_id = H5Gcreate(group_id, p[i].c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  b_error(new_group_id, path, AT_);
  status = H5Gclose(new_group_id);
  status = H5Gclose(group_id);
}
herr_t status = H5Pclose(link_id);
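// Creating nested groups level by level, as above, works on any HDF5 version.
// An alternative worth noting is a link-creation property list with
// H5Pset_create_intermediate_group, which lets a single H5Gcreate2 call
// create all missing parents. A minimal sketch (standard HDF5 API; this is
// not how this file does it):
hid_t createGroupRecursive(hid_t fileId, const char* path) {
  hid_t lcpl = H5Pcreate(H5P_LINK_CREATE);
  H5Pset_create_intermediate_group(lcpl, 1); // create missing parent groups on the fly
  hid_t group = H5Gcreate2(fileId, path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
  H5Pclose(lcpl);
  return group; // caller closes the group with H5Gclose
}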
// b_getGroupNames(): collect the names of all groups below the given path
hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
herr_t status = H5Gget_num_objs(group_id, &noObj);
for(MInt idx = 0; idx < (signed)noObj; idx++) {
  int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
  if(obj_type == H5G_GROUP) {
    char GroupName[NC_MAX_NAME];
    status = H5Gget_objname_by_idx(group_id, (hsize_t)idx, GroupName, NC_MAX_NAME);
    names.emplace_back(GroupName);
  }
}
status = H5Gclose(group_id);
// b_getNoGroups(): count the groups below the given path
hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
ParallelIo::size_type noGroups = 0;
herr_t status = H5Gget_num_objs(group_id, &noObj);
for(MInt idx = 0; idx < (signed)noObj; idx++) {
  int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
  if(obj_type == H5G_GROUP) {
    noGroups++;
  }
}
status = H5Gclose(group_id);
// b_getDatasetNames(): collect the names of all datasets below the given path
hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
herr_t status = H5Gget_num_objs(group_id, &noObj);
for(MInt idx = 0; idx < (signed)noObj; idx++) {
  int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
  if(obj_type == H5G_DATASET) {
    char DatasetName[NC_MAX_NAME];
    status = H5Gget_objname_by_idx(group_id, (hsize_t)idx, DatasetName, NC_MAX_NAME);
    names.emplace_back(DatasetName);
  }
}
status = H5Gclose(group_id);
// b_getNoDatasets(): count the datasets below the given path
hid_t group_id = H5Gopen(b_h5Id, pathStr.c_str(), H5P_DEFAULT);
ParallelIo::size_type noDatasets = 0;
herr_t status = H5Gget_num_objs(group_id, &noObj);
for(MInt idx = 0; idx < (signed)noObj; idx++) {
  int obj_type = H5Gget_objtype_by_idx(group_id, (size_t)idx);
  if(obj_type == H5G_DATASET) {
    noDatasets++;
  }
}
status = H5Gclose(group_id);
// Explicit template instantiations for the supported element types, grouped
// by the trailing parameter lists that appear here:
//   ... const size_type totalCount);                                             (11x, attribute setters/creators)
//   ... MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
//       const size_type noChunks, MPI_Offset diskStride);                        (5x, array write/read)
//   ... const size_type noDims, const size_type* start, const size_type* count,
//       const size_type* ghost);                                                 (2x, ghost-layer reads)
//   ... MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
//       const size_type noChunks, MPI_Offset diskStride);                        (5x, array write/read)
//   ... const size_type* start, const size_type* count);                         (2x, path-based reads)
Referenced declarations and documentation excerpts:

This class is intended to do all the heavy lifting when it comes to reading and writing "big data" fi...
std::set<MString> m_unwrittenScalars
MLong size_type: Type used for all size- and offset-related values.
std::set<MString> m_unwrittenArrays
void setAttribute(const T& value, const MString& name, const MString& datasetName=""): Set a file or dataset attribute. [MPI]
MBool b_hasObject(const MString& path)
void b_createAttribute(const T* value, const MString& name, const MString& datasetName, hid_t type_id, const size_type totalCount): Creates an attribute in the file (generic version).
void b_defineScalar(maiabd_type type, const MString& name): Defines a scalar in the file. [MPI]
void b_saveHeader(): Adds all additional header information that is needed in the file.
void b_readArray(T*, const MString&, const size_type, MPI_Offset*, MPI_Offset*, MPI_Offset, const size_type, MPI_Offset): Read array data from file (float version). [MPI]
void b_writeScalar(T scalar, const MString& name): Writes scalar data to file (Hdf5 version). [MPI]
hid_t b_h5DatasetXferHandle
void b_writeArray(const T* array, const MString& name, const size_type noDims, MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride, const size_type noChunks, MPI_Offset diskStride): Writes array data to file (Hdf5 version). [MPI]
void b_addAdditionalHeader(): Write additional headers to file (e.g. grid file name, creation date etc.). [MPI]
void b_getAttribute(T* value, const MString& name, const MString& datasetName="", const size_type totalCount=1): Retrieve an attribute from file (float version).
static void b_error(MInt status, const MString& name, const MString& location): Check the status code of an HDF5 operation and output a meaningful message.
MBool b_hasAttribute(const MString& name, const MString& path=""): Check if object exists. [MPI]
size_type b_getDatasetNoDims(const MString& name): Get number of dimensions of a dataset with the given name.
MInt b_getDatasetType(const MString& name): Returns the data type of a dataset in the file (can be array, multi-D array or scalar).
void b_readScalar(T* scalar, const MString& name): Read scalar data from file (float version). [MPI]
MInt b_getAttributeType(const MString& name, const MString& datasetName=""): Returns the data type of an attribute in the file.
void b_defineArray(maiabd_type type, const MString& name, size_type noDims, size_type* totalCount): Defines an array in the file. [MPI]
~ParallelIoHdf5() override: Close open identifiers and release memory. [MPI]
void b_writeAdditionalData(): Write additional data to file. [MPI]
size_type b_getNoDatasets(const MString& path): Gets the number of datasets in the given group. [MPI]
void b_setAttribute(const T* value, const MString& name, const MString& datasetName="", const size_type totalCount=1): Set an attribute in the file (generic version).
ParallelIoHdf5(const MString& fileName, MInt fileMode, const MPI_Comm& mpiComm): Creates a new object to read and write big data files. [MPI]
static void b_warning(MInt status, const MString& name, const MString& location): Check the status code of an HDF5 operation and output a meaningful message (warning).
void b_getGroupNames(std::vector<MString>& names, const MString& path): Gets the groups in the given group. [MPI]
void b_getDatasetNames(std::vector<MString>& names, const size_type dimension): Returns a vector of all existing datasets with the given number of dimensions in the file (if any)....
static const MString b_fileExt(): Returns backend-specific ending of filename (either ".Netcdf" or ".Hdf5").
static MBool b_isValidFile(const MString& name, const MPI_Comm& mpiComm): Check if specified file is a valid HDF5 file (i.e. can be opened). [MPI]
void b_getAttributeCount(const MString& name, size_type* totalCount, const MString& datasetName="")
void b_createGroup(const MString& path): Creates an HDF5 group. [MPI]
size_type b_getDatasetSize(const MString& name, const size_type dimensionId=0): Get the length of one dimension of an arbitrary array in the file.
MBool b_hasDataset(const MString& name, const size_type dimension): Check if the file contains a dataset with the given name and number of dimensions.
size_type b_getNoGroups(const MString& path): Gets the number of groups in the given group. [MPI]
ScratchSpace: This class is a ScratchSpace.
void mTerm(const MInt errorCode, const MString& location, const MString& message)
void tokenize(const std::string& str, ContainerT& tokens, const std::string& delimiters=" ", MBool trimEmpty=false)
std::basic_string<char> MString
int MPI_Bcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm, const MString& name, const MString& varname): same as MPI_Bcast
const MInt PIO_UNKNOWN_TYPE
const MInt PIO_CREATE: File mode to create a new file. Aborts if file already exists.
Namespace for auxiliary functions/classes.
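To illustrate how these pieces fit together, here is a hedged usage sketch built only from the documented constructor, the PIO_CREATE file mode, and setAttribute; the file name, the attribute content, the use of MPI_COMM_WORLD, and the omission of error handling are all illustrative assumptions, not taken from this file:

ParallelIoHdf5 file("solution.Hdf5", PIO_CREATE, MPI_COMM_WORLD); // collective open in create mode
file.setAttribute(MString("example run"), "note");                // file-level attribute
// ... datasets are then defined and written through the ParallelIo interface ...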