22#if not defined(MAIA_MS_COMPILER)
28#include <sys/socket.h>
33using namespace parallel_io;
37#if(MAIA_NCMPI_FILE_TYPE != NC_64BIT_OFFSET) && (MAIA_NCMPI_FILE_TYPE != NC_64BIT_DATA)
38#error Bad value for MAIA_NCMPI_FILE_TYPE.
/// Primary template for the PnetCDF type-traits mechanism.
///
/// Intentionally empty: only the explicit specializations below
/// (MFloat, MInt, MLong, MChar, MUchar, MUlong) provide the
/// type-specific ncmpi_* wrapper functions. Instantiating an
/// unspecialized type yields a struct with no members, so any use
/// of a wrapper for an unsupported type fails at compile time.
template <class DataType>
struct pnetcdf_traits {};
53struct pnetcdf_traits<
MFloat> {
55 static nc_type type() {
return NC_DOUBLE; }
58 static int ncmpi_put_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
60 return ncmpi_put_vara_double_all(ncid, varid, start, count, buf);
64 static int ncmpi_put_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
65 const MPI_Offset stride[],
const MFloat* buf) {
66 return ncmpi_put_vars_double_all(ncid, varid, start, count, stride, buf);
70 static int ncmpi_get_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
72 return ncmpi_get_vara_double_all(ncid, varid, start, count, buf);
76 static int ncmpi_get_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
77 const MPI_Offset stride[],
MFloat* buf) {
78 return ncmpi_get_vars_double_all(ncid, varid, start, count, stride, buf);
82 static int ncmpi_put_att_type(
int ncid,
int varid,
const char* name, nc_type xtype,
const MPI_Offset nelems,
84 return ncmpi_put_att_double(ncid, varid, name, xtype, nelems, buf);
88 static int ncmpi_get_att_type(
int ncid,
int varid,
const char* name,
MFloat* buf) {
89 return ncmpi_get_att_double(ncid, varid, name, buf);
95struct pnetcdf_traits<
MInt> {
97 static nc_type type() {
return NC_INT; }
100 static int ncmpi_put_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
102 return ncmpi_put_vara_int_all(ncid, varid, start, count, buf);
106 static int ncmpi_put_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
107 const MPI_Offset stride[],
const MInt* buf) {
108 return ncmpi_put_vars_int_all(ncid, varid, start, count, stride, buf);
112 static int ncmpi_get_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
114 return ncmpi_get_vara_int_all(ncid, varid, start, count, buf);
118 static int ncmpi_get_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
119 const MPI_Offset stride[],
MInt* buf) {
120 return ncmpi_get_vars_int_all(ncid, varid, start, count, stride, buf);
124 static int ncmpi_put_att_type(
int ncid,
int varid,
const char* name, nc_type xtype, MPI_Offset nelems,
126 return ncmpi_put_att_int(ncid, varid, name, xtype, nelems, buf);
130 static int ncmpi_get_att_type(
int ncid,
int varid,
const char* name,
MInt* buf) {
131 return ncmpi_get_att_int(ncid, varid, name, buf);
137struct pnetcdf_traits<
MLong> {
139 static nc_type type() {
return NC_INT64; }
142 static int ncmpi_put_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
144 return ncmpi_put_vara_long_all(ncid, varid, start, count, buf);
148 static int ncmpi_put_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
149 const MPI_Offset stride[],
const MLong* buf) {
150 return ncmpi_put_vars_long_all(ncid, varid, start, count, stride, buf);
154 static int ncmpi_get_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
156 return ncmpi_get_vara_long_all(ncid, varid, start, count, buf);
160 static int ncmpi_get_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
161 const MPI_Offset stride[],
MLong* buf) {
162 return ncmpi_get_vars_long_all(ncid, varid, start, count, stride, buf);
166 static int ncmpi_put_att_type(
int ncid,
int varid,
const char* name, nc_type xtype, MPI_Offset nelems,
168 return ncmpi_put_att_long(ncid, varid, name, xtype, nelems, buf);
172 static int ncmpi_get_att_type(
int ncid,
int varid,
const char* name,
MLong* buf) {
173 return ncmpi_get_att_long(ncid, varid, name, buf);
179struct pnetcdf_traits<
MChar> {
181 static nc_type type() {
return NC_CHAR; }
184 static int ncmpi_put_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
186 return ncmpi_put_vara_text_all(ncid, varid, start, count, buf);
190 static int ncmpi_put_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
191 const MPI_Offset stride[],
const MChar* buf) {
192 return ncmpi_put_vars_text_all(ncid, varid, start, count, stride, buf);
196 static int ncmpi_get_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
198 return ncmpi_get_vara_text_all(ncid, varid, start, count, buf);
202 static int ncmpi_get_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
203 const MPI_Offset stride[],
MChar* buf) {
204 return ncmpi_get_vars_text_all(ncid, varid, start, count, stride, buf);
208 static int ncmpi_put_att_type(
int ncid,
int varid,
const char* name, nc_type NotUsed(xtype), MPI_Offset nelems,
211 return ncmpi_put_att_text(ncid, varid, name, nelems, buf);
215 static int ncmpi_get_att_type(
int ncid,
int varid,
const char* name,
MChar* buf) {
216 return ncmpi_get_att_text(ncid, varid, name, buf);
222struct pnetcdf_traits<
MUchar> {
224 static nc_type type() {
return NC_UBYTE; }
227 static int ncmpi_put_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
229 return ncmpi_put_vara_uchar_all(ncid, varid, start, count, buf);
233 static int ncmpi_put_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
234 const MPI_Offset stride[],
const MUchar* buf) {
235 return ncmpi_put_vars_uchar_all(ncid, varid, start, count, stride, buf);
239 static int ncmpi_get_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
241 return ncmpi_get_vara_uchar_all(ncid, varid, start, count, buf);
245 static int ncmpi_get_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
246 const MPI_Offset stride[],
MUchar* buf) {
247 return ncmpi_get_vars_uchar_all(ncid, varid, start, count, stride, buf);
251 static int ncmpi_put_att_type(
int ncid,
int varid,
const char* name, nc_type xtype, MPI_Offset nelems,
253 return ncmpi_put_att_uchar(ncid, varid, name, xtype, nelems, buf);
257 static int ncmpi_get_att_type(
int ncid,
int varid,
const char* name,
MUchar* buf) {
258 return ncmpi_get_att_uchar(ncid, varid, name, buf);
264struct pnetcdf_traits<
MUlong> {
266 static nc_type type() {
return NC_UINT64; }
269 static int ncmpi_put_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
271 return ncmpi_put_vara_ulonglong_all(ncid, varid, start, count, (
const long long unsigned int*)(buf));
275 static int ncmpi_put_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
276 const MPI_Offset stride[],
const MUlong* buf) {
277 return ncmpi_put_vars_ulonglong_all(ncid, varid, start, count, stride, (
const long long unsigned int*)(buf));
281 static int ncmpi_get_vara_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
283 return ncmpi_get_vara_ulonglong_all(ncid, varid, start, count, (
long long unsigned int*)(buf));
287 static int ncmpi_get_vars_type_all(
int ncid,
int varid,
const MPI_Offset start[],
const MPI_Offset count[],
288 const MPI_Offset stride[],
MUlong* buf) {
289 return ncmpi_get_vars_ulonglong_all(ncid, varid, start, count, stride, (
long long unsigned int*)(buf));
293 static int ncmpi_put_att_type(
int ncid,
int varid,
const char* name, nc_type xtype, MPI_Offset nelems,
295 return ncmpi_put_att_ulonglong(ncid, varid, name, xtype, nelems, (
const long long unsigned int*)(buf));
299 static int ncmpi_get_att_type(
int ncid,
int varid,
const char* name,
MUlong* buf) {
300 return ncmpi_get_att_ulonglong(ncid, varid, name, (
long long unsigned int*)(buf));
328 status = ncmpi_open(mpiComm, name.c_str(), NC_NOWRITE,
globalMpiInfo(), &fileId);
330 if(status == NC_NOERR) {
332 status = ncmpi_close(fileId);
406 status = ncmpi_inq_ndims(
b_ncId, &noDims);
408 for(
int dimId = 0; dimId < noDims; dimId++) {
409 MPI_Offset dimLength;
410 status = ncmpi_inq_dimlen(
b_ncId, dimId, &dimLength);
433 mTerm(1, AT_,
"Unsupported file mode.");
437#ifdef MAIA_NCMPI_PRINT_FILE_HINTS
439 std::cerr << std::endl <<
"Created/replaced/opened file: " <<
m_fileName << std::endl;
467#ifdef MAIA_NCMPI_PRINT_FILE_HINTS
469 std::cerr << std::endl <<
"Closing file: " <<
m_fileName << std::endl;
479#ifdef MAIA_EXTRA_DEBUG
481 cerr <<
"Warning: array '" << *it <<
"' in file '" <<
m_fileName <<
"' was defined but never written. "
482 <<
"Make sure that this is the intended behavior." << endl;
485 cerr <<
"Warning: scalar '" << *it <<
"' in file '" <<
m_fileName <<
"' was defined but never written. "
486 <<
"Make sure that this is the intended behavior." << endl;
551 const size_type totalCount);
568 const MInt maxNoChars = 256;
572 MChar user[maxNoChars];
573 MChar host[maxNoChars];
574 MChar dir[maxNoChars];
575 MChar exec[maxNoChars];
576 MChar date[maxNoChars];
579 fill(user, user + maxNoChars,
'\0');
580 fill(host, host + maxNoChars,
'\0');
581 fill(dir, dir + maxNoChars,
'\0');
582 fill(exec, exec + maxNoChars,
'\0');
583 fill(date, date + maxNoChars,
'\0');
588 p = getpwuid(getuid());
590 strncpy(user, p->pw_name, maxNoChars - 1);
592 strncpy(user,
"n/a", maxNoChars - 1);
596 gethostname(host, maxNoChars - 1);
597 strcpy(&host[strlen(host)],
" (");
598 strcpy(&host[strlen(host)], XSTRINGIFY(MAIA_HOST_STRING));
599 strcpy(&host[strlen(host)],
")");
602#ifdef MAIA_GCC_COMPILER
603#pragma GCC diagnostic push
604#pragma GCC diagnostic ignored "-Wunused-result"
607 getcwd(dir, maxNoChars - 1);
609 readlink(
"/proc/self/exe", exec, maxNoChars - 1);
611#ifdef MAIA_GCC_COMPILER
612#pragma GCC diagnostic pop
621 timeInfo = localtime(&rawTime);
624 strftime(date, maxNoChars,
"%Y-%m-%d %H:%M:%S", timeInfo);
628 const MInt noItems = 5;
629 MChar buffer[noItems * maxNoChars];
630 memcpy(buffer + 0 * maxNoChars, user, maxNoChars);
631 memcpy(buffer + 1 * maxNoChars, host, maxNoChars);
632 memcpy(buffer + 2 * maxNoChars, dir, maxNoChars);
633 memcpy(buffer + 3 * maxNoChars, exec, maxNoChars);
634 memcpy(buffer + 4 * maxNoChars, date, maxNoChars);
641 memcpy(user, buffer + 0 * maxNoChars, maxNoChars);
642 memcpy(host, buffer + 1 * maxNoChars, maxNoChars);
643 memcpy(dir, buffer + 2 * maxNoChars, maxNoChars);
644 memcpy(exec, buffer + 3 * maxNoChars, maxNoChars);
645 memcpy(date, buffer + 4 * maxNoChars, maxNoChars);
647 MString build =
MString(XSTRINGIFY(MAIA_COMPILER_STRING)) +
" " +
MString(XSTRINGIFY(MAIA_BUILD_TYPE_STRING)) +
" ("
648 +
MString(XSTRINGIFY(MAIA_COMPILER_VERSION_STRING)) +
")";
690 const size_type noDims,
const size_type* totalCount) {
697 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
723 MFloat fillValueFloat = MFloatNaN;
724 MInt fillValueInt = std::numeric_limits<MInt>::max();
725 MLong fillValueLong = std::numeric_limits<MLong>::max();
726 MUlong fillValueUlong = std::numeric_limits<MUlong>::max();
727 MUchar fillValueChar = std::numeric_limits<MChar>::max();
728 MUchar fillValueUchar = std::numeric_limits<MUchar>::max();
732 [[maybe_unused]]
void* fillValue =
nullptr;
734 dataType = NC_DOUBLE;
735 fillValue = &fillValueFloat;
738 fillValue = &fillValueInt;
741 fillValue = &fillValueLong;
744 fillValue = &fillValueChar;
747 fillValue = &fillValueUchar;
749 dataType = NC_UINT64;
750 fillValue = &fillValueUlong;
752 TERMM(1,
"Invalid ParallelIo data type!");
757 MInt dimId, status, varId;
759 for(
size_type dId = 0; dId < noDims; dId++) {
765 MInt maxUsedDimensionNo = -1;
767 maxUsedDimensionNo = max(maxUsedDimensionNo, it->second.dimNo);
771 const MInt dimNo = maxUsedDimensionNo + 1;
772 const MString dimensionName =
"dim" + to_string(dimNo);
775 status = ncmpi_def_dim(
b_ncId, dimensionName.c_str(), totalCount[dId], &dimId);
776 b_error(status, dimensionName, AT_);
787 status = ncmpi_def_var(
b_ncId, name.c_str(), dataType, noDims, &dimIds[0], &varId);
790#if MAIA_NCMPI_FILL_VARIABLES
792 status = ncmpi_def_var_fill(
b_ncId, varId, 0, fillValue);
815 MFloat fillValueFloat = MFloatNaN;
816 MInt fillValueInt = std::numeric_limits<MInt>::max();
817 MLong fillValueLong = std::numeric_limits<MLong>::max();
818 MUlong fillValueUlong = std::numeric_limits<MUlong>::max();
819 MUchar fillValueChar = std::numeric_limits<MChar>::max();
820 MUchar fillValueUchar = std::numeric_limits<MUchar>::max();
824 [[maybe_unused]]
void* fillValue =
nullptr;
826 dataType = NC_DOUBLE;
827 fillValue = &fillValueFloat;
830 fillValue = &fillValueInt;
833 fillValue = &fillValueLong;
836 fillValue = &fillValueChar;
839 fillValue = &fillValueUchar;
841 dataType = NC_UINT64;
842 fillValue = &fillValueUlong;
844 TERMM(1,
"Invalid ParallelIo data type!");
849 MInt status = ncmpi_def_var(
b_ncId, name.c_str(), dataType, 0,
nullptr, &varId);
852#if MAIA_NCMPI_FILL_VARIABLES
854 status = ncmpi_def_var_fill(
b_ncId, varId, 0, fillValue);
883 MInt status = ncmpi_inq_nvars(
b_ncId, &noVars);
889 MBool varExists =
false;
890 for(
MInt i = 0; i < noVars; i++) {
891 MChar varname[NC_MAX_NAME + 1];
892 status = ncmpi_inq_varname(
b_ncId, i, varname);
896 status = ncmpi_inq_varndims(
b_ncId, i, &nDims);
899 if(name == varname && (nDims == noDimensions || noDimensions == -1)) {
921 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
930 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
938 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
946 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
957 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
977 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
982 status = ncmpi_inq_vartype(
b_ncId, varId, &ncType);
987 if(ncType == NC_INT) {
989 }
else if(ncType == NC_DOUBLE) {
991 }
else if(ncType == NC_INT64) {
993 }
else if(ncType == NC_CHAR) {
995 }
else if(ncType == NC_UBYTE) {
997 }
else if(ncType == NC_UINT64) {
1024 vector<MString>().swap(names);
1030 status = ncmpi_inq_nvars(
b_ncId, &noVars);
1036 for(
MInt i = 0; i < noVars; i++) {
1038 status = ncmpi_inq_varndims(
b_ncId, i, &noDimensions);
1041 if(noDimensions == dimension || dimension == -1) {
1044 MChar varname[NC_MAX_NAME + 1];
1045 status = ncmpi_inq_varname(
b_ncId, i, varname);
1047 names.emplace_back(varname);
1071 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1076 status = ncmpi_inq_varndims(
b_ncId, varId, &noDims);
1099 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1107 status = ncmpi_inq_vardimid(
b_ncId, varId, &dimId[0]);
1111 MPI_Offset arraySize;
1112 status = ncmpi_inq_dimlen(
b_ncId, dimId[dimensionId], &arraySize);
1115 return static_cast<size_type>(arraySize);
1136 if(datasetName.empty()) {
1139 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
1140 b_error(status, datasetName, AT_);
1145 MInt status = ncmpi_inq_attid(
b_ncId, varId, name.c_str(), &attId);
1148 MBool attributeExists;
1149 if(status == NC_NOERR) {
1150 attributeExists =
true;
1151 }
else if(status == NC_ENOTATT) {
1152 attributeExists =
false;
1154 attributeExists =
false;
1158 return attributeExists;
1179 if(datasetName.empty()) {
1182 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
1183 b_error(status, datasetName, AT_);
1188 MInt status = ncmpi_inq_atttype(
b_ncId, varId, name.c_str(), &ncType);
1193 if(ncType == NC_INT) {
1195 }
else if(ncType == NC_DOUBLE) {
1197 }
else if(ncType == NC_INT64) {
1199 }
else if(ncType == NC_CHAR) {
1201 }
else if(ncType == NC_UBYTE) {
1203 }
else if(ncType == NC_UINT64) {
1221 char value[MPI_MAX_INFO_VAL];
1222 MInt status, len, flag;
1223 MPI_Offset header_size, header_extent;
1224 MPI_Offset h_align = -1, v_align = -1, h_chunk = -1;
1228 status = ncmpi_inq_header_size(
b_ncId, &header_size);
1232 status = ncmpi_inq_header_extent(
b_ncId, &header_extent);
1236 status = ncmpi_inq_file_info(
b_ncId, &info_used);
1242 MPI_Info_get(info_used,
"nc_header_align_size", len + 1, value, &flag, AT_);
1243 h_align = strtoll(value,
nullptr, 10);
1249 MPI_Info_get(info_used,
"nc_var_align_size", len + 1, value, &flag, AT_);
1250 v_align = strtoll(value,
nullptr, 10);
1256 MPI_Info_get(info_used,
"nc_header_read_chunk_size", len + 1, value, &flag, AT_);
1257 h_chunk = strtoll(value,
nullptr, 10);
1263 std::cerr <<
"##### PNetCDF file hints #####" << std::endl;
1266 std::cerr <<
"nc_header_align_size is NOT set" << std::endl;
1268 std::cerr <<
"nc_header_align_size set to = " << h_align << std::endl;
1272 std::cerr <<
"nc_var_align_size is NOT set" << std::endl;
1274 std::cerr <<
"nc_var_align_size set to = " << v_align << std::endl;
1277 std::cerr <<
"nc_header_read_chunk_size is NOT set" << std::endl;
1279 std::cerr <<
"nc_header_read_chunk_size set to = " << h_chunk << std::endl;
1282 std::cerr <<
"header size = " << header_size << std::endl;
1283 std::cerr <<
"header extent = " << header_extent << std::endl;
1287 const MPI_Offset header_free = header_extent - header_size;
1288 std::cerr <<
"header free space (append mode) = " << header_free << std::endl;
1289 if(header_free < 1024) {
1290 std::cerr <<
"WARNING: ParallelIoPNetcdf file in append mode has less than 1KB of free header "
1291 "space. If this space is used up by adding new data to the header the entire data "
1292 "file has to be moved to make room for the new definitions which may cause MPI I/O "
1298 std::cerr <<
"##### PNetCDF file hints #####" << std::endl;
1304 const size_type* start,
const size_type* count,
const size_type* ghost) {
1313 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
1337 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
1338 const size_type noChunks, MPI_Offset diskStride) {
1345 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1351 totalCount *= count[d];
1355 MInt tmpScratchSize = (memoryStride == 1) ? 1 : totalCount;
1360 if(memoryStride == 1) {
1363 for(MPI_Offset i = 0; i < totalCount; i++) {
1364 tmpScratch[i] = array[memoryStride * i];
1366 data = tmpScratch.
data();
1381 if(diskStride == 1) {
1382 status = pnetcdf_traits<T>::ncmpi_put_vara_type_all(
b_ncId, varId, start, count, data);
1384 status = pnetcdf_traits<T>::ncmpi_put_vars_type_all(
b_ncId, varId, start, count, &diskStride, data);
1389 MPI_Offset chunkSize = count[0] / noChunks;
1390 if(count[0] % noChunks > 0) {
1403 std::copy(start, start + noDims, &start_[0]);
1404 std::copy(count, count + noDims, &count_[0]);
1406 for(
size_type i = 0; i < noChunks; i++) {
1407 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1408 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1409 const T* data_ = data + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1410 if(diskStride == 1) {
1411 status = pnetcdf_traits<T>::ncmpi_put_vara_type_all(
b_ncId, varId, &start_[0], &count_[0], data_);
1413 status = pnetcdf_traits<T>::ncmpi_put_vars_type_all(
b_ncId, varId, &start_[0], &count_[0], &diskStride, data_);
1437 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
1438 const size_type noChunks, MPI_Offset diskStride) {
1445 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1450 for(
size_type d = 0; d < noDims - 1; d++) {
1451 totalCount *= count[d];
1458 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1463 if(memoryStride == 1) {
1466 for(MPI_Offset i = 0; i < totalCount; i++) {
1467 tmpScratch[i] = array[memoryStride * i];
1469 data = tmpScratch.
data();
1486 for(MPI_Offset i = 0; i < totalCount; i++) {
1487 data[i].copy(&buf[i * strLen], strLen, 0);
1493 if(diskStride == 1) {
1494 status = ncmpi_put_vara_text_all(
b_ncId, varId, start, count, &buf[0]);
1496 status = ncmpi_put_vars_text_all(
b_ncId, varId, start, count, &diskStride, &buf[0]);
1501 MPI_Offset chunkSize = count[0] / noChunks;
1502 if(count[0] % noChunks > 0) {
1515 std::copy(start, start + noDims, &start_[0]);
1516 std::copy(count, count + noDims, &count_[0]);
1518 for(
size_type i = 0; i < noChunks; i++) {
1519 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1520 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1521 const char* buf_ = &buf[0] + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1522 if(diskStride == 1) {
1523 status = ncmpi_put_vara_text_all(
b_ncId, varId, &start_[0], &count_[0], &buf_[0]);
1525 status = ncmpi_put_vars_text_all(
b_ncId, varId, &start_[0], &count_[0], &diskStride, &buf_[0]);
1551 status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1555 MPI_Offset start = 0;
1556 MPI_Offset count = 1;
1559 status = pnetcdf_traits<T>::ncmpi_put_vara_type_all(
b_ncId, varId, &start, &count, &scalar);
1582 status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1586 MPI_Offset start = 0;
1587 MPI_Offset count = 1;
1590 status = ncmpi_put_vara_text_all(
b_ncId, varId, &start, &count, scalar.c_str());
1596 const size_type* start,
const size_type* count) {
1605 mTerm(1, AT_,
"Group functionality not supported by PNetcdf backend");
1627 MPI_Offset* count, MPI_Offset memoryStride,
const size_type noChunks,
1628 MPI_Offset diskStride) {
1635 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1641 totalCount *= count[d];
1645 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1648 if(memoryStride == 1) {
1651 data = tmpScratch.
data();
1662 if(diskStride == 1) {
1663 status = pnetcdf_traits<T>::ncmpi_get_vara_type_all(
b_ncId, varId, start, count, data);
1665 status = pnetcdf_traits<T>::ncmpi_get_vars_type_all(
b_ncId, varId, start, count, &diskStride, data);
1670 MPI_Offset chunkSize = count[0] / noChunks;
1671 if(count[0] % noChunks > 0) {
1684 std::copy(start, start + noDims, &start_[0]);
1685 std::copy(count, count + noDims, &count_[0]);
1687 for(
size_type i = 0; i < noChunks; i++) {
1688 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1689 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1690 T* data_ = data + min(i * chunkSize * nDSize, (count[0] - 1) * nDSize);
1691 if(diskStride == 1) {
1692 status = pnetcdf_traits<T>::ncmpi_get_vara_type_all(
b_ncId, varId, &start_[0], &count_[0], data_);
1694 status = pnetcdf_traits<T>::ncmpi_get_vars_type_all(
b_ncId, varId, &start_[0], &count_[0], &diskStride, data_);
1701 if(memoryStride != 1) {
1702 for(MPI_Offset i = 0; i < totalCount; i++) {
1703 array[memoryStride * i] = tmpScratch[i];
1727 MPI_Offset* count, MPI_Offset memoryStride,
const size_type noChunks,
1728 MPI_Offset diskStride) {
1735 MInt status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1740 for(
size_type d = 0; d < noDims - 1; d++) {
1741 totalCount *= count[d];
1748 MInt tmpScratchSize = (memoryStride == 1 ? 1 : totalCount);
1751 if(memoryStride == 1) {
1754 data = tmpScratch.
data();
1764 if(diskStride == 1) {
1765 status = ncmpi_get_vara_text_all(
b_ncId, varId, start, count, &buf[0]);
1767 status = ncmpi_get_vars_text_all(
b_ncId, varId, start, count, &diskStride, &buf[0]);
1772 for(
size_type i = 0; i < totalCount; i++) {
1774 tmp.append(&buf[i * strLen], strLen);
1775 data[i].append(tmp.c_str(), 0, strLen);
1779 MPI_Offset chunkSize = count[0] / noChunks;
1780 if(count[0] % noChunks > 0) {
1787 std::copy(start, start + noDims, &start_[0]);
1788 std::copy(count, count + noDims, &count_[0]);
1790 for(
size_type i = 0; i < noChunks; i++) {
1791 start_[0] = min(start[0] + i * chunkSize * diskStride, count[0] * diskStride - 1);
1792 count_[0] = max(min(chunkSize, count[0] - i * chunkSize), 0ll);
1793 if(diskStride == 1) {
1794 status = ncmpi_get_vara_text_all(
b_ncId, varId, &start_[0], &count_[0], &buf[0]);
1796 status = ncmpi_get_vars_text_all(
b_ncId, varId, &start_[0], &count_[0], &diskStride, &buf[0]);
1802 for(
size_type j = start_[0]; j < totalCount; j++) {
1804 tmp.append(&buf[(j - start_[0]) * strLen], strLen);
1805 data[j].append(tmp, 0, strLen);
1811 if(memoryStride != 1) {
1812 for(MPI_Offset i = 0; i < totalCount; i++) {
1813 array[memoryStride * i] = tmpScratch[i];
1839 status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1843 MPI_Offset start = 0;
1844 MPI_Offset count = 1;
1847 status = pnetcdf_traits<T>::ncmpi_get_vara_type_all(
b_ncId, varId, &start, &count, scalar);
1872 status = ncmpi_inq_varid(
b_ncId, name.c_str(), &varId);
1876 MPI_Offset start = 0;
1877 MPI_Offset count = 1;
1880 status = ncmpi_get_vara_text_all(
b_ncId, varId, &start, &count, (
char*)scalar);
1902 const size_type totalCount) {
1907 if(datasetName.empty()) {
1910 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
1911 b_error(status, datasetName, AT_);
1921 pnetcdf_traits<T>::ncmpi_put_att_type(
b_ncId, varId, name.c_str(), pnetcdf_traits<T>::type(), totalCount, value);
1922 b_error(status, datasetName +
"::" + name, AT_);
1939 const size_type totalCount) {
1942 if(totalCount > 1) {
1943 mTerm(1, AT_,
"Array of strings attributes not yet supported.");
1948 if(datasetName.empty()) {
1952 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
1953 b_error(status, datasetName, AT_);
1961 MInt status = ncmpi_inq_attlen(
b_ncId, varId, name.c_str(), &length);
1962 b_error(status, datasetName +
"::" + name, AT_);
1964 if(length <
static_cast<MPI_Offset
>(value->length())) {
1970 MInt status = ncmpi_put_att_text(
b_ncId, varId, name.c_str(), value->length(), (*value).c_str());
1971 b_error(status, datasetName +
"::" + name, AT_);
1988 const size_type totalCount) {
1993 if(datasetName.empty()) {
1996 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
1997 b_error(status, datasetName, AT_);
2003 status = ncmpi_inq_attlen(
b_ncId, varId, name.c_str(), &length);
2004 b_error(status, datasetName +
"::" + name, AT_);
2006 if(length != (MPI_Offset)totalCount) {
2007 TERMM(1,
"Requested attribute (" + name +
") has different number of elements (" + to_string(length)
2008 +
") than specified (" + to_string(totalCount)
2009 +
"). Use getAttributeCount() to query number of elements first");
2013 status = pnetcdf_traits<T>::ncmpi_get_att_type(
b_ncId, varId, name.c_str(), value);
2014 b_error(status, datasetName +
"::" + name, AT_);
2031 const size_type totalCount) {
2034 if(totalCount > 1) {
2035 mTerm(1, AT_,
"Array of strings attributes not yet supported.");
2040 if(datasetName.empty()) {
2043 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
2044 b_error(status, datasetName, AT_);
2050 status = ncmpi_inq_attlen(
b_ncId, varId, name.c_str(), &length);
2051 b_error(status, datasetName +
"::" + name, AT_);
2055 status = ncmpi_get_att_text(
b_ncId, varId, name.c_str(), tmpScratch.
data());
2056 b_error(status, datasetName +
"::" + name, AT_);
2057 value->assign(tmpScratch.
data(), length);
2066 if(datasetName.empty()) {
2069 MInt status = ncmpi_inq_varid(
b_ncId, datasetName.c_str(), &varId);
2070 b_error(status, datasetName, AT_);
2076 status = ncmpi_inq_attlen(
b_ncId, varId, name.c_str(), &length);
2077 b_error(status, datasetName +
"::" + name, AT_);
2102 if(status != NC_NOERR) {
2104 cerr <<
"*** ERROR in parallelio_pnetcdf ***" << endl;
2105 cerr <<
"NetCDF error in '" <<
location <<
"'" << endl;
2106 cerr <<
"NetCDF returns status " << status <<
": " << ncmpi_strerror(status) << endl;
2107 cerr <<
"The file/variable/attribute in question was: " << name << endl;
2109 TERMM(1,
"NetCDF error in ParallelIo.");
2117 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2118 const size_type noChunks, MPI_Offset diskStride);
2120 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2121 const size_type noChunks, MPI_Offset diskStride);
2123 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2124 const size_type noChunks, MPI_Offset diskStride);
2126 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2127 const size_type noChunks, MPI_Offset diskStride);
2134 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2135 const size_type noChunks, MPI_Offset diskStride);
2137 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2138 const size_type noChunks, MPI_Offset diskStride);
2140 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2141 const size_type noChunks, MPI_Offset diskStride);
2143 MPI_Offset* start, MPI_Offset* count, MPI_Offset memoryStride,
2144 const size_type noChunks, MPI_Offset diskStride);
2151 const size_type totalCount);
2153 const size_type totalCount);
2155 const size_type totalCount);
2157 const size_type totalCount);
2159 const size_type totalCount);
2161 const size_type totalCount);
2163 const size_type totalCount);
2165 const size_type totalCount);
2167 const size_type totalCount);
2169 const size_type totalCount);
This class is intended to do all the heavy lifting when it comes to reading and writing "big data" fi...
std::set< MString > m_unwrittenScalars
MLong size_type
Type used for all size- and offset-related values.
std::set< MString > m_unwrittenArrays
void setAttribute(const T &value, const MString &name, const MString &datasetName="")
Set a file or dataset attribute. [MPI]
MInt b_getDatasetType(const MString &name)
Returns the data type of a dataset in the file (can be array, multi-D array or scalar).
static const MString b_fileExt()
Returns backend-specific ending of filename (either ".Netcdf" or ".Hdf5")
void b_getDatasetNames(std::vector< MString > &names, const size_type dimension)
Returns a vector of all existing datasets with the given number of dimensions in the file (if any)....
void b_writeArray(const T *const array, const MString &name, const size_type noDims, MPI_Offset *start, MPI_Offset *count, MPI_Offset memoryStride, const size_type noChunks, MPI_Offset diskStride)
Writes array data to file (generic version). [MPI]
void b_saveHeader()
Adds all additional header information that are needed in the file.
~ParallelIoPNetcdf() override
Calls close(). [MPI]
void b_readScalar(T *scalar, const MString &name)
Read scalar data from file (generic version). [MPI]
void b_printFileHints()
Print PNetCDF file hints to cerr.
void b_writeAdditionalData()
Write additional data to file (NetCDF-specific). [MPI]
void b_getAttribute(T *const value, const MString &name, const MString &datasetName="", const size_type totalCount=1)
Retrieve an attribute from file (generic version).
void b_getAttributeCount(const MString &name, size_type *totalCount, const MString &datasetName="")
void b_getGroupNames(std::vector< MString > &names, const MString &path)
static MBool b_isValidFile(const MString &name, const MPI_Comm &mpiComm)
Check if specified file is a valid NetCDF file (i.e. can be opened by the PNetcdf backend). [MPI]
void b_defineScalar(maiabd_type type, const MString &name)
Defines a scalar in the file. [MPI]
void b_defineArray(maiabd_type type, const MString &name, size_type noDims, size_type *totalCount)
Defines an array in the file. [MPI]
void b_setAttribute(const T *value, const MString &name, const MString &datasetName="", const size_type totalCount=1)
Set an attribute in the file (generic version).
size_type b_getDatasetNoDims(const MString &name)
Get number of dimensions of a dataset with the given name.
size_type b_getDatasetSize(const MString &name, const size_type dimensionId=0)
Get the length of one dimension of an arbitrary array in the file.
MBool b_hasObject(const MString &path)
MBool b_hasDataset(const MString &name, const size_type dimension)
Check if the file contains a dataset with the given name and number of dimensions.
MInt b_getAttributeType(const MString &name, const MString &datasetName="")
Returns the data type of an attribute in the file.
void b_addAdditionalHeader()
Write additional headers to file (e.g. grid file name, creation date etc.). [MPI]
void b_readArray(T *array, const MString &name, const size_type noDims, MPI_Offset *start, MPI_Offset *count, MPI_Offset memoryStride, const size_type noChunks, MPI_Offset diskStride)
Read array data from file (generic version). [MPI]
static void b_error(MInt status, const MString &name, const MString &location)
Check the status code of a PNetcdf operation and output a meaningful message.
void close()
Close the file (normally called by the destructor but needs to be explicitly called earlier in speci...
MBool b_hasAttribute(const MString &name, const MString &datasetName="")
Check if a given attribute exists in the file.
void b_ncEndDef()
Leave the define mode (NetCDF-specific). [MPI]
void b_writeScalar(const T scalar, const MString &name)
Writes scalar data to file (generic version). [MPI]
ParallelIoPNetcdf(const MString &fileName, MInt fileMode, const MPI_Comm &mpiComm)
Creates a new object to read and write big data files. [MPI]
void b_ncRedef()
Enter define mode (NetCDF-specific). [MPI]
This class is a ScratchSpace.
void mTerm(const MInt errorCode, const MString &location, const MString &message)
const MPI_Info & globalMpiInfo()
Return global MPI information.
std::basic_string< char > MString
int MPI_Info_get(MPI_Info info, const char *key, int valuelen, char *value, int *flag, const MString &name)
same as MPI_Info_get
int MPI_Info_free(MPI_Info *info, const MString &name)
same as MPI_Info_free
int MPI_Info_get_valuelen(MPI_Info info, const char *key, int *valuelen, int *flag, const MString &name)
same as MPI_Info_get_valuelen
int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm, const MString &name, const MString &varname)
same as MPI_Bcast
const MInt PIO_UNKNOWN_TYPE
const MInt PIO_CREATE
< File mode to create a new file. Aborts if file already exists
Namespace for auxiliary functions/classes.