Commit 2ed8ad09 authored by gsell

Use the H5_HAVE_PARALLEL macro from HDF5 instead of the macro PARALLEL_IO

parent 3a9e0e41
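
The substance of the change: H5_HAVE_PARALLEL is defined by HDF5's own configuration header (H5pubconf.h, pulled in through the public HDF5 headers) whenever the library was built with MPI support. Guarding MPI-specific code with it keeps H5hut in sync with the installed HDF5 automatically, whereas PARALLEL_IO had to be defined and kept consistent by H5hut's own build system. A minimal sketch of the resulting guard pattern, modeled on the examples in this diff and assuming H5hut.h pulls in the HDF5 public headers:

#include <stdlib.h>
#include "H5hut.h"

#ifdef H5_HAVE_PARALLEL      /* defined by HDF5 itself when built with MPI */
#include <mpi.h>
#endif

int
main (int argc, char** argv) {
#ifdef H5_HAVE_PARALLEL
        MPI_Init (&argc, &argv);
#endif
        /* H5hut calls go here; the same source compiles in serial and parallel builds */
#ifdef H5_HAVE_PARALLEL
        MPI_Finalize ();
#endif
        return EXIT_SUCCESS;
}

The serial fallback in the example headers below additionally stubs MPI_COMM_WORLD out as 0 (and MPI_Init as an empty macro), so example sources need no further changes.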
@@ -3,7 +3,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
@@ -3,7 +3,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
@@ -4,7 +4,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
@@ -2,7 +2,7 @@
 #include <stdlib.h>
 #include "H5hut.h"
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 #include <mpi.h>
 #endif
@@ -87,7 +87,7 @@ done:
 H5FedCloseMesh (m);
 H5CloseFile (f);
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 MPI_Finalize ();
 #endif
......
@@ -2,7 +2,7 @@
 #include <stdlib.h>
 #include "H5hut.h"
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 #include <mpi.h>
 #endif
@@ -253,7 +253,7 @@ main (
 /* done */
 H5CloseFile (f);
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 MPI_Finalize ();
 #endif
......
@@ -3,7 +3,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
 #ifndef __EXAMPLES_H
 #define __EXAMPLES_H
-#if !defined (PARALLEL_IO)
+#if !defined (H5_HAVE_PARALLEL)
 #define MPI_COMM_WORLD (0)
 #define MPI_Init(argc, argv)
......
@@ -25,7 +25,7 @@ h5_createprop_file (
 H5_API_RETURN ((h5_int64_t)h5_create_prop (H5_PROP_FILE));
 }
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 #define h5_setprop_file_mpio FC_MANGLING( \
 h5_setprop_file_mpio, \
 H5_SETPROP_FILE_MPIO)
......
@@ -188,7 +188,7 @@ h5bl_3d_getchunk (
 }
-#ifdef PARALLEL_IO
+#if defined(H5_HAVE_PARALLEL)
 #define h5bl_3d_setgrid FC_MANGLING ( \
 h5bl_3d_setgrid, \
 h5bl_3d_setgrid)
......
@@ -145,7 +145,7 @@ h5_abort_errorhandler (
 if (h5_log_level > 0) {
 h5_verror (fmt, ap);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 MPI_Abort(MPI_COMM_WORLD, -(int)h5_errno);
 #else
 exit (-(int)h5_errno);
......
@@ -80,7 +80,7 @@ mpi_init (
 const h5_file_p f
 ) {
 H5_INLINE_FUNC_ENTER (h5_err_t);
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 TRY (h5priv_mpi_comm_size (f->props->comm, &f->nprocs));
 TRY (h5priv_mpi_comm_rank (f->props->comm, &f->myproc));
@@ -141,7 +141,7 @@ mpi_init (
 TRY (h5_optimize_for_lustre(f, filename));
 }
 #endif
-#endif /* PARALLEL_IO */
+#endif /* H5_HAVE_PARALLEL */
 H5_RETURN (H5_SUCCESS);
 }
@@ -181,7 +181,7 @@ set_default_file_props (
 H5_STEPNAME,
 H5_STEPNAME_LEN - 1);
 props->width_step_idx = H5_STEPWIDTH;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->comm = MPI_COMM_WORLD;
 #endif
 H5_RETURN (H5_SUCCESS);
@@ -201,7 +201,7 @@ h5_set_prop_file_mpio_collective (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_POSIX | H5_VFD_MPIO_INDEPENDENT | H5_VFD_CORE);
 props->flags |= H5_VFD_MPIO_COLLECTIVE;
 props->comm = *comm;
@@ -229,7 +229,7 @@ h5_set_prop_file_mpio_independent (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_COLLECTIVE | H5_VFD_MPIO_POSIX | H5_VFD_CORE);
 props->flags |= H5_VFD_MPIO_INDEPENDENT;
 props->comm = *comm;
@@ -254,7 +254,7 @@ h5_set_prop_file_mpio_posix (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_COLLECTIVE | H5_VFD_MPIO_POSIX | H5_VFD_CORE);
 props->flags |= H5_VFD_MPIO_INDEPENDENT;
 props->comm = *comm;
@@ -279,7 +279,7 @@ h5_set_prop_file_core_vfd (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_COLLECTIVE | H5_VFD_MPIO_INDEPENDENT | H5_VFD_MPIO_POSIX);
 props->flags |= H5_VFD_MPIO_INDEPENDENT;
 props->comm = MPI_COMM_SELF;
@@ -352,7 +352,7 @@ h5_set_prop_file_throttle (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 // throttle only if VFD is MPIO independent od POSIX
 h5_int64_t mask = H5_VFD_MPIO_INDEPENDENT;
 #if H5_VERSION_LE(1,8,12)
@@ -519,7 +519,7 @@ h5_open_file2 (
 "Invalid property class: %lld.",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 f->props->comm = props->comm;
 #endif
 f->props->flags = props->flags;
......
@@ -47,7 +47,7 @@ h5bpriv_open_file (
 b = f->b;
 memset (b, 0, sizeof (*b));
-#if defined(PARALLEL_IO)
+#ifdef H5_HAVE_PARALLEL
 size_t n = sizeof (struct h5b_partition) / sizeof (h5_int64_t);
 TRY (h5priv_mpi_type_contiguous(n, MPI_LONG_LONG, &b->partition_mpi_t));
 #endif
@@ -88,7 +88,7 @@ h5bpriv_close_file (
 TRY (hdf5_close_dataspace (b->diskshape));
 TRY (hdf5_close_dataspace (b->memshape));
 TRY (hdf5_close_property (b->dcreate_prop));
-#if defined(PARALLEL_IO)
+#ifdef H5_HAVE_PARALLEL
 TRY (h5priv_mpi_type_free (&b->partition_mpi_t));
 #endif
 TRY (h5_free (f->b));
......
@@ -74,7 +74,7 @@ _normalize_partition (
 }
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 /* MLH: this could be improved with an MPI_Reduce and MAX operator...
 * but the user_layout array-of-structs would need to be a struct-of-arrays */
 static void
@@ -571,7 +571,7 @@ h5b_3d_set_view (
 b->user_layout[0].k_end = k_end;
 _normalize_partition(&b->user_layout[0]);
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 h5b_partition_t *user_layout;
 h5b_partition_t *write_layout;
@@ -762,7 +762,6 @@ h5b_3d_get_chunk (
 H5_RETURN (H5_SUCCESS);
 }
-#ifdef PARALLEL_IO
 h5_err_t
 h5b_3d_set_grid (
 const h5_file_t fh, /*!< IN: File handle */
@@ -796,11 +795,13 @@ h5b_3d_set_grid (
 int dims[3] = { k, j, i };
 int period[3] = { 0, 0, 0 };
+#ifdef H5_HAVE_PARALLEL
 TRY( h5priv_mpi_cart_create(
 f->props->comm, 3, dims, period, 0, &f->b->cart_comm) );
+#else
+h5_warn ("Defining a grid in serial case doesn't make much sense!");
+#endif
 f->b->have_grid = 1;
 H5_RETURN (H5_SUCCESS);
 }
@@ -824,11 +825,16 @@ h5b_3d_get_grid_coords (
 "%s",
 "Grid dimensions have not been set!");
+#ifdef H5_HAVE_PARALLEL
 int coords[3];
 TRY( h5priv_mpi_cart_coords(f->b->cart_comm, proc, 3, coords) );
 *k = coords[0];
 *j = coords[1];
 *i = coords[2];
+#else
+*k = *j = *i = 1;
+h5_warn ("Defining grid in serial case doesn't make much sense!");
+#endif
 H5_RETURN (H5_SUCCESS);
 }
@@ -855,11 +861,14 @@ h5b_3d_set_dims (
 "Grid dimensions have not been set!");
 h5_size_t dims[3] = { k, j, i };
+#ifdef H5_HAVE_PARALLEL
 h5_size_t check_dims[3] = { k, j, i };
 TRY( h5priv_mpi_bcast(
 check_dims, 3, MPI_LONG_LONG, 0, f->props->comm) );
+#else
+h5_size_t check_dims[3] = { 1, 1, 1 };
+h5_warn ("Defining grid in serial case doesn't make much sense!");
+#endif
 if ( dims[0] != check_dims[0] ||
 dims[1] != check_dims[1] ||
 dims[2] != check_dims[2]
@@ -899,7 +908,6 @@ h5b_3d_set_dims (
 H5_RETURN (H5_SUCCESS);
 }
-#endif
 h5_err_t
 h5b_3d_set_halo (
......
@@ -136,7 +136,7 @@ h5u_set_num_points (
 "Invalid number of particles: %lld!\n",
 (long long)nparticles);
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 /*
 if we are not using parallel-IO, there is enough information
 to know that we can short circuit this routine. However,
@@ -174,12 +174,12 @@ h5u_set_num_points (
 NULL));
 }
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 count = u->nparticles;
 TRY( u->shape = hdf5_create_dataspace (1, &count, NULL));
 u->viewstart = 0;
 u->viewend = nparticles - 1; // view range is *inclusive*
-#else /* PARALLEL_IO */
+#else /* H5_HAVE_PARALLEL */
 /*
 The Gameplan here is to declare the overall size of the on-disk
 data structure the same way we do for the serial case. But
@@ -317,7 +317,7 @@ h5u_set_view (
 (long long)end,
 (long long)start);
 }
-#if PARALLEL_IO
+#if H5_HAVE_PARALLEL
 TRY (
 h5priv_mpi_allreduce_max (
 &end, &total, 1, MPI_LONG_LONG, f->props->comm)
@@ -575,7 +575,7 @@ h5u_set_canonical_view (
 u->nparticles = total / f->nprocs;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 h5_int64_t remainder = 0;
 remainder = total % f->nprocs;
 start = f->myproc * u->nparticles;
@@ -588,7 +588,7 @@ h5u_set_canonical_view (
 start += f->myproc;
 else
 start += remainder;
-#endif // PARALLEL_IO
+#endif // H5_HAVE_PARALLEL
 h5_int64_t length = u->nparticles;
 TRY (h5u_set_view_length (fh, start, length));
......
@@ -1048,7 +1048,7 @@ hdf5_set_layout_property (
 H5_RETURN (H5_SUCCESS);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 static inline h5_err_t
 hdf5_set_fapl_mpio_property (
 hid_t fapl_id,
......
@@ -671,7 +671,7 @@ h5priv_initialize (
 h5_initialized = 1;
 H5_CORE_API_ENTER (h5_err_t, "%s", "void");
 ret_value = H5_SUCCESS;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 int mpi_is_initialized;
 MPI_Initialized (&mpi_is_initialized);
 if (!mpi_is_initialized) {
......
@@ -31,7 +31,7 @@
 #include "private/h5_mpi.h"
 #include "private/h5_hdf5.h"
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 static inline h5_err_t
 h5priv_start_throttle (
 const h5_file_p f
@@ -98,7 +98,7 @@ h5priv_end_throttle (
 }
 H5_RETURN (H5_SUCCESS);
 }
-#else // PARALLEL_IO
+#else // H5_HAVE_PARALLEL
 static inline h5_err_t
 h5priv_start_throttle (const h5_file_p f) {
 UNUSED_ARGUMENT (f);
@@ -111,7 +111,7 @@ h5priv_end_throttle (const h5_file_p f) {
 return H5_SUCCESS;
 }
-#endif // PARALLEL_IO
+#endif // H5_HAVE_PARALLEL
 h5_err_t
......
@@ -10,7 +10,7 @@
 #ifndef __PRIVATE_H5_MPI_H
 #define __PRIVATE_H5_MPI_H
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 #include "h5core/h5_types.h"
 #include "h5core/h5_err.h"
......
@@ -35,7 +35,7 @@ struct h5_prop_file { // file property
 h5_int64_t align; // HDF5 alignment
 h5_int64_t increment; // increment for core vfd
 h5_int64_t throttle;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 MPI_Comm comm;
 #endif
 hid_t xfer_prop; // dataset transfer properties
......
@@ -14,7 +14,7 @@
 #include "h5core/h5_types.h"
 #include "h5core/h5t_octree.h"
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 #include <mpi.h>
 #endif
......
@@ -336,7 +336,6 @@ H5Block3dGetChunkSize (
 i, j, k));
 }
-#ifdef PARALLEL_IO
 /**
 Define an underlying 3D Cartesian grid on the processors with dimensions
 (\c i,\c j,\c k). You can look up a processor's index into the grid
@@ -347,6 +346,9 @@ H5Block3dGetChunkSize (
 The product of the dimensions must equal the size of the MPI communicator.
+\note This function is also available in a serial version of H5hut - even it
+doesn't make much sense.
 \return \c H5_SUCCESS on success
 \return \c H5_FAILURE on error
 */
@@ -373,6 +375,9 @@ H5Block3dSetGrid (
 Look up the index (\c i, \c j, \c k) in the grid belonging to MPI processor
 \c proc.
+\note This function is also available in a serial version of H5hut - even it
+doesn't make much sense.
 \return \c H5_SUCCESS on success
 \return \c H5_FAILURE on error
 */
@@ -401,6 +406,9 @@ H5Block3dGetGridCoords (
 A grid must be already set with \ref H5Block3dSetGrid, and all processors
 must specify the same dimensions.
+\note This function is also available in a serial version of H5hut - even it
+doesn't make much sense.
 \return \c H5_SUCCESS on success
 \return \c H5_FAILURE on error
 */
@@ -422,7 +430,6 @@ H5Block3dSetDims (
 f,
 i, j, k));
 }
-#endif
 /**
 Sets the additional cells (\c i, \c j, \c k) in each direction to use as
......
@@ -82,7 +82,7 @@ test_read_data64(h5_file_t file, int step)
 TEST("Verifying dataset info");
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 int rank, nprocs;
 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
@@ -202,7 +202,7 @@ test_read_data32(h5_file_t file, int step)
 RETURN(status, H5_SUCCESS, "H5SetStep");
 test_read_field_attribs(file, "e", t);
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 status = H5Block3dSetGrid(file, grid[0], grid[1], grid[2]);
 RETURN(status, H5_SUCCESS, "H5Block3dSetGrid");
@@ -239,7 +239,7 @@ void h5b_test_read1(void)
 TEST("Opening file once, read-only");
 h5_prop_t props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOCollective (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOCollective");
......
@@ -18,7 +18,7 @@ void h5b_test_write2(void);
 void h5b_test_read1(void);
 void h5b_test_read2(void);
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 static int
 _nth_root_int_divisor (const int m, const int n)
 {
@@ -39,7 +39,7 @@ _nth_root_int_divisor (const int m, const int n)
 int main(int argc, char **argv)
 {
 extern h5_size_t layout[6];
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 MPI_Init(&argc, &argv);
 int procs, rank;
@@ -69,7 +69,7 @@ int main(int argc, char **argv)
 layout[3] = (j+1)*NBLOCKY - 1;
 layout[4] = k*NBLOCKZ;
 layout[5] = (k+1)*NBLOCKZ - 1;
-#else // PARALLEL_IO
+#else // H5_HAVE_PARALLEL
 grid[0] = 1;
 grid[1] = 1;
 grid[2] = 1;
@@ -88,7 +88,7 @@ int main(int argc, char **argv)
 /* Tests are generally arranged from least to most complexity... */
 AddTest("write1", h5b_test_write1, NULL, "Write 64-bit data", NULL);
 AddTest("read1", h5b_test_read1, NULL, "Read 64-bit data", NULL);
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 AddTest("write2", h5b_test_write2, NULL, "Write 32-bit data", NULL);
 AddTest("read2", h5b_test_read2, NULL, "Read 32-bit data", NULL);
 #endif
@@ -111,7 +111,7 @@ int main(int argc, char **argv)
 //if (GetTestCleanup() && !getenv("HDF5_NOCLEANUP"))
 // TestCleanup();
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 TestPrintf ("reached end\n");
 fflush(stdout);
 MPI_Finalize();
......
@@ -149,7 +149,7 @@ test_write_data32(h5_file_t file, int step)
 if (val == 0) test_write_field_attribs(file, "e", t);
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 extern h5_size_t grid[3];
 status = H5Block3dSetGrid(file, grid[0], grid[1], grid[2]);
@@ -181,7 +181,7 @@ void h5b_test_write1(void)
 TEST("Opening file once, write-truncate");
 h5_prop_t props = H5CreateFileProp ();
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOCollective (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOCollective");
......
@@ -106,7 +106,7 @@ test_read_data64(h5_file_t file, int nparticles, int step)
 TEST("Verifying dataset info");
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
 #else
@@ -479,7 +479,7 @@ void h5u_test_read3(void)
 TEST("Opening file once, read-only, MPI-POSIX VFD");
 h5_prop_t props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOPosix (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOPosix");
@@ -508,7 +508,7 @@ void h5u_test_read4(void)
 TEST("Opening file twice, read-only, MPI-IO Independent VFD");
 h5_prop_t props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOIndependent (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOIndependent");
......
@@ -20,7 +20,7 @@ void h5u_test_read4(void);
 int main(int argc, char **argv)
 {
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 MPI_Init(&argc, &argv);
 int procs;
 MPI_Comm_size(MPI_COMM_WORLD, &procs);
@@ -66,7 +66,7 @@ int main(int argc, char **argv)
 //if (GetTestCleanup() && !getenv("HDF5_NOCLEANUP"))
 // TestCleanup();
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 TestPrintf ("reached end\n");
 fflush(stdout);
 MPI_Finalize();
......
@@ -211,7 +211,7 @@ test_write_data32(h5_file_t file, int nparticles, int step)
 status = H5PartSetNumParticles(file, nparticles);
 RETURN(status, H5_SUCCESS, "H5PartSetNumParticles");
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 int rank, nprocs;
 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
@@ -358,7 +358,7 @@ void h5u_test_write1(void)
 TEST("Opening file once, write-truncate");
 h5_prop_t props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOCollective (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOCollective");
@@ -386,7 +386,7 @@ void h5u_test_write2(void)
 TEST("Opening file twice, write-append + read-only");
 h5_prop_t props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOCollective (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOCollective");
@@ -421,7 +421,7 @@ void h5u_test_write3(void)
 h5_prop_t props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOPosix (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOPosix");
@@ -457,7 +457,7 @@ void h5u_test_write4(void)
 TEST("Opening file twice, write-append + read-only, MPI-IO Independent VFD");
 props = H5CreateFileProp ();
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 MPI_Comm comm = MPI_COMM_WORLD;
 status = H5SetPropFileMPIOIndependent (props, &comm);
 RETURN(status, H5_SUCCESS, "H5SetPropFileMPIOIndependent");
......
@@ -530,7 +530,7 @@ TestPrintf(const char *format, ...)
 va_list arglist;
 int ret_value = -1;
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 int nproc;
 MPI_Comm_rank(MPI_COMM_WORLD, &nproc);
 if ( nproc == 0 || VERBOSE_HI ) {
@@ -564,7 +564,7 @@ TestErrPrintf(const char *format, ...)
 /* Increment the error count */
 num_errs++;
-#if PARALLEL_IO
+#if H5_HAVE_PARALLEL
 int nproc;
 MPI_Comm_rank(MPI_COMM_WORLD, &nproc);
 if ( nproc == 0 || VERBOSE_HI ) {
......