Commit 2ed8ad09 authored by gsell

Use the H5_HAVE_PARALLEL macro from HDF5 instead of the macro PARALLEL_IO

parent 3a9e0e41
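Background: `PARALLEL_IO` was a macro that H5hut's own build system had to define, while `H5_HAVE_PARALLEL` is defined (or left undefined) by HDF5 itself in its generated `H5pubconf.h`, which every HDF5 application pulls in via `hdf5.h`. Keying the guards on `H5_HAVE_PARALLEL` therefore makes H5hut's notion of "parallel" always agree with the HDF5 library it is actually compiled against. A minimal sketch of the pattern, as a hypothetical standalone program rather than code from this commit:

```c
#include <hdf5.h>   /* includes H5pubconf.h, which defines H5_HAVE_PARALLEL
                       iff this HDF5 build was configured with MPI support */
#ifdef H5_HAVE_PARALLEL
#include <mpi.h>
#endif

int
main (int argc, char* argv[]) {
#ifdef H5_HAVE_PARALLEL
	MPI_Init (&argc, &argv);        /* parallel build: bring up MPI first */
#endif
	/* ... serial or parallel HDF5/H5hut I/O goes here ... */
#ifdef H5_HAVE_PARALLEL
	MPI_Finalize ();                /* parallel build: shut MPI down last */
#endif
	return 0;
}
```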
@@ -3,7 +3,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
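This hunk (repeated in the next two example sources) covers the serial case: when HDF5 has no parallel support, `<mpi.h>` is never included, so `MPI_COMM_WORLD` would be undefined even though the examples pass it around as a communicator argument. Defining it to a dummy integer keeps those call sites compiling, the value presumably being ignored by a serial H5hut. A self-contained sketch of the technique, independent of H5hut (names illustrative):

```c
#include <stdio.h>

/* <mpi.h> would normally define MPI_COMM_WORLD; without it, supply a dummy: */
#ifndef MPI_COMM_WORLD
#define MPI_COMM_WORLD 0        /* any constant works; serial code ignores it */
#endif

static void
open_file (const char* name, int comm) {
	(void)comm;             /* a serial implementation ignores the communicator */
	printf ("opening %s\n", name);
}

int
main (void) {
	open_file ("example.h5", MPI_COMM_WORLD);   /* same call in both builds */
	return 0;
}
```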
@@ -3,7 +3,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
@@ -4,7 +4,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
@@ -2,7 +2,7 @@
 #include <stdlib.h>
 #include "H5hut.h"
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 #include <mpi.h>
 #endif
@@ -87,7 +87,7 @@ done:
 H5FedCloseMesh (m);
 H5CloseFile (f);
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 MPI_Finalize ();
 #endif
......
@@ -2,7 +2,7 @@
 #include <stdlib.h>
 #include "H5hut.h"
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 #include <mpi.h>
 #endif
@@ -253,7 +253,7 @@ main (
 /* done */
 H5CloseFile (f);
-#if defined (PARALLEL_IO)
+#if defined (H5_HAVE_PARALLEL)
 MPI_Finalize ();
 #endif
......
@@ -3,7 +3,7 @@
 #include "H5hut.h"
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 #ifndef MPI_COMM_WORLD
 #define MPI_COMM_WORLD 0
 #endif
......
 #ifndef __EXAMPLES_H
 #define __EXAMPLES_H
-#if !defined (PARALLEL_IO)
+#if !defined (H5_HAVE_PARALLEL)
 #define MPI_COMM_WORLD (0)
 #define MPI_Init(argc, argv)
......
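The examples header goes one step further than the dummy `MPI_COMM_WORLD`: it also stubs out MPI calls themselves as empty macros, so example programs can call `MPI_Init` unconditionally. The header is truncated in the diff above; the extra stubs below are assumptions added to illustrate the idea, not the file's actual contents:

```c
#include <stdio.h>

#if !defined (H5_HAVE_PARALLEL)
#define MPI_COMM_WORLD (0)
#define MPI_Init(argc, argv)            /* expands to nothing in serial builds */
/* plausible companions (not shown in the truncated diff): */
#define MPI_Finalize()
#define MPI_Comm_rank(comm, rank)  (*(rank) = 0)   /* single "rank" 0 */
#define MPI_Comm_size(comm, size)  (*(size) = 1)
#endif

int
main (int argc, char* argv[]) {
	int rank, size;
	MPI_Init (&argc, &argv);            /* a no-op when stubbed */
	MPI_Comm_rank (MPI_COMM_WORLD, &rank);
	MPI_Comm_size (MPI_COMM_WORLD, &size);
	printf ("rank %d of %d\n", rank, size);
	MPI_Finalize ();
	return 0;
}
```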
@@ -25,7 +25,7 @@ h5_createprop_file (
 H5_API_RETURN ((h5_int64_t)h5_create_prop (H5_PROP_FILE));
 }
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 #define h5_setprop_file_mpio FC_MANGLING( \
 h5_setprop_file_mpio, \
 H5_SETPROP_FILE_MPIO)
......
@@ -188,7 +188,7 @@ h5bl_3d_getchunk (
 }
-#ifdef PARALLEL_IO
+#if defined(H5_HAVE_PARALLEL)
 #define h5bl_3d_setgrid FC_MANGLING ( \
 h5bl_3d_setgrid, \
 h5bl_3d_setgrid)
......
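These two hunks guard wrappers for the Fortran bindings: `FC_MANGLING`, generated by the build system, turns a C function name into whatever symbol the Fortran compiler in use expects. A hypothetical definition, purely to illustrate the mechanism (the real macro is configure-generated and compiler-specific):

```c
/* Hypothetical: one common Fortran mangling scheme is the lowercase name
 * plus a trailing underscore. */
#define FC_MANGLING(lower, UPPER) lower##_

/* Under that scheme the wrapper above would be emitted as the symbol
 * h5_setprop_file_mpio_ so that Fortran code can link against it. */
```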
@@ -145,7 +145,7 @@ h5_abort_errorhandler (
 if (h5_log_level > 0) {
 h5_verror (fmt, ap);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 MPI_Abort(MPI_COMM_WORLD, -(int)h5_errno);
 #else
 exit (-(int)h5_errno);
......
@@ -80,7 +80,7 @@ mpi_init (
 const h5_file_p f
 ) {
 H5_INLINE_FUNC_ENTER (h5_err_t);
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 TRY (h5priv_mpi_comm_size (f->props->comm, &f->nprocs));
 TRY (h5priv_mpi_comm_rank (f->props->comm, &f->myproc));
@@ -141,7 +141,7 @@ mpi_init (
 TRY (h5_optimize_for_lustre(f, filename));
 }
 #endif
-#endif /* PARALLEL_IO */
+#endif /* H5_HAVE_PARALLEL */
 H5_RETURN (H5_SUCCESS);
 }
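Inside the library, `mpi_init` caches the communicator size and this process's rank on the file handle, but only when parallel HDF5 is available; `h5priv_mpi_comm_size` and `h5priv_mpi_comm_rank` are H5hut's error-checked wrappers around the corresponding MPI calls. In raw MPI the equivalent is roughly this sketch (function and parameter names are illustrative):

```c
#ifdef H5_HAVE_PARALLEL
#include <mpi.h>

/* Sketch: how f->nprocs and f->myproc get their values. */
static void
cache_comm_info (MPI_Comm comm, int* nprocs, int* myproc) {
	MPI_Comm_size (comm, nprocs);   /* number of ranks sharing this file */
	MPI_Comm_rank (comm, myproc);   /* this process's index within comm */
}
#else
/* Serial build: there is exactly one "rank". */
static void
cache_comm_info (int comm, int* nprocs, int* myproc) {
	(void)comm;
	*nprocs = 1;
	*myproc = 0;
}
#endif
```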
@@ -181,7 +181,7 @@ set_default_file_props (
 H5_STEPNAME,
 H5_STEPNAME_LEN - 1);
 props->width_step_idx = H5_STEPWIDTH;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->comm = MPI_COMM_WORLD;
 #endif
 H5_RETURN (H5_SUCCESS);
@@ -201,7 +201,7 @@ h5_set_prop_file_mpio_collective (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_POSIX | H5_VFD_MPIO_INDEPENDENT | H5_VFD_CORE);
 props->flags |= H5_VFD_MPIO_COLLECTIVE;
 props->comm = *comm;
@@ -229,7 +229,7 @@ h5_set_prop_file_mpio_independent (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_COLLECTIVE | H5_VFD_MPIO_POSIX | H5_VFD_CORE);
 props->flags |= H5_VFD_MPIO_INDEPENDENT;
 props->comm = *comm;
@@ -254,7 +254,7 @@ h5_set_prop_file_mpio_posix (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_COLLECTIVE | H5_VFD_MPIO_POSIX | H5_VFD_CORE);
 props->flags |= H5_VFD_MPIO_INDEPENDENT;
 props->comm = *comm;
@@ -279,7 +279,7 @@ h5_set_prop_file_core_vfd (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 props->flags &= ~(H5_VFD_MPIO_COLLECTIVE | H5_VFD_MPIO_INDEPENDENT | H5_VFD_MPIO_POSIX);
 props->flags |= H5_VFD_MPIO_INDEPENDENT;
 props->comm = MPI_COMM_SELF;
@@ -352,7 +352,7 @@ h5_set_prop_file_throttle (
 "Invalid property class: %lld",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 // throttle only if VFD is MPIO independent or POSIX
 h5_int64_t mask = H5_VFD_MPIO_INDEPENDENT;
 #if H5_VERSION_LE(1,8,12)
@@ -519,7 +519,7 @@ h5_open_file2 (
 "Invalid property class: %lld.",
 (long long int)props->class);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 f->props->comm = props->comm;
 #endif
 f->props->flags = props->flags;
......
@@ -47,7 +47,7 @@ h5bpriv_open_file (
 b = f->b;
 memset (b, 0, sizeof (*b));
-#if defined(PARALLEL_IO)
+#ifdef H5_HAVE_PARALLEL
 size_t n = sizeof (struct h5b_partition) / sizeof (h5_int64_t);
 TRY (h5priv_mpi_type_contiguous(n, MPI_LONG_LONG, &b->partition_mpi_t));
 #endif
@@ -88,7 +88,7 @@ h5bpriv_close_file (
 TRY (hdf5_close_dataspace (b->diskshape));
 TRY (hdf5_close_dataspace (b->memshape));
 TRY (hdf5_close_property (b->dcreate_prop));
-#if defined(PARALLEL_IO)
+#ifdef H5_HAVE_PARALLEL
 TRY (h5priv_mpi_type_free (&b->partition_mpi_t));
 #endif
 TRY (h5_free (f->b));
......
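The `h5bpriv_open_file` hunk builds an MPI datatype describing one `struct h5b_partition` as a contiguous run of 64-bit integers, so partitions can later be exchanged with a single MPI call; `h5bpriv_close_file` frees it again. A raw-MPI sketch of the same technique (the six-field layout is an assumption for illustration, not H5hut's actual struct):

```c
#include <mpi.h>

/* Illustrative stand-in for struct h5b_partition: six 64-bit extents. */
struct partition {
	long long i_start, i_end, j_start, j_end, k_start, k_end;
};

int
main (int argc, char* argv[]) {
	MPI_Init (&argc, &argv);

	MPI_Datatype partition_t;
	int n = (int)(sizeof (struct partition) / sizeof (long long));
	MPI_Type_contiguous (n, MPI_LONG_LONG, &partition_t);
	MPI_Type_commit (&partition_t);

	/* ... e.g. MPI_Allgather of one partition per rank ... */

	MPI_Type_free (&partition_t);
	MPI_Finalize ();
	return 0;
}
```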
@@ -74,7 +74,7 @@ _normalize_partition (
 }
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 /* MLH: this could be improved with an MPI_Reduce and MAX operator...
  * but the user_layout array-of-structs would need to be a struct-of-arrays */
 static void
@@ -571,7 +571,7 @@ h5b_3d_set_view (
 b->user_layout[0].k_end = k_end;
 _normalize_partition(&b->user_layout[0]);
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 h5b_partition_t *user_layout;
 h5b_partition_t *write_layout;
@@ -762,7 +762,6 @@ h5b_3d_get_chunk (
 H5_RETURN (H5_SUCCESS);
 }
-#ifdef PARALLEL_IO
 h5_err_t
 h5b_3d_set_grid (
 const h5_file_t fh, /*!< IN: File handle */
@@ -796,11 +795,13 @@ h5b_3d_set_grid (
 int dims[3] = { k, j, i };
 int period[3] = { 0, 0, 0 };
+#ifdef H5_HAVE_PARALLEL
 TRY( h5priv_mpi_cart_create(
 f->props->comm, 3, dims, period, 0, &f->b->cart_comm) );
+#else
+h5_warn ("Defining a grid in serial case doesn't make much sense!");
+#endif
 f->b->have_grid = 1;
 H5_RETURN (H5_SUCCESS);
 }
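`h5priv_mpi_cart_create` wraps `MPI_Cart_create`, which maps the ranks of a communicator onto a logical 3-D process grid; note the `{ k, j, i }` ordering of `dims` above. A raw-MPI sketch of the same call, with grid extents chosen arbitrarily for illustration (run with at least 4 ranks):

```c
#include <mpi.h>
#include <stdio.h>

int
main (int argc, char* argv[]) {
	MPI_Init (&argc, &argv);

	/* Map the ranks onto a 2 x 2 x 1 grid, non-periodic in all directions. */
	int dims[3]   = { 2, 2, 1 };        /* { k, j, i } ordering, as above */
	int period[3] = { 0, 0, 0 };
	MPI_Comm cart_comm;
	MPI_Cart_create (MPI_COMM_WORLD, 3, dims, period, 0, &cart_comm);

	/* Translate this rank back into grid coordinates. */
	int rank, coords[3];
	MPI_Comm_rank (cart_comm, &rank);
	MPI_Cart_coords (cart_comm, rank, 3, coords);
	printf ("rank %d -> (k=%d, j=%d, i=%d)\n",
	        rank, coords[0], coords[1], coords[2]);

	MPI_Finalize ();
	return 0;
}
```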
@@ -824,11 +825,16 @@ h5b_3d_get_grid_coords (
 "%s",
 "Grid dimensions have not been set!");
+#ifdef H5_HAVE_PARALLEL
 int coords[3];
 TRY( h5priv_mpi_cart_coords(f->b->cart_comm, proc, 3, coords) );
 *k = coords[0];
 *j = coords[1];
 *i = coords[2];
+#else
+*k = *j = *i = 1;
+h5_warn ("Defining grid in serial case doesn't make much sense!");
+#endif
 H5_RETURN (H5_SUCCESS);
 }
@@ -855,11 +861,14 @@ h5b_3d_set_dims (
 "Grid dimensions have not been set!");
 h5_size_t dims[3] = { k, j, i };
+#ifdef H5_HAVE_PARALLEL
 h5_size_t check_dims[3] = { k, j, i };
 TRY( h5priv_mpi_bcast(
 check_dims, 3, MPI_LONG_LONG, 0, f->props->comm) );
+#else
+h5_size_t check_dims[3] = { 1, 1, 1 };
+h5_warn ("Defining grid in serial case doesn't make much sense!");
+#endif
 if ( dims[0] != check_dims[0] ||
 dims[1] != check_dims[1] ||
 dims[2] != check_dims[2]
@@ -899,7 +908,6 @@ h5b_3d_set_dims (
 H5_RETURN (H5_SUCCESS);
 }
-#endif
 h5_err_t
 h5b_3d_set_halo (
......
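The `h5b_3d_set_dims` hunk above shows the consistency check that this commit wires to `H5_HAVE_PARALLEL`: rank 0 broadcasts its grid dimensions and every rank compares them against its own, so mismatched dims across ranks are caught as an error rather than silently producing a corrupt layout. A raw-MPI sketch of that check (illustrative, not the library code):

```c
#include <mpi.h>
#include <stdio.h>

int
main (int argc, char* argv[]) {
	MPI_Init (&argc, &argv);

	long long dims[3] = { 4, 4, 2 };        /* this rank's idea of the grid */

	/* Rank 0's values overwrite check_dims everywhere ... */
	long long check_dims[3] = { dims[0], dims[1], dims[2] };
	MPI_Bcast (check_dims, 3, MPI_LONG_LONG, 0, MPI_COMM_WORLD);

	/* ... so any rank that disagrees with rank 0 can detect it. */
	if (dims[0] != check_dims[0] ||
	    dims[1] != check_dims[1] ||
	    dims[2] != check_dims[2]) {
		fprintf (stderr, "inconsistent grid dimensions across ranks\n");
		MPI_Abort (MPI_COMM_WORLD, 1);
	}

	MPI_Finalize ();
	return 0;
}
```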
@@ -136,7 +136,7 @@ h5u_set_num_points (
 "Invalid number of particles: %lld!\n",
 (long long)nparticles);
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 /*
 if we are not using parallel-IO, there is enough information
 to know that we can short circuit this routine. However,
@@ -174,12 +174,12 @@ h5u_set_num_points (
 NULL));
 }
-#ifndef PARALLEL_IO
+#ifndef H5_HAVE_PARALLEL
 count = u->nparticles;
 TRY( u->shape = hdf5_create_dataspace (1, &count, NULL));
 u->viewstart = 0;
 u->viewend = nparticles - 1; // view range is *inclusive*
-#else /* PARALLEL_IO */
+#else /* H5_HAVE_PARALLEL */
 /*
 The Gameplan here is to declare the overall size of the on-disk
 data structure the same way we do for the serial case. But
@@ -317,7 +317,7 @@ h5u_set_view (
 (long long)end,
 (long long)start);
 }
-#if PARALLEL_IO
+#if H5_HAVE_PARALLEL
 TRY (
 h5priv_mpi_allreduce_max (
 &end, &total, 1, MPI_LONG_LONG, f->props->comm)
@@ -575,7 +575,7 @@ h5u_set_canonical_view (
 u->nparticles = total / f->nprocs;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 h5_int64_t remainder = 0;
 remainder = total % f->nprocs;
 start = f->myproc * u->nparticles;
@@ -588,7 +588,7 @@ h5u_set_canonical_view (
 start += f->myproc;
 else
 start += remainder;
-#endif // PARALLEL_IO
+#endif // H5_HAVE_PARALLEL
 h5_int64_t length = u->nparticles;
 TRY (h5u_set_view_length (fh, start, length));
......
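The canonical view gives every rank `total / nprocs` particles and spreads the `total % nprocs` leftovers over the lowest-numbered ranks, which is what the `start += f->myproc` / `start += remainder` branches above compute. A small self-contained sketch of the arithmetic; for example, total = 10 over 4 ranks yields lengths 3, 3, 2, 2 starting at 0, 3, 6, 8:

```c
#include <stdio.h>

int
main (void) {
	long long total = 10, nprocs = 4;

	for (long long rank = 0; rank < nprocs; rank++) {
		long long n = total / nprocs;       /* base share per rank */
		long long rem = total % nprocs;
		long long start = rank * n;
		if (rank < rem) {
			n++;                        /* low ranks absorb the remainder */
			start += rank;              /* shifted by one per earlier extra */
		} else {
			start += rem;               /* shifted past all the extras */
		}
		printf ("rank %lld: start=%lld length=%lld\n", rank, start, n);
	}
	return 0;
}
```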
@@ -1048,7 +1048,7 @@ hdf5_set_layout_property (
 H5_RETURN (H5_SUCCESS);
 }
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 static inline h5_err_t
 hdf5_set_fapl_mpio_property (
 hid_t fapl_id,
......
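`hdf5_set_fapl_mpio_property` exists only in parallel builds because the underlying HDF5 call, `H5Pset_fapl_mpio`, is itself only compiled into parallel HDF5, so `H5_HAVE_PARALLEL` is exactly the right gate. A sketch of the underlying standard HDF5 call (usage illustrative; error checking omitted):

```c
#include <hdf5.h>
#ifdef H5_HAVE_PARALLEL
#include <mpi.h>

/* Attach the MPI-IO driver to a file-access property list. */
static hid_t
make_mpio_fapl (MPI_Comm comm) {
	hid_t fapl = H5Pcreate (H5P_FILE_ACCESS);
	H5Pset_fapl_mpio (fapl, comm, MPI_INFO_NULL);
	return fapl;
}
#endif
```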
@@ -671,7 +671,7 @@ h5priv_initialize (
 h5_initialized = 1;
 H5_CORE_API_ENTER (h5_err_t, "%s", "void");
 ret_value = H5_SUCCESS;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 int mpi_is_initialized;
 MPI_Initialized (&mpi_is_initialized);
 if (!mpi_is_initialized) {
......
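`h5priv_initialize` must work whether or not the application has already called `MPI_Init`, and calling `MPI_Init` twice is an error, so it asks MPI first via `MPI_Initialized` (which is safe to call before `MPI_Init`). A minimal sketch of that guard:

```c
#include <mpi.h>

/* Initialize MPI only if the application has not already done so. */
static void
ensure_mpi_initialized (int* argc, char*** argv) {
	int initialized;
	MPI_Initialized (&initialized);     /* safe to call before MPI_Init */
	if (!initialized)
		MPI_Init (argc, argv);
}
```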
@@ -31,7 +31,7 @@
 #include "private/h5_mpi.h"
 #include "private/h5_hdf5.h"
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 static inline h5_err_t
 h5priv_start_throttle (
 const h5_file_p f
@@ -98,7 +98,7 @@ h5priv_end_throttle (
 }
 H5_RETURN (H5_SUCCESS);
 }
-#else // PARALLEL_IO
+#else // H5_HAVE_PARALLEL
 static inline h5_err_t
 h5priv_start_throttle (const h5_file_p f) {
 UNUSED_ARGUMENT (f);
@@ -111,7 +111,7 @@ h5priv_end_throttle (const h5_file_p f) {
 return H5_SUCCESS;
 }
-#endif // PARALLEL_IO
+#endif // H5_HAVE_PARALLEL
 h5_err_t
......
@@ -10,7 +10,7 @@
 #ifndef __PRIVATE_H5_MPI_H
 #define __PRIVATE_H5_MPI_H
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 #include "h5core/h5_types.h"
 #include "h5core/h5_err.h"
......
@@ -35,7 +35,7 @@ struct h5_prop_file { // file property
 h5_int64_t align; // HDF5 alignment
 h5_int64_t increment; // increment for core vfd
 h5_int64_t throttle;
-#ifdef PARALLEL_IO
+#ifdef H5_HAVE_PARALLEL
 MPI_Comm comm;
 #endif
 hid_t xfer_prop; // dataset transfer properties
......
@@ -14,7 +14,7 @@
 #include "h5core/h5_types.h"
 #include "h5core/h5t_octree.h"
-#if defined(PARALLEL_IO)
+#if defined(H5_HAVE_PARALLEL)
 #include <mpi.h>
 #endif
......