Skip to content

Commit

Permalink
Create dataset info hash table only once
Browse files — browse the repository at this point in the history
  • Loading branch information
jhendersonHDF committed Oct 3, 2023
1 parent abf185d commit 1a0bc64
Show file tree
Hide file tree
Showing 6 changed files with 406 additions and 506 deletions.
25 changes: 25 additions & 0 deletions src/H5Dchunk.c
Original file line number Diff line number Diff line change
Expand Up @@ -1114,6 +1114,31 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo)
}
}

#ifdef H5_HAVE_PARALLEL
/*
* If collective metadata reads are enabled, ensure all ranks
* have the dataset's chunk index open (if it was created) to
* prevent possible metadata inconsistency issues or unintentional
* independent metadata reads later on.
*/
if (H5F_SHARED_HAS_FEATURE(io_info->f_sh, H5FD_FEAT_HAS_MPI) &&
H5F_shared_get_coll_metadata_reads(io_info->f_sh) &&
H5D__chunk_is_space_alloc(&dataset->shared->layout.storage)) {
H5D_chunk_ud_t udata;
hsize_t scaled[H5O_LAYOUT_NDIMS] = {0};

/*
* TODO: Until the dataset chunk index callback structure has
* callbacks for checking if an index is opened and also for
* directly opening the index, the following fake chunk lookup
* serves the purpose of forcing a chunk index open operation
* on all ranks
*/
if (H5D__chunk_lookup(dataset, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to collectively open dataset chunk index");
}
#endif

done:
if (file_space_normalized == true)
if (H5S_hyper_denormalize_offset(dinfo->file_space, old_offset) < 0)
Expand Down
35 changes: 18 additions & 17 deletions src/H5Dio.c
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,10 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info)
dset_info[i].buf.vp = (void *)(((uint8_t *)dset_info[i].buf.vp) + buf_adj);
} /* end if */

/* Set up I/O operation */
if (H5D__dset_ioinfo_init(dset_info[i].dset, &(dset_info[i]), &(store[i])) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up I/O operation");

/* Check if any filters are applied to the dataset */
if (dset_info[i].dset->shared->dcpl_cache.pline.nused > 0)
io_info.filtered_count++;
Expand Down Expand Up @@ -274,10 +278,6 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info)
io_skipped = io_skipped + 1;
} /* end if */
else {
/* Set up I/O operation */
if (H5D__dset_ioinfo_init(dset_info[i].dset, &(dset_info[i]), &(store[i])) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation");

/* Sanity check that space is allocated, if there are elements */
if (dset_info[i].nelmts > 0)
assert(
Expand All @@ -288,22 +288,23 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info)
dset_info[i].dset->shared->dcpl_cache.efl.nused > 0 ||
dset_info[i].dset->shared->layout.type == H5D_COMPACT);

/* Call storage method's I/O initialization routine */
if (dset_info[i].layout_ops.io_init &&
(dset_info[i].layout_ops.io_init)(&io_info, &(dset_info[i])) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info");
dset_info[i].skip_io = false;
io_op_init++;

/* Reset metadata tagging */
H5AC_tag(prev_tag, NULL);
}

/* Call storage method's I/O initialization routine */
if (dset_info[i].layout_ops.io_init &&
(dset_info[i].layout_ops.io_init)(&io_info, &(dset_info[i])) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info");
io_op_init++;

/* Reset metadata tagging */
H5AC_tag(prev_tag, NULL);
} /* end of for loop */

assert(io_op_init + io_skipped == count);
assert(io_op_init == count);

/* If no datasets have I/O, we're done */
if (io_op_init == 0)
if (io_skipped == count)
HGOTO_DONE(SUCCEED);

/* Perform second phase of type info initialization */
Expand Down Expand Up @@ -449,8 +450,8 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info)

done:
/* Shut down the I/O op information */
for (i = 0; i < count; i++)
if (!dset_info[i].skip_io && dset_info[i].layout_ops.io_term &&
for (i = 0; i < io_op_init; i++)
if (dset_info[i].layout_ops.io_term &&
(*dset_info[i].layout_ops.io_term)(&io_info, &(dset_info[i])) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info");

Expand Down Expand Up @@ -875,7 +876,7 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info)

done:
/* Shut down the I/O op information */
for (i = 0; i < count; i++) {
for (i = 0; i < io_op_init; i++) {
assert(!dset_info[i].skip_io);
if (dset_info[i].layout_ops.io_term &&
(*dset_info[i].layout_ops.io_term)(&io_info, &(dset_info[i])) < 0)
Expand Down
Loading

0 comments on commit 1a0bc64

Please sign in to comment.