diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index d19eb71f0f69..8b5e5a4ed932 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -468,7 +468,7 @@ static void
 verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
     uint64_t offset, uint64_t size)
 {
-	sublivelist_verify_block_t svb;
+	sublivelist_verify_block_t svb = {{{0}}};
 	DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
 	DVA_SET_OFFSET(&svb.svb_dva, offset);
 	DVA_SET_ASIZE(&svb.svb_dva, size);
diff --git a/contrib/coverity/model.c b/contrib/coverity/model.c
index 8e3e83cada19..8b4d14ee22a2 100644
--- a/contrib/coverity/model.c
+++ b/contrib/coverity/model.c
@@ -24,6 +24,8 @@
 
 #include
 
+#define	KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */
+
 #define	UMEM_DEFAULT	0x0000	/* normal -- may fail */
 #define	UMEM_NOFAIL	0x0100	/* Never fails */
 
@@ -173,7 +175,7 @@ spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
 	if (condition1)
 		__coverity_sleep__();
 
-	if ((fl == 0) || condition0) {
+	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
 		void *buf = __coverity_alloc__(sz);
 		__coverity_mark_as_uninitialized_buffer__(buf);
 		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
@@ -194,7 +196,7 @@ spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
 	if (condition1)
 		__coverity_sleep__();
 
-	if ((fl == 0) || condition0) {
+	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
 		void *buf = __coverity_alloc__(sz);
 		__coverity_writeall0__(buf);
 		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
@@ -276,7 +278,7 @@ spl_vmem_alloc(size_t sz, int fl, const char *func, int line)
 	if (condition1)
 		__coverity_sleep__();
 
-	if ((fl == 0) || condition0) {
+	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
 		void *buf = __coverity_alloc__(sz);
 		__coverity_mark_as_uninitialized_buffer__(buf);
 		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
@@ -295,7 +297,7 @@ spl_vmem_zalloc(size_t sz, int fl, const char *func, int line)
 	if (condition1)
 		__coverity_sleep__();
 
-	if ((fl == 0) || condition0) {
+	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
 		void *buf = __coverity_alloc__(sz);
 		__coverity_writeall0__(buf);
 		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 7982d9702896..6e58f7e923ae 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -3323,10 +3323,10 @@ dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
 
-	ASSERT(!BP_IS_REDACTED(bp) ||
+	ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
 	    dsl_dataset_feature_is_active(
 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
-	    SPA_FEATURE_REDACTED_DATASETS));
+	    SPA_FEATURE_REDACTED_DATASETS)));
 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
 		arc_buf_destroy(abuf, private);
 		dbuf_prefetch_fini(dpa, B_TRUE);
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index ccb7eb20756d..fbf19d5c3372 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -1586,8 +1586,6 @@ send_merge_thread(void *arg)
 		}
 		range_free(front_ranges[i]);
 	}
-	if (range == NULL)
-		range = kmem_zalloc(sizeof (*range), KM_SLEEP);
 	range->eos_marker = B_TRUE;
 	bqueue_enqueue_flush(&smt_arg->q, range, 1);
 	spl_fstrans_unmark(cookie);