From e028b43c9345437ee05d85396c1dbb9b35b50b9d Mon Sep 17 00:00:00 2001
From: Tom Caputi
Date: Tue, 14 Jan 2020 15:25:20 -0500
Subject: [PATCH] Fix errata #4 handling for resuming streams

Currently, the handling for errata #4 has two issues which allow
the checks for this issue to be bypassed using resumable sends.
The first issue is that drc->drc_fromsnapobj is not set in the
resuming code as it is in the non-resuming code. This causes
dsl_crypto_recv_key_check() to skip its checks for the
from_ivset_guid. The second issue is that resumable sends do not
clean up their on-disk state if they fail the checks in
dmu_recv_stream() that happen before any data is received.

As a result of these two bugs, a user can attempt a resumable send
of a dataset without a from_ivset_guid. This will fail the initial
dmu_recv_stream() checks, leaving a valid resume state. The send
can then be resumed, which skips those checks, allowing the receive
to be completed.

This commit fixes these issues by setting drc->drc_fromsnapobj in
the resuming receive path and by ensuring that resumable receives
are properly cleaned up if they fail the initial dmu_recv_stream()
checks.

Reviewed-by: Brian Behlendorf
Signed-off-by: Tom Caputi
Closes #9818
Closes #9829
---
 include/sys/dmu_recv.h |  1 +
 module/zfs/dmu_recv.c  | 15 ++++++++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/include/sys/dmu_recv.h b/include/sys/dmu_recv.h
index ffa89249d31..0840581db10 100644
--- a/include/sys/dmu_recv.h
+++ b/include/sys/dmu_recv.h
@@ -46,6 +46,7 @@ typedef struct dmu_recv_cookie {
 	boolean_t drc_byteswap;
 	boolean_t drc_force;
 	boolean_t drc_resumable;
+	boolean_t drc_should_save;
 	boolean_t drc_raw;
 	boolean_t drc_clone;
 	boolean_t drc_spill;
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index 2324e8e87ba..788eef02279 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -579,6 +579,7 @@ dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
 	dmu_recv_begin_arg_t *drba = arg;
 	dsl_pool_t *dp = dmu_tx_pool(tx);
 	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
+	dmu_recv_cookie_t *drc = drba->drba_cookie;
 	int error;
 	ds_hold_flags_t dsflags = 0;
 	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
@@ -694,6 +695,9 @@ dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
 		return (SET_ERROR(EINVAL));
 	}
 
+	if (ds->ds_prev != NULL)
+		drc->drc_fromsnapobj = ds->ds_prev->ds_object;
+
 	dsl_dataset_rele_flags(ds, dsflags, FTAG);
 	return (0);
 }
@@ -747,6 +751,7 @@ dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
 	rrw_exit(&ds->ds_bp_rwlock, FTAG);
 
 	drba->drba_cookie->drc_ds = ds;
+	drba->drba_cookie->drc_should_save = B_TRUE;
 
 	spa_history_log_internal_ds(ds, "resume receive", tx, "");
 }
@@ -1868,7 +1873,8 @@ dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
 	ds->ds_objset->os_raw_receive = B_FALSE;
 
 	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
-	if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
+	if (drc->drc_resumable && drc->drc_should_save &&
+	    !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
 		rrw_exit(&ds->ds_bp_rwlock, FTAG);
 		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
 	} else {
@@ -2625,6 +2631,13 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
 		goto out;
 	}
 
+	/*
+	 * If we failed before this point we will clean up any new resume
+	 * state that was created. Now that we've gotten past the initial
+	 * checks we are ok to retain that resume state.
+	 */
+	drc->drc_should_save = B_TRUE;
+
 	(void) bqueue_init(&rwa->q,
 	    MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
 	    offsetof(struct receive_record_arg, node));
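
Illustrative note (not part of the patch): the bypass described in the commit
message can be pictured with a hypothetical command sequence. Pool and dataset
names and the resume token are placeholders, the base snapshot is assumed to
have already been received into backup/enc, and this is a sketch of the
scenario the commit closes, not a verified reproduction.

    # Incremental raw send of an encrypted dataset whose older snapshot
    # lacks a from_ivset_guid (errata #4). The receive fails the initial
    # dmu_recv_stream() checks but, before this fix, left valid resume
    # state on disk.
    zfs send -w -i pool/enc@snap1 pool/enc@snap2 | zfs receive -s backup/enc

    # A resume token is still available on the partially received dataset.
    zfs get -H -o value receive_resume_token backup/enc

    # Before this fix, resuming skipped the from_ivset_guid check because
    # drc_fromsnapobj was never set on resume, so the receive completed.
    # With the fix, the check also runs on resume, and receives that fail
    # the pre-data checks discard their resume state.
    zfs send -t <resume_token> | zfs receive -s backup/enc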