Fix deadlocks in DMU #726

Closed
wants to merge 4 commits
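The files shown switch KM_SLEEP allocations to KM_PUSHPAGE in paths that can run while a transaction group is syncing, and zfs_context.h drops the TASKQ_NORECLAIM flag definition. The sketch below is not part of the diff: example_sync_ctx_alloc() is a hypothetical helper, and the comment reflects my reading of the SPL allocation flags rather than text from this PR.

#include <sys/zfs_context.h>	/* kmem_alloc(), KM_SLEEP, KM_PUSHPAGE */

/*
 * Sketch only: with KM_SLEEP the allocator may block in direct memory
 * reclaim, and reclaim can re-enter ZFS writeback and deadlock a thread
 * that is already inside the DMU.  KM_PUSHPAGE asks the allocator to
 * satisfy the request without initiating filesystem I/O, so it is safe
 * to use from the sync/writeback path.
 */
static void *
example_sync_ctx_alloc(size_t size)
{
	return (kmem_alloc(size, KM_PUSHPAGE));
}

The KM_SLEEP to KM_PUSHPAGE hunks below apply this same substitution at each existing call site.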
1 change: 0 additions & 1 deletion include/sys/zfs_context.h
@@ -382,7 +382,6 @@ typedef struct taskq_ent {
#define TASKQ_DYNAMIC 0x0004 /* Use dynamic thread scheduling */
#define TASKQ_THREADS_CPU_PCT 0x0008 /* Scale # threads by # cpus */
#define TASKQ_DC_BATCH 0x0010 /* Mark threads as batch */
-#define TASKQ_NORECLAIM 0x0020 /* Disable direct memory reclaim */

#define TQ_SLEEP KM_SLEEP /* Can block for memory */
#define TQ_NOSLEEP KM_NOSLEEP /* cannot block for memory; may fail */
2 changes: 1 addition & 1 deletion module/zfs/bplist.c
@@ -44,7 +44,7 @@ bplist_destroy(bplist_t *bpl)
void
bplist_append(bplist_t *bpl, const blkptr_t *bp)
{
-bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_SLEEP);
+bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_PUSHPAGE);

mutex_enter(&bpl->bpl_lock);
bpe->bpe_blk = *bp;
6 changes: 3 additions & 3 deletions module/zfs/dbuf.c
@@ -298,7 +298,7 @@ dbuf_init(void)
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel */
-h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
+h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
@@ -1719,7 +1719,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);

-db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
+db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);

db->db_objset = os;
db->db.db_object = dn->dn_object;
@@ -2019,7 +2019,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
int error;

dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
-DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
+DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

error = __dbuf_hold_impl(dh);
12 changes: 6 additions & 6 deletions module/zfs/dmu.c
@@ -381,7 +381,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
}
nblks = 1;
}
-dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP | KM_NODEBUG);
+dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_PUSHPAGE | KM_NODEBUG);

if (dn->dn_objset->os_dsl_dataset)
dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
@@ -863,11 +863,11 @@ dmu_xuio_init(xuio_t *xuio, int nblk)
uio_t *uio = &xuio->xu_uio;

uio->uio_iovcnt = nblk;
-uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
+uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_PUSHPAGE);

-priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
+priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_PUSHPAGE);
priv->cnt = nblk;
-priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
+priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_PUSHPAGE);
priv->iovp = uio->uio_iov;
XUIO_XUZC_PRIV(xuio) = priv;

@@ -1431,7 +1431,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
return (EIO); /* Make zl_get_data do txg_waited_synced() */
}

-dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
+dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
dsa->dsa_dr = NULL;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
@@ -1555,7 +1555,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
mutex_exit(&db->db_mtx);

-dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
+dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
dsa->dsa_dr = dr;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
6 changes: 3 additions & 3 deletions module/zfs/dmu_tx.c
@@ -63,7 +63,7 @@ static kstat_t *dmu_tx_ksp;
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
-dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
+dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
tx->tx_dir = dd;
if (dd)
tx->tx_pool = dd->dd_pool;
@@ -141,7 +141,7 @@ dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
}
}

-txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
+txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
txh->txh_tx = tx;
txh->txh_dnode = dn;
#ifdef DEBUG_DMU_TX
@@ -1241,7 +1241,7 @@ dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
dmu_tx_callback_t *dcb;

-dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
+dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);

dcb->dcb_func = func;
dcb->dcb_data = data;
2 changes: 1 addition & 1 deletion module/zfs/dmu_zfetch.c
@@ -699,7 +699,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
if (cur_streams >= max_streams) {
return;
}
-newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
+newstream = kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE);
}

newstream->zst_offset = zst.zst_offset;
6 changes: 3 additions & 3 deletions module/zfs/dnode.c
@@ -372,7 +372,7 @@ static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
-dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
+dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_PUSHPAGE);

ASSERT(!POINTER_IS_VALID(dn->dn_objset));
dn->dn_moved = 0;
@@ -1491,7 +1491,7 @@ dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
} else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
/* clear a chunk out of this range */
free_range_t *new_rp =
-kmem_alloc(sizeof (free_range_t), KM_SLEEP);
+kmem_alloc(sizeof (free_range_t), KM_PUSHPAGE);

new_rp->fr_blkid = endblk;
new_rp->fr_nblks = fr_endblk - endblk;
@@ -1669,7 +1669,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

/* Add new range to dn_ranges */
-rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
+rp = kmem_alloc(sizeof (free_range_t), KM_PUSHPAGE);
rp->fr_blkid = blkid;
rp->fr_nblks = nblks;
found = avl_find(tree, rp, &where);
2 changes: 1 addition & 1 deletion module/zfs/lzjb.c
@@ -56,7 +56,7 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
uint16_t *hp;
uint16_t *lempel;

-lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
+lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_PUSHPAGE);
while (src < (uchar_t *)s_start + s_len) {
if ((copymask <<= 1) == (1 << NBBY)) {
if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
10 changes: 5 additions & 5 deletions module/zfs/metaslab.c
@@ -102,7 +102,7 @@ metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
metaslab_class_t *mc;

-mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
+mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);

mc->mc_spa = spa;
mc->mc_rotor = NULL;
@@ -217,7 +217,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
metaslab_group_t *mg;

-mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
+mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
@@ -422,9 +422,9 @@ metaslab_pp_load(space_map_t *sm)
space_seg_t *ss;

ASSERT(sm->sm_ppd == NULL);
-sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
+sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE);

-sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
+sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE);
avl_create(sm->sm_pp_root, metaslab_segsize_compare,
sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

@@ -725,7 +725,7 @@ metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
vdev_t *vd = mg->mg_vd;
metaslab_t *msp;

-msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
+msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

msp->ms_smo_syncing = *smo;