This repository has been archived by the owner on Feb 26, 2020. It is now read-only.

kmem_cache: Call constructor/destructor on each alloc/free
This has a few benefits. First, it fixes a regression that "Rework
generic memory allocation interfaces" appears to have triggered in
splat's slab_reap and slab_age tests. Second, it makes porting code from
Illumos to ZFSOnLinux easier. Third, it has the side effect of making
reclaim from slab caches that specify reclaim functions an order of
magnitude faster. The splat slab_reap test usually took 30 to 40
seconds. With this change, it takes 3 to 4.

Signed-off-by: Richard Yao <ryao@gentoo.org>
ryao committed Oct 13, 2014
1 parent be41b1c commit f61fcf5
Showing 1 changed file with 16 additions and 20 deletions.
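
For context, the object-lifecycle contract this commit restores is the Illumos kmem_cache one: the constructor runs on every allocation and the destructor on every free, rather than once per slab. The sketch below shows a hypothetical consumer; my_obj_t, my_ctor, and my_dtor are illustrative names, not from this commit, and the callback signatures follow the spl_kmem_ctor_t and spl_kmem_dtor_t typedefs this code uses.

/* Hypothetical consumer of the SPL kmem_cache API; illustrative only. */
typedef struct my_obj {
	int	mo_refcnt;
} my_obj_t;

/* After this commit, runs inside spl_kmem_cache_alloc() for every object. */
static int
my_ctor(void *buf, void *priv, int flags)
{
	my_obj_t *mo = buf;

	mo->mo_refcnt = 0;
	return (0);
}

/* After this commit, runs inside spl_kmem_cache_free() for every object. */
static void
my_dtor(void *buf, void *priv)
{
	ASSERT(((my_obj_t *)buf)->mo_refcnt == 0);
}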
module/spl/spl-kmem.c
@@ -926,9 +926,6 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
 		list_add_tail(&sko->sko_list, &sks->sks_free_list);
 	}
 
-	list_for_each_entry(sko, &sks->sks_free_list, sko_list)
-		if (skc->skc_ctor)
-			skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
 out:
 	if (rc) {
 		if (skc->skc_flags & KMC_OFFSLAB)
@@ -1034,9 +1031,6 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
 	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
 		ASSERT(sko->sko_magic == SKO_MAGIC);
 
-		if (skc->skc_dtor)
-			skc->skc_dtor(sko->sko_addr, skc->skc_private);
-
 		if (skc->skc_flags & KMC_OFFSLAB)
 			kv_free(skc, sko->sko_addr, size);
 	}
@@ -1138,9 +1132,6 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
 		SRETURN(-EINVAL);
 	}
 
-	if (skc->skc_ctor)
-		skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
-
 	*obj = ske->ske_obj;
 
 	SRETURN(0);
@@ -1167,9 +1158,6 @@ spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
 	if (unlikely(ske == NULL))
 		SRETURN(-ENOENT);
 
-	if (skc->skc_dtor)
-		skc->skc_dtor(ske->ske_obj, skc->skc_private);
-
 	kfree(ske->ske_obj);
 	kfree(ske);
 
@@ -2051,12 +2039,9 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 		do {
 			int lflags = kmem_flags_convert(flags) | __GFP_COMP;
 			obj = kmem_cache_alloc(slc, lflags);
-			if (obj && skc->skc_ctor)
-				skc->skc_ctor(obj, skc->skc_private, flags);
 		} while ((obj == NULL) && !(flags & KM_NOSLEEP));
 
-		atomic_dec(&skc->skc_ref);
-		SRETURN(obj);
+		SGOTO(ret, 0);
 	}
 
 	local_irq_disable();
@@ -2085,12 +2070,20 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 	ASSERT(obj);
 	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
 
+ret:
 	/* Pre-emptively migrate object to CPU L1 cache */
-	prefetchw(obj);
+	if (obj) {
+		if (obj && skc->skc_ctor)
+			skc->skc_ctor(obj, skc->skc_private, flags);
+		else
+			prefetchw(obj);
+	}
 
 	atomic_dec(&skc->skc_ref);
 
 	SRETURN(obj);
 }
 
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
 
 /*
@@ -2110,13 +2103,16 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
 	atomic_inc(&skc->skc_ref);
 
+	/*
+	 * Run the destructor
+	 */
+	if (skc->skc_dtor)
+		skc->skc_dtor(obj, skc->skc_private);
+
 	/*
 	 * Free the object from the Linux underlying Linux slab.
 	 */
 	if (skc->skc_flags & KMC_SLAB) {
-		if (skc->skc_dtor)
-			skc->skc_dtor(obj, skc->skc_private);
-
 		kmem_cache_free(skc->skc_linux_cache, obj);
 		goto out;
 	}

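A short usage sketch under the same assumptions (the cache name and flow are hypothetical; the nine-argument spl_kmem_cache_create() signature with ctor, dtor, and reclaim callbacks is the one this module exports):

static void
my_cache_example(void)
{
	spl_kmem_cache_t *cache;
	my_obj_t *mo;

	cache = spl_kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
	    my_ctor, my_dtor, NULL, NULL, NULL, 0);

	mo = spl_kmem_cache_alloc(cache, KM_SLEEP);	/* my_ctor runs here */
	spl_kmem_cache_free(cache, mo);			/* my_dtor runs here */
	spl_kmem_cache_destroy(cache);
}

With per-object constructors and destructors back in the alloc/free paths, the KMC_SLAB (native Linux slab) and SPL-slab backends behave identically from the consumer's point of view, which is what makes Illumos code port over unchanged.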