Linux 3.12 compat: shrinker semantics
The new shrinker API as of Linux 3.12 modifies "struct shrinker" by
replacing the @shrink callback with the pair of @count_objects and
@scan_objects.  It also requires @scan_objects to return the number of
objects actually freed, whereas the previous @shrink callback returned
the number of remaining freeable objects.

This patch adds support for the new @scan_objects return value semantics
and updates the splat shrinker test case appropriately.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes openzfs#403

Conflicts:
	module/spl/spl-kmem.c
	module/splat/splat-linux.c
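
For context, the two shrinker registration shapes being bridged here look
roughly as follows. This is an illustrative sketch only, not part of the
patch: the my_cache_*() helpers are hypothetical, while
HAVE_SPLIT_SHRINKER_CALLBACK is the configure-time check used throughout
the diff below.

#include <linux/shrinker.h>	/* struct shrinker; pre-3.1 kernels declare it in <linux/mm.h> */

static unsigned long my_cache_freeable_objects(void);		/* hypothetical */
static unsigned long my_cache_reclaim(unsigned long nr);	/* hypothetical, returns objects freed */

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK)	/* Linux >= 3.12 */
static unsigned long
my_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/* Report how many objects could be freed; free nothing here. */
	return (my_cache_freeable_objects());
}

static unsigned long
my_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	/* Report how many objects were actually freed, or SHRINK_STOP. */
	return (my_cache_reclaim(sc->nr_to_scan));
}

static struct shrinker my_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};
#else						/* Linux < 3.12 */
static int
my_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		(void) my_cache_reclaim(sc->nr_to_scan);

	/* Report how many freeable objects remain, or -1 to stop. */
	return ((int) my_cache_freeable_objects());
}

static struct shrinker my_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};
#endif

register_shrinker(&my_shrinker) is the same call on both sides, so the
compat work below only has to paper over the callback signature and
return-value differences.
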
dweeezil authored and ryao committed Feb 19, 2015
1 parent b8c5997 commit e60b31b
Showing 3 changed files with 56 additions and 24 deletions.
7 changes: 7 additions & 0 deletions include/linux/mm_compat.h
@@ -311,4 +311,11 @@ fn ## _scan_objects(struct shrinker *shrink, struct shrink_control *sc) \
#error "Unknown shrinker callback"
#endif

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK)
typedef unsigned long spl_shrinker_t;
#else
typedef int spl_shrinker_t;
#define SHRINK_STOP (-1)
#endif

#endif /* SPL_MM_COMPAT_H */
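
To see how the new typedef and the SHRINK_STOP fallback are meant to be
used, here is a hedged sketch of adapting one compat callback to the split
count/scan API. This is not the verbatim mm_compat.h code; my_shrinker_fn()
is hypothetical, written in the style of __spl_kmem_cache_generic_shrinker()
below, and the real SPL_SHRINKER_CALLBACK_WRAPPER macro plays essentially
this role.

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK)
static spl_shrinker_t my_shrinker_fn(struct shrinker *, struct shrink_control *);

static unsigned long
my_shrinker_fn_count_objects(struct shrinker *shrink, struct shrink_control *sc)
{
	struct shrink_control qsc = {
		.gfp_mask	= sc->gfp_mask,
		.nr_to_scan	= 0,	/* count only, free nothing */
	};

	return (my_shrinker_fn(shrink, &qsc));
}

static unsigned long
my_shrinker_fn_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
{
	/* Number of objects actually freed, or SHRINK_STOP to stop scanning. */
	return (my_shrinker_fn(shrink, sc));
}
#else
/*
 * On older kernels my_shrinker_fn() would be wired up directly as the
 * @shrink callback; SHRINK_STOP falls back to the -1 defined above.
 */
#endif
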
51 changes: 33 additions & 18 deletions module/spl/spl-kmem.c
@@ -2126,14 +2126,24 @@ EXPORT_SYMBOL(spl_kmem_cache_free);
* report that they contain unused objects. Because of this we only
* register one shrinker function in the shim layer for all slab caches.
* We always attempt to shrink all caches when this generic shrinker
* is called. The shrinker should return the number of free objects
* in the cache when called with nr_to_scan == 0 but not attempt to
* free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
* objects should be freed, which differs from Solaris semantics.
* Solaris semantics are to free all available objects which may (and
* probably will) be more objects than the requested nr_to_scan.
* is called.
*
* If sc->nr_to_scan is zero, the caller is requesting a query of the
* number of objects which can potentially be freed. If it is nonzero,
* the request is to free that many objects.
*
* Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
* in struct shrinker and also require the shrinker to return the number
* of objects freed.
*
* Older kernels require the shrinker to return the number of freeable
* objects following the freeing of nr_to_free.
*
* Linux semantics differ from those under Solaris, which are to
* free all available objects which may (and probably will) be more
* objects than the requested nr_to_scan.
*/
static int
static spl_shrinker_t
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -2142,17 +2152,22 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,

down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
if (sc->nr_to_scan)
if (sc->nr_to_scan) {
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
uint64_t oldalloc = skc->skc_obj_alloc;
spl_kmem_cache_reap_now(skc,
MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));

/*
* Presume everything alloc'ed is reclaimable, this ensures
* we are called again with nr_to_scan > 0 so can try and
* reclaim. The exact number is not important either so
* we forgo taking this already highly contented lock.
*/
alloc += skc->skc_obj_alloc;
if (oldalloc > skc->skc_obj_alloc)
alloc += oldalloc - skc->skc_obj_alloc;
#else
spl_kmem_cache_reap_now(skc,
MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
alloc += skc->skc_obj_alloc;
#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
} else {
/* Request to query number of freeable objects */
alloc += skc->skc_obj_alloc;
}
}
up_read(&spl_kmem_cache_sem);

@@ -2163,7 +2178,7 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
* system to thrash.
*/
if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
return (-1);
return (SHRINK_STOP);

return MAX((alloc * sysctl_vfs_cache_pressure) / 100, 0);
}
@@ -2274,7 +2289,7 @@ spl_kmem_reap(void)
sc.nr_to_scan = KMC_REAP_CHUNK;
sc.gfp_mask = GFP_KERNEL;

__spl_kmem_cache_generic_shrinker(NULL, &sc);
(void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);

22 changes: 16 additions & 6 deletions module/splat/splat-linux.c
@@ -97,11 +97,13 @@ SPL_SHRINKER_DECLARE(splat_linux_shrinker, splat_linux_shrinker_fn, 1);
static unsigned long splat_linux_shrinker_size = 0;
static struct file *splat_linux_shrinker_file = NULL;

static int
static spl_shrinker_t
__splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
{
static int failsafe = 0;
static unsigned long last_splat_linux_shrinker_size = 0;
unsigned long size;
spl_shrinker_t count;

/*
* shrinker_size can only decrease or stay the same between callbacks
@@ -114,13 +116,21 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
last_splat_linux_shrinker_size = splat_linux_shrinker_size;

if (sc->nr_to_scan) {
splat_linux_shrinker_size = splat_linux_shrinker_size -
MIN(sc->nr_to_scan, splat_linux_shrinker_size);
size = MIN(sc->nr_to_scan, splat_linux_shrinker_size);
splat_linux_shrinker_size -= size;

splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST3_NAME,
"Reclaimed %lu objects, size now %lu\n",
sc->nr_to_scan, splat_linux_shrinker_size);
size, splat_linux_shrinker_size);

#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
count = size;
#else
count = splat_linux_shrinker_size;
#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */

} else {
count = splat_linux_shrinker_size;
splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST3_NAME,
"Cache size is %lu\n", splat_linux_shrinker_size);
}
@@ -130,7 +140,7 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST3_NAME,
"Far more calls than expected (%d), size now %lu\n",
failsafe, splat_linux_shrinker_size);
return -1;
return (SHRINK_STOP);
} else {
/*
* We only increment failsafe if it doesn't trigger. This
@@ -142,7 +152,7 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
/* Shrinker has run, so signal back to test. */
wake_up(&shrinker_wait);

return (int)splat_linux_shrinker_size;
return (count);
}

SPL_SHRINKER_CALLBACK_WRAPPER(splat_linux_shrinker_fn);