diff --git a/config/kernel-kmem.m4 b/config/kernel-kmem.m4
index 2862299168c1..43f9e72f88d8 100644
--- a/config/kernel-kmem.m4
+++ b/config/kernel-kmem.m4
@@ -80,3 +80,29 @@ AC_DEFUN([ZFS_AC_KERNEL_KVMALLOC], [
 		AC_MSG_RESULT(no)
 	])
 ])
+
+dnl #
+dnl # 5.8 API,
+dnl # __vmalloc PAGE_KERNEL removal
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
+	ZFS_LINUX_TEST_SRC([__vmalloc], [
+		#include <linux/mm.h>
+		#include <linux/vmalloc.h>
+	],[
+		void *p __attribute__ ((unused));
+
+		p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
+	AC_MSG_CHECKING([whether __vmalloc(ptr, flags, pageflags) is available])
+	ZFS_LINUX_TEST_RESULT([__vmalloc], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
+	],[
+		AC_MSG_RESULT(no)
+	])
+])
\ No newline at end of file
diff --git a/config/kernel.m4 b/config/kernel.m4
index 49db6eae1c43..53c6edbdb578 100644
--- a/config/kernel.m4
+++ b/config/kernel.m4
@@ -46,6 +46,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
 	ZFS_AC_KERNEL_SRC_USLEEP_RANGE
 	ZFS_AC_KERNEL_SRC_KMEM_CACHE
 	ZFS_AC_KERNEL_SRC_KVMALLOC
+	ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
 	ZFS_AC_KERNEL_SRC_WAIT
 	ZFS_AC_KERNEL_SRC_INODE_TIMES
 	ZFS_AC_KERNEL_SRC_INODE_LOCK
@@ -165,6 +166,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
 	ZFS_AC_KERNEL_USLEEP_RANGE
 	ZFS_AC_KERNEL_KMEM_CACHE
 	ZFS_AC_KERNEL_KVMALLOC
+	ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
 	ZFS_AC_KERNEL_WAIT
 	ZFS_AC_KERNEL_INODE_TIMES
 	ZFS_AC_KERNEL_INODE_LOCK
diff --git a/include/spl/sys/kmem.h b/include/spl/sys/kmem.h
index cc820bda5b85..37837b605068 100644
--- a/include/spl/sys/kmem.h
+++ b/include/spl/sys/kmem.h
@@ -172,6 +172,15 @@ extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
 extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
 extern void spl_kmem_free(const void *ptr, size_t sz);
 
+/*
+ * 5.8 API change, pgprot_t argument removed.
+ */
+#ifdef HAVE_VMALLOC_PAGE_KERNEL
+#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
+#else
+#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
+#endif
+
 /*
  * The following functions are only available for internal use.
  */
diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
index 1d1a9fc805ce..1dc03a5bebc0 100644
--- a/module/spl/spl-kmem-cache.c
+++ b/module/spl/spl-kmem-cache.c
@@ -203,23 +203,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
 	} else {
-		/*
-		 * GFP_KERNEL allocations can safely use kvmalloc which may
-		 * improve performance by avoiding a) high latency caused by
-		 * vmalloc's on-access allocation, b) performance loss due to
-		 * MMU memory address mapping and c) vmalloc locking overhead.
-		 * This has the side-effect that the slab statistics will
-		 * incorrectly report this as a vmem allocation, but that is
-		 * purely cosmetic.
-		 *
-		 * For non-GFP_KERNEL allocations we stick to __vmalloc.
-		 */
-		if ((lflags & GFP_KERNEL) == GFP_KERNEL) {
-			ptr = spl_kvmalloc(size, lflags);
-		} else {
-			ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
-			    PAGE_KERNEL);
-		}
+		ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
 	}
 
 	/* Resulting allocated memory will be page aligned */
@@ -247,7 +231,7 @@ kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
 		ASSERT(ISP2(size));
 		free_pages((unsigned long)ptr, get_order(size));
 	} else {
-		spl_kmem_free_impl(ptr, size);
+		vfree(ptr);
 	}
 }
 
@@ -1258,7 +1242,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
 	 * allocation.
 	 *
 	 * However, this can't be applied to KVM_VMEM due to a bug that
-	 * __vmalloc() doesn't honor gfp flags in page table allocation.
+	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
 	 */
 	if (!(skc->skc_flags & KMC_VMEM)) {
 		rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 7765acb5957b..128b3f42960a 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -26,7 +26,6 @@
 #include <sys/debug.h>
 #include <sys/sysmacros.h>
 #include <sys/kmem.h>
-#include <linux/mm.h>
 
 /*
  * As a general rule kmem_alloc() allocations should be small, preferably
@@ -184,11 +183,11 @@ spl_kvmalloc(size_t size, gfp_t lflags)
 
 	/*
 	 * We first try kmalloc - even for big sizes - and fall back to
-	 * __vmalloc if that fails.
+	 * spl_vmalloc if that fails.
 	 *
 	 * For non-GFP_KERNEL allocations we always stick to kmalloc_node,
 	 * and fail when kmalloc is not successful (returns NULL).
-	 * We cannot fall back to __vmalloc in this case because __vmalloc
+	 * We cannot fall back to spl_vmalloc in this case because spl_vmalloc
 	 * internally uses GPF_KERNEL allocations.
 	 */
 	void *ptr = kmalloc_node(size, kmalloc_lflags, NUMA_NO_NODE);
@@ -197,7 +196,7 @@ spl_kvmalloc(size_t size, gfp_t lflags)
 		return (ptr);
 	}
 
-	return (__vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL));
+	return (spl_vmalloc(size, lflags | __GFP_HIGHMEM));
 }
 
 /*
@@ -238,16 +237,15 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 	 * kmem_zalloc() callers.
 	 *
 	 * For vmem_alloc() and vmem_zalloc() callers it is permissible
-	 * to use __vmalloc().  However, in general use of __vmalloc()
-	 * is strongly discouraged because a global lock must be
-	 * acquired.  Contention on this lock can significantly
+	 * to use spl_vmalloc().  However, in general use of
+	 * spl_vmalloc() is strongly discouraged because a global lock
+	 * must be acquired.  Contention on this lock can significantly
 	 * impact performance so frequently manipulating the virtual
 	 * address space is strongly discouraged.
 	 */
 	if (size > spl_kmem_alloc_max) {
 		if (flags & KM_VMEM) {
-			ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
-			    PAGE_KERNEL);
+			ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
 		} else {
 			return (NULL);
 		}
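
Note (illustration only, not part of the patch): a minimal sketch of the
compat pattern this change introduces.  spl_vmalloc() here repeats the
selection logic of the macro added to include/spl/sys/kmem.h above;
HAVE_VMALLOC_PAGE_KERNEL is defined by the new configure check when the
kernel still accepts the three-argument __vmalloc().  The helper names
example_vmem_alloc()/example_vmem_free() are hypothetical.

	#include <linux/mm.h>		/* gfp flags, PAGE_KERNEL */
	#include <linux/vmalloc.h>	/* __vmalloc(), vfree() */

	/* Same selection logic as the spl_vmalloc() macro added above. */
	#ifdef HAVE_VMALLOC_PAGE_KERNEL
	/* Pre-5.8 kernels: __vmalloc(size, gfp_mask, prot) */
	#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
	#else
	/* 5.8+ kernels: pgprot_t argument removed, mappings are PAGE_KERNEL */
	#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
	#endif

	/* Hypothetical caller: allocate a large virtually-mapped buffer. */
	static void *
	example_vmem_alloc(size_t size)
	{
		return (spl_vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM));
	}

	static void
	example_vmem_free(void *ptr)
	{
		vfree(ptr);	/* vfree() takes only the pointer, no size */
	}

Either way the shim expands to a call with the signature the running kernel
actually provides, so callers such as kv_alloc() and spl_kmem_alloc_impl()
need no version checks of their own.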