From 57407732b9aff49920595cb9bfce9fb723bd16e1 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Fri, 6 Feb 2015 13:37:02 -0800
Subject: [PATCH] Retire zio_bulk_flags

Long ago the zio_bulk_flags module parameter was introduced to
facilitate debugging and profiling the zio_buf_caches.  Today this
code works well and there's no compelling reason to keep this
functionality.  In fact it's preferable to revert this so the code
is more consistent with other ZFS implementations.

Signed-off-by: Brian Behlendorf
Signed-off-by: Ned Bass
Issue #3063
---
 man/man5/zfs-module-parameters.5 | 11 -----------
 module/zfs/zio.c                 | 11 +++--------
 scripts/zpios-survey.sh          | 30 +-----------------------------
 3 files changed, 4 insertions(+), 48 deletions(-)

diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index e0000a23a14e..321b6285cad6 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -1379,17 +1379,6 @@ Max commit bytes to separate log device
 Default value: \fB1,048,576\fR.
 .RE
 
-.sp
-.ne 2
-.na
-\fBzio_bulk_flags\fR (int)
-.ad
-.RS 12n
-Additional flags to pass to bulk buffers
-.sp
-Default value: \fB0\fR.
-.RE
-
 .sp
 .ne 2
 .na
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 9d70b3e5969d..513eb3acab08 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -58,7 +58,6 @@ kmem_cache_t *zio_link_cache;
 kmem_cache_t *zio_vdev_cache;
 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
-int zio_bulk_flags = 0;
 int zio_delay_max = ZIO_DELAY_MAX;
 
 /*
@@ -145,6 +144,7 @@ zio_init(void)
 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
 		size_t p2 = size;
 		size_t align = 0;
+		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
 
 		while (p2 & (p2 - 1))
 			p2 &= p2 - 1;
@@ -169,16 +169,14 @@ zio_init(void)
 
 		if (align != 0) {
 			char name[36];
-			int flags = zio_bulk_flags;
-
 			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
 			zio_buf_cache[c] = kmem_cache_create(name, size,
-			    align, NULL, NULL, NULL, NULL, NULL, flags);
+			    align, NULL, NULL, NULL, NULL, NULL, cflags);
 
 			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
 			    align, NULL, NULL, NULL, NULL,
-			    data_alloc_arena, flags);
+			    data_alloc_arena, cflags);
 		}
 	}
 
@@ -3413,9 +3411,6 @@ EXPORT_SYMBOL(zio_handle_device_injection);
 EXPORT_SYMBOL(zio_handle_label_injection);
 EXPORT_SYMBOL(zio_type_name);
 
-module_param(zio_bulk_flags, int, 0644);
-MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers");
-
 module_param(zio_delay_max, int, 0644);
 MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event");
 
diff --git a/scripts/zpios-survey.sh b/scripts/zpios-survey.sh
index cb751b467e6e..78601695ff19 100755
--- a/scripts/zpios-survey.sh
+++ b/scripts/zpios-survey.sh
@@ -120,40 +120,13 @@ zpios_survey_pending() {
 		tee -a ${ZPIOS_SURVEY_LOG}
 }
 
-# To avoid memory fragmentation issues our slab implementation can be
-# based on a virtual address space. Interestingly, we take a pretty
-# substantial performance penalty for this somewhere in the low level
-# IO drivers. If we back the slab with kmem pages we see far better
-# read performance numbers at the cost of memory fragmention and general
-# system instability due to large allocations. This may be because of
-# an optimization in the low level drivers due to the contigeous kmem
-# based memory. This needs to be explained. The good news here is that
-# with zerocopy interfaces added at the DMU layer we could gaurentee
-# kmem based memory for a pool of pages.
-#
-# 0x100 = KMC_KMEM - Force kmem_* based slab
-# 0x200 = KMC_VMEM - Force vmem_* based slab
-zpios_survey_kmem() {
-	TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+kmem"
-	print_header ${TEST_NAME}
-
-	${ZFS_SH} ${VERBOSE_FLAG} \
-		zfs="zio_bulk_flags=0x100" | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-	${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-	${ZFS_SH} -u ${VERBOSE_FLAG} | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-}
-
 # Apply all possible turning concurrently to get a best case number
 zpios_survey_all() {
 	TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+all"
 	print_header ${TEST_NAME}
 
 	${ZFS_SH} ${VERBOSE_FLAG} \
-		zfs="zfs_vdev_max_pending=1024" \
-		zfs="zio_bulk_flags=0x100" | \
+		zfs="zfs_vdev_max_pending=1024" | \
 		tee -a ${ZPIOS_SURVEY_LOG}
 	${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
 		-o "--noprefetch --zerocopy" \
@@ -209,7 +182,6 @@ zpios_survey_prefetch
 zpios_survey_zerocopy
 zpios_survey_checksum
 zpios_survey_pending
-zpios_survey_kmem
 zpios_survey_all
 
 exit 0
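
A quick sketch of the behavior this change leaves in place: instead of
one global zio_bulk_flags tunable, each zio_buf cache now picks its own
creation flags, applying KMC_NODEBUG once the buffer size exceeds
zio_buf_debug_limit.  The userspace program below (not part of the
patch) mirrors that selection; the KMC_NODEBUG value and the
zio_buf_debug_limit default here are illustrative assumptions, not the
SPL's actual definitions.

/*
 * Userspace sketch of the per-size flag selection zio_init() now
 * performs (see the zio.c hunk above).  The SPA_* values reflect the
 * 128K block size limit of the era; KMC_NODEBUG's value and the
 * zio_buf_debug_limit default are assumptions for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define	SPA_MINBLOCKSHIFT	9
#define	SPA_MAXBLOCKSIZE	(128 * 1024)	/* 128K at the time */
#define	KMC_NODEBUG		0x4		/* assumed flag value */

static size_t zio_buf_debug_limit = 16384;	/* assumed default */

int
main(void)
{
	size_t c;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		/* The expression this patch adds in zio_init(). */
		size_t cflags = (size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;
		char name[36];

		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
		    (unsigned long)size);
		(void) printf("%-16s cflags=0x%lx\n", name,
		    (unsigned long)cflags);
	}
	return (0);
}

Built with a stock cc, this prints the creation flags each of the 256
zio_buf size classes would receive (the real loop only creates a cache
when an alignment is chosen, per the if (align != 0) test above).  The
net effect is that whether slab debugging is suppressed becomes a
per-cache decision made at zio_init() time rather than a module-wide
setting supplied by hand.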