diff --git a/.github/codecov.yml b/.github/codecov.yml
new file mode 100644
index 000000000000..f36be39cb2e8
--- /dev/null
+++ b/.github/codecov.yml
@@ -0,0 +1,9 @@
+codecov:
+ strict_yaml_branch: master # only use the latest copy on master branch
+
+comment: off
+
+coverage:
+ status:
+ project: off
+ patch: off
diff --git a/.gitignore b/.gitignore
index 318a4343414d..e664bdd67bfa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,8 @@
*.mod.c
*~
*.swp
+*.gcno
+*.gcda
.deps
.libs
.dirstamp
diff --git a/Makefile.am b/Makefile.am
index 508d3f40e876..b539ff30f7a1 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -23,16 +23,18 @@ EXTRA_DIST = autogen.sh copy-builtin
EXTRA_DIST += config/config.awk config/rpm.am config/deb.am config/tgz.am
EXTRA_DIST += META DISCLAIMER COPYRIGHT README.markdown OPENSOLARIS.LICENSE
+@CODE_COVERAGE_RULES@
+
distclean-local::
-$(RM) -R autom4te*.cache
-find . \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \
-o -name .pc -o -name .hg -o -name .git \) -prune -o \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '.script-config' -o -size 0 \
- -o -name '*%' -o -name '.*.cmd' -o -name 'core' \
- -o -name 'Makefile' -o -name 'Module.symvers' \
- -o -name '*.order' -o -name '*.markers' \) \
+ -o -name '.*.rej' -o -size 0 -o -name '*%' -o -name '.*.cmd' \
+ -o -name 'core' -o -name 'Makefile' -o -name 'Module.symvers' \
+ -o -name '*.order' -o -name '*.markers' -o -name '*.gcda' \
+ -o -name '*.gcno' \) \
-type f -print | xargs $(RM)
dist-hook:
diff --git a/cmd/arcstat/arcstat.py b/cmd/arcstat/arcstat.py
index aa54ee87a75d..85c83ccc442b 100755
--- a/cmd/arcstat/arcstat.py
+++ b/cmd/arcstat/arcstat.py
@@ -280,7 +280,7 @@ def init():
"outfile",
"help",
"verbose",
- "seperator",
+ "separator",
"columns"
]
)
@@ -299,7 +299,7 @@ def init():
hflag = True
if opt in ('-v', '--verbose'):
vflag = True
- if opt in ('-s', '--seperator'):
+ if opt in ('-s', '--separator'):
sep = arg
i += 1
if opt in ('-f', '--columns'):
diff --git a/cmd/dbufstat/dbufstat.py b/cmd/dbufstat/dbufstat.py
index dda0a143f783..42bb0c7c7c3e 100755
--- a/cmd/dbufstat/dbufstat.py
+++ b/cmd/dbufstat/dbufstat.py
@@ -474,7 +474,7 @@ def main():
"help",
"infile",
"outfile",
- "seperator",
+ "separator",
"types",
"verbose",
"extended"
@@ -499,7 +499,7 @@ def main():
ofile = arg
if opt in ('-r', '--raw'):
raw += 1
- if opt in ('-s', '--seperator'):
+ if opt in ('-s', '--separator'):
sep = arg
if opt in ('-t', '--types'):
tflag = True
diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index 1097501e89cc..17a0ae2516db 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -2716,10 +2716,6 @@ dump_label(const char *dev)
exit(1);
}
- if (ioctl(fd, BLKFLSBUF) != 0)
- (void) printf("failed to invalidate cache '%s' : %s\n", path,
- strerror(errno));
-
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
@@ -2727,6 +2723,10 @@ dump_label(const char *dev)
exit(1);
}
+ if (S_ISBLK(statbuf.st_mode) && ioctl(fd, BLKFLSBUF) != 0)
+ (void) printf("failed to invalidate cache '%s' : %s\n", path,
+ strerror(errno));
+
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
@@ -3313,7 +3313,7 @@ dump_block_stats(spa_t *spa)
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
- int e, c;
+ int e, c, err;
bp_embedded_type_t i;
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
@@ -3354,7 +3354,7 @@ dump_block_stats(spa_t *spa)
zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
- zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
+ err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
/*
* If we've traversed the data blocks then we need to wait for those
@@ -3370,6 +3370,12 @@ dump_block_stats(spa_t *spa)
}
}
+ /*
+ * Done after zio_wait() since zcb_haderrors is modified in
+ * zdb_blkptr_done().
+ */
+ zcb.zcb_haderrors |= err;
+
if (zcb.zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
@@ -3889,13 +3895,6 @@ zdb_vdev_lookup(vdev_t *vdev, char *path)
return (NULL);
}
-/* ARGSUSED */
-static int
-random_get_pseudo_bytes_cb(void *buf, size_t len, void *unused)
-{
- return (random_get_pseudo_bytes(buf, len));
-}
-
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
@@ -4058,17 +4057,8 @@ zdb_read_block(char *thing, spa_t *spa)
* every decompress function at every inflated blocksize.
*/
enum zio_compress c;
- void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
- abd_copy_to_buf(pbuf2, pabd, psize);
-
- VERIFY0(abd_iterate_func(pabd, psize, SPA_MAXBLOCKSIZE - psize,
- random_get_pseudo_bytes_cb, NULL));
-
- VERIFY0(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
- SPA_MAXBLOCKSIZE - psize));
-
/*
* XXX - On the one hand, with SPA_MAXBLOCKSIZE at 16MB,
* this could take a while and we should let the user know
@@ -4078,13 +4068,29 @@ zdb_read_block(char *thing, spa_t *spa)
for (lsize = psize + SPA_MINBLOCKSIZE;
lsize <= SPA_MAXBLOCKSIZE; lsize += SPA_MINBLOCKSIZE) {
for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
+ /*
+ * ZLE can easily decompress a stream that was not
+ * ZLE-compressed, so provide an option to disable it.
+ */
+ if (c == ZIO_COMPRESS_ZLE &&
+ getenv("ZDB_NO_ZLE"))
+ continue;
+
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize, (u_longlong_t)lsize,
zio_compress_table[c].ci_name);
+
+ /*
+ * We randomize lbuf2, and decompress to both
+ * lbuf and lbuf2. This way, we will know whether
+ * decompression filled exactly lsize bytes.
+ */
+ VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
+
if (zio_decompress_data(c, pabd,
lbuf, psize, lsize) == 0 &&
- zio_decompress_data_buf(c, pbuf2,
+ zio_decompress_data(c, pabd,
lbuf2, psize, lsize) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
break;
@@ -4092,11 +4098,9 @@ zdb_read_block(char *thing, spa_t *spa)
if (c != ZIO_COMPRESS_FUNCTIONS)
break;
}
-
- umem_free(pbuf2, SPA_MAXBLOCKSIZE);
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
- if (lsize <= psize) {
+ if (lsize > SPA_MAXBLOCKSIZE) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
@@ -4135,11 +4139,12 @@ zdb_embedded_block(char *thing)
{
blkptr_t bp;
unsigned long long *words = (void *)&bp;
- char buf[SPA_MAXBLOCKSIZE];
+ char *buf;
int err;
- memset(&bp, 0, sizeof (blkptr_t));
+ buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
+ bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
@@ -4157,6 +4162,7 @@ zdb_embedded_block(char *thing)
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
+ umem_free(buf, SPA_MAXBLOCKSIZE);
}
int
@@ -4171,7 +4177,7 @@ main(int argc, char **argv)
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
- char *target;
+ char *target, *target_pool;
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int flags = ZFS_IMPORT_MISSING_LOG;
@@ -4374,6 +4380,20 @@ main(int argc, char **argv)
error = 0;
target = argv[0];
+ if (strpbrk(target, "/@") != NULL) {
+ size_t targetlen;
+
+ target_pool = strdup(target);
+ *strpbrk(target_pool, "/@") = '\0';
+
+ target_is_spa = B_FALSE;
+ targetlen = strlen(target);
+ if (targetlen && target[targetlen - 1] == '/')
+ target[targetlen - 1] = '\0';
+ } else {
+ target_pool = target;
+ }
+
if (dump_opt['e']) {
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
@@ -4382,8 +4402,10 @@ main(int argc, char **argv)
args.path = searchdirs;
args.can_be_active = B_TRUE;
- error = zpool_tryimport(g_zfs, target, &cfg, &args);
+ error = zpool_tryimport(g_zfs, target_pool, &cfg, &args);
+
if (error == 0) {
+
if (nvlist_add_nvlist(cfg,
ZPOOL_REWIND_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
@@ -4398,19 +4420,13 @@ main(int argc, char **argv)
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
- error = spa_import(target, cfg, NULL,
+ error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
- if (strpbrk(target, "/@") != NULL) {
- size_t targetlen;
-
- target_is_spa = B_FALSE;
- targetlen = strlen(target);
- if (targetlen && target[targetlen - 1] == '/')
- target[targetlen - 1] = '\0';
- }
+ if (target_pool != target)
+ free(target_pool);
if (error == 0) {
if (target_is_spa || dump_opt['R']) {
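A quick way to exercise the two zdb changes above is sketched below; the pool, dataset and block-descriptor values are hypothetical. ZDB_NO_ZLE skips the ZLE candidate in the -R brute-force decompression loop, and dataset/snapshot targets now have their pool name split off internally via strpbrk(target, "/@") before the -e import.

    # skip ZLE while guessing the compression algorithm of a raw block
    ZDB_NO_ZLE=1 zdb -e -R tank 0:400000:20000:d

    # dataset and snapshot targets now import the right pool ("tank")
    zdb -e tank/fs@snap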
diff --git a/cmd/zed/Makefile.am b/cmd/zed/Makefile.am
index 97733a5125d6..ee44898cd6d7 100644
--- a/cmd/zed/Makefile.am
+++ b/cmd/zed/Makefile.am
@@ -69,7 +69,8 @@ dist_zedexec_SCRIPTS = \
zed.d/statechange-notify.sh \
zed.d/vdev_clear-led.sh \
zed.d/vdev_attach-led.sh \
- zed.d/pool_import-led.sh
+ zed.d/pool_import-led.sh \
+ zed.d/resilver_finish-start-scrub.sh
zedconfdefaults = \
all-syslog.sh \
@@ -80,7 +81,8 @@ zedconfdefaults = \
statechange-notify.sh \
vdev_clear-led.sh \
vdev_attach-led.sh \
- pool_import-led.sh
+ pool_import-led.sh \
+ resilver_finish-start-scrub.sh
install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(zedconfdir)"
diff --git a/cmd/zed/zed.d/resilver_finish-start-scrub.sh b/cmd/zed/zed.d/resilver_finish-start-scrub.sh
new file mode 100755
index 000000000000..6f9c0b309467
--- /dev/null
+++ b/cmd/zed/zed.d/resilver_finish-start-scrub.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+# resilver_finish-start-scrub.sh
+# Run a scrub after a resilver
+#
+# Exit codes:
+# 1: Internal error
+# 2: Script wasn't enabled in zed.rc
+[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
+. "${ZED_ZEDLET_DIR}/zed-functions.sh"
+
+[ "${ZED_SCRUB_AFTER_RESILVER}" = "1" ] || exit 2
+[ -n "${ZEVENT_POOL}" ] || exit 1
+[ -n "${ZEVENT_SUBCLASS}" ] || exit 1
+zed_check_cmd "${ZPOOL}" || exit 1
+
+zed_log_msg "Starting scrub after resilver on ${ZEVENT_POOL}"
+"${ZPOOL}" scrub "${ZEVENT_POOL}"
diff --git a/cmd/zed/zed.d/zed.rc b/cmd/zed/zed.d/zed.rc
index a1dd33704db8..8b0e476d5a1a 100644
--- a/cmd/zed/zed.d/zed.rc
+++ b/cmd/zed/zed.d/zed.rc
@@ -86,6 +86,9 @@
#
ZED_USE_ENCLOSURE_LEDS=1
+##
+# Run a scrub after every resilver
+#ZED_SCRUB_AFTER_RESILVER=1
##
# The syslog priority (e.g., specified as a "facility.level" pair).
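To actually use the new zedlet, ZED_SCRUB_AFTER_RESILVER must be uncommented in zed.rc (otherwise the script exits with code 2); a minimal sketch, assuming the default /etc/zfs/zed.d zedlet directory:

    # enable the option
    sed -i 's/^#ZED_SCRUB_AFTER_RESILVER=1/ZED_SCRUB_AFTER_RESILVER=1/' /etc/zfs/zed.d/zed.rc

    # confirm the zedlet is installed next to the other defaults
    ls -l /etc/zfs/zed.d/resilver_finish-start-scrub.sh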
diff --git a/cmd/zed/zed_conf.c b/cmd/zed/zed_conf.c
index 5b27f1e4f1f2..86671369c19e 100644
--- a/cmd/zed/zed_conf.c
+++ b/cmd/zed/zed_conf.c
@@ -155,6 +155,8 @@ _zed_conf_display_help(const char *prog, int got_err)
"Run daemon in the foreground.");
fprintf(fp, "%*c%*s %s\n", w1, 0x20, -w2, "-M",
"Lock all pages in memory.");
+ fprintf(fp, "%*c%*s %s\n", w1, 0x20, -w2, "-P",
+ "$PATH for ZED to use (only used by ZTS).");
fprintf(fp, "%*c%*s %s\n", w1, 0x20, -w2, "-Z",
"Zero state file.");
fprintf(fp, "\n");
@@ -247,7 +249,7 @@ _zed_conf_parse_path(char **resultp, const char *path)
void
zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
{
- const char * const opts = ":hLVc:d:p:s:vfFMZ";
+ const char * const opts = ":hLVc:d:p:P:s:vfFMZ";
int opt;
if (!zcp || !argv || !argv[0])
@@ -275,6 +277,9 @@ zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
case 'p':
_zed_conf_parse_path(&zcp->pid_file, optarg);
break;
+ case 'P':
+ _zed_conf_parse_path(&zcp->path, optarg);
+ break;
case 's':
_zed_conf_parse_path(&zcp->state_file, optarg);
break;
diff --git a/cmd/zed/zed_conf.h b/cmd/zed/zed_conf.h
index 2bc63413425b..7d6b63b1d7cd 100644
--- a/cmd/zed/zed_conf.h
+++ b/cmd/zed/zed_conf.h
@@ -37,6 +37,7 @@ struct zed_conf {
int state_fd; /* fd to state file */
libzfs_handle_t *zfs_hdl; /* handle to libzfs */
int zevent_fd; /* fd for access to zevents */
+ char *path; /* custom $PATH for zedlets to use */
};
struct zed_conf *zed_conf_create(void);
diff --git a/cmd/zed/zed_event.c b/cmd/zed/zed_event.c
index 390235019b36..2a7ff16fd38e 100644
--- a/cmd/zed/zed_event.c
+++ b/cmd/zed/zed_event.c
@@ -733,12 +733,14 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
/*
* Restrict various environment variables to safe and sane values
- * when constructing the environment for the child process.
+ * when constructing the environment for the child process, unless
+ * we're running with a custom $PATH (like under the ZFS test suite).
*
* Reference: Secure Programming Cookbook by Viega & Messier, Section 1.1.
*/
static void
-_zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp)
+_zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp,
+ const char *path)
{
const char *env_restrict[][2] = {
{ "IFS", " \t\n" },
@@ -753,11 +755,35 @@ _zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp)
{ "ZFS_RELEASE", ZFS_META_RELEASE },
{ NULL, NULL }
};
+
+ /*
+ * If we have a custom $PATH, use the default ZFS binary locations
+ * instead of the hard-coded ones.
+ */
+ const char *env_path[][2] = {
+ { "IFS", " \t\n" },
+ { "PATH", NULL }, /* $PATH copied in later on */
+ { "ZDB", "zdb" },
+ { "ZED", "zed" },
+ { "ZFS", "zfs" },
+ { "ZINJECT", "zinject" },
+ { "ZPOOL", "zpool" },
+ { "ZFS_ALIAS", ZFS_META_ALIAS },
+ { "ZFS_VERSION", ZFS_META_VERSION },
+ { "ZFS_RELEASE", ZFS_META_RELEASE },
+ { NULL, NULL }
+ };
const char *(*pa)[2];
assert(zsp != NULL);
- for (pa = env_restrict; *(*pa); pa++) {
+ pa = path != NULL ? env_path : env_restrict;
+
+ for (; *(*pa); pa++) {
+ /* Use our custom $PATH if we have one */
+ if (path != NULL && strcmp((*pa)[0], "PATH") == 0)
+ (*pa)[1] = path;
+
_zed_event_add_var(eid, zsp, NULL, (*pa)[0], "%s", (*pa)[1]);
}
}
@@ -902,7 +928,7 @@ zed_event_service(struct zed_conf *zcp)
while ((nvp = nvlist_next_nvpair(nvl, nvp)))
_zed_event_add_nvpair(eid, zsp, nvp);
- _zed_event_add_env_restrict(eid, zsp);
+ _zed_event_add_env_restrict(eid, zsp, zcp->path);
_zed_event_add_env_preserve(eid, zsp);
_zed_event_add_var(eid, zsp, ZED_VAR_PREFIX, "PID",
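The new -P option exists so the ZFS test suite can point zedlets at in-tree binaries; when it is used, the ZDB/ZED/ZFS/ZINJECT/ZPOOL variables fall back to bare command names resolved through the supplied $PATH. A hypothetical invocation (the path is made up):

    zed -F -P /var/tmp/zfs-tests/bin:/usr/bin:/bin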
diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c
index e8fe6a9fa9bf..f57df8581f3c 100644
--- a/cmd/zfs/zfs_main.c
+++ b/cmd/zfs/zfs_main.c
@@ -6072,7 +6072,7 @@ share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
- "\"zfs receive -r\", which can be resumed with "
+ "\"zfs receive -s\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
diff --git a/cmd/zpool/Makefile.am b/cmd/zpool/Makefile.am
index 6eff1d143202..d07f8d616534 100644
--- a/cmd/zpool/Makefile.am
+++ b/cmd/zpool/Makefile.am
@@ -60,9 +60,15 @@ dist_zpoolexec_SCRIPTS = \
zpool.d/pend_sec \
zpool.d/off_ucor \
zpool.d/ata_err \
+ zpool.d/nvme_err \
zpool.d/pwr_cyc \
zpool.d/upath \
- zpool.d/vendor
+ zpool.d/vendor \
+ zpool.d/smart_test \
+ zpool.d/test_type \
+ zpool.d/test_status \
+ zpool.d/test_progress \
+ zpool.d/test_ended
zpoolconfdefaults = \
enc \
@@ -98,9 +104,15 @@ zpoolconfdefaults = \
pend_sec \
off_ucor \
ata_err \
+ nvme_err \
pwr_cyc \
upath \
- vendor
+ vendor \
+ smart_test \
+ test_type \
+ test_status \
+ test_progress \
+ test_ended
install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(zpoolconfdir)"
diff --git a/cmd/zpool/zpool.d/nvme_err b/cmd/zpool/zpool.d/nvme_err
new file mode 120000
index 000000000000..94f22861f0ce
--- /dev/null
+++ b/cmd/zpool/zpool.d/nvme_err
@@ -0,0 +1 @@
+smart
\ No newline at end of file
diff --git a/cmd/zpool/zpool.d/smart b/cmd/zpool/zpool.d/smart
index 3721f30edd24..64b5f6e4e4bc 100755
--- a/cmd/zpool/zpool.d/smart
+++ b/cmd/zpool/zpool.d/smart
@@ -23,8 +23,45 @@ off_ucor: Show SMART offline uncorrectable errors (ATA).
ata_err: Show SMART ATA errors (ATA).
pwr_cyc: Show SMART power cycle count (ATA).
serial: Show disk serial number.
+nvme_err: Show SMART NVMe errors (NVMe).
+smart_test: Show SMART self-test results summary.
+test_type: Show SMART self-test type (short, long, ...).
+test_status: Show SMART self-test status.
+test_progress: Show SMART self-test percentage done.
+test_ended: Show when the last SMART self-test ended (if supported).
"
+# Hack for developer testing
+#
+# If you set $samples to a directory containing smartctl output text files,
+# we will use them instead of running smartctl on the vdevs. This can be
+# useful if you want to test a bunch of different smartctl outputs. Also, if
+# $samples is set, an additional 'file' column is added to the zpool output
+# showing the filename.
+samples=
+
+# get_filename_from_dir DIR
+#
+# Look in directory DIR and return a filename from it. The filename returned
+# is chosen quasi-sequentially (based on our PID). This allows us to return
+# a different filename every time this script is invoked (which we do for each
+# vdev), without having to maintain state.
+get_filename_from_dir()
+{
+ dir=$1
+ pid="$$"
+ num_files=$(find "$dir" -maxdepth 1 -type f | wc -l)
+ mod=$((pid % num_files))
+ i=0
+ find "$dir" -type f -printf "%f\n" | while read -r file ; do
+ if [ "$mod" = "$i" ] ; then
+ echo "$file"
+ break
+ fi
+ i=$((i+1))
+ done
+}
+
script=$(basename "$0")
if [ "$1" = "-h" ] ; then
@@ -34,10 +71,18 @@ fi
smartctl_path=$(which smartctl)
-if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ]; then
- raw_out=$(eval "sudo $smartctl_path -a $VDEV_UPATH")
+if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ] || [ -n "$samples" ] ; then
+ if [ -n "$samples" ] ; then
+ # cat a smartctl output text file instead of running smartctl
+ # on a vdev (only used for developer testing).
+ file=$(get_filename_from_dir $samples)
+ echo "file=$file"
+ raw_out=$(cat "$samples/$file")
+ else
+ raw_out=$(eval "sudo $smartctl_path -a $VDEV_UPATH")
+ fi
- # Are we a SAS or ATA drive? Look for the right line in smartctl:
+ # What kind of drive are we? Look for the right line in smartctl:
#
# SAS:
# Transport protocol: SAS
@@ -45,7 +90,9 @@ if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ]; then
# SATA:
# ATA Version is: 8
#
- type=$(echo "$raw_out" | grep -m 1 -Eo '^ATA|SAS$')
+ # NVMe:
+ # SMART/Health Information (NVMe Log 0xnn, NSID 0xnn)
+ #
out=$(echo "$raw_out" | awk '
# SAS specific
/read:/{print "rrd="$4"\nr_cor="$5"\nr_proc="$7"\nr_ucor="$8}
@@ -54,10 +101,11 @@ if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ]; then
/Elements in grown defect list/{print "defect="$6}
# SAS common
+/SAS/{type="sas"}
/Drive Temperature:/{print "temp="$4}
# Status can be a long string, substitute spaces for '_'
/SMART Health Status:/{printf "health="; for(i=4;i<=NF-1;i++){printf "%s_", $i}; printf "%s\n", $i}
-/number of hours powered up/{print "hours_on="$7}
+/number of hours powered up/{print "hours_on="$7; hours_on=int($7)}
/Serial number:/{print "serial="$3}
# SATA specific
@@ -70,40 +118,111 @@ if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ]; then
/Power_Cycle_Count/{print "pwr_cyc="$10}
# SATA common
+/SATA/{type="sata"}
/Temperature_Celsius/{print "temp="$10}
+/Airflow_Temperature_Cel/{print "temp="$10}
+/Current Temperature:/{print "temp="$3}
+/SMART overall-health self-assessment test result:/{print "health="$6}
+/Power_On_Hours/{print "hours_on="$10; hours_on=int($10)}
+/Serial Number:/{print "serial="$3}
+
+# NVMe common
+/NVMe/{type="nvme"}
+/Temperature:/{print "temp="$2}
/SMART overall-health self-assessment test result:/{print "health="$6}
-/Power_On_Hours/{print "hours_on="$10}
+/Power On Hours:/{gsub("[^0-9]","",$4); print "hours_on="$4}
/Serial Number:/{print "serial="$3}
+/Power Cycles:/{print "pwr_cyc="$3}
+
+# NVMe specific
+/Media and Data Integrity Errors:/{print "nvme_err="$6}
+
+# SMART self-test info
+/Self-test execution status:/{progress=tolower($4)} # SAS
+/SMART Self-test log/{test_seen=1} # SAS
+/SMART Extended Self-test Log/{test_seen=1} # SATA
+/# 1/{
+ test_type=tolower($3"_"$4);
+ # Status could be one word ("Completed") or multiple ("Completed: read
+ # failure"). Look for the ":" to see if we need to grab more words.
+
+ if ($5 ~ ":")
+ status=tolower($5""$6"_"$7)
+ else
+ status=tolower($5)
+ if (status=="self")
+ status="running";
-END {ORS="\n"; print ""}
+ if (type == "sas") {
+ hours=int($(NF-4))
+ } else {
+ hours=int($(NF-1))
+ # SATA reports percent remaining, rather than percent done
+ # Convert it to percent done.
+ progress=(100-int($(NF-2)))"%"
+ }
+ # When we int()-ify "hours", it converts stuff like "NOW" and "-" into
+ # 0. In those cases, set it to hours_on, so they will cancel out in
+ # the "hours_ago" calculation later on.
+ if (hours == 0)
+ hours=hours_on
+
+ if (test_seen) {
+ print "test="hours_on
+ print "test_type="test_type
+ print "test_status="status
+ print "test_progress="progress
+ }
+ # Not all drives report hours_on
+ if (hours_on && hours) {
+ total_hours_ago=(hours_on-hours)
+ days_ago=int(total_hours_ago/24)
+ hours_ago=(total_hours_ago % 24)
+ if (days_ago != 0)
+ ago_str=days_ago"d"
+ if (hours_ago !=0)
+ ago_str=ago_str""hours_ago"h"
+ print "test_ended="ago_str
+ }
+}
+
+END {print "type="type; ORS="\n"; print ""}
');
fi
+type=$(echo "$out" | grep '^type=' | cut -d '=' -f 2)
-# if type is not set by now, either we don't have a block device
-# or smartctl failed. Either way, default to ATA and set out to
-# nothing
+# If type is not set by now, either we don't have a block device
+# or smartctl failed. Either way, default to SATA and set $out to
+# nothing.
if [ -z "$type" ]; then
- type="ATA"
+ type="sata"
out=
fi
case $script in
smart)
# Print temperature plus common predictors of drive failure
- if [ "$type" = "SAS" ] ; then
+ if [ "$type" = "sas" ] ; then
scripts="temp|health|r_ucor|w_ucor"
- elif [ "$type" = "ATA" ] ; then
+ elif [ "$type" = "sata" ] ; then
scripts="temp|health|ata_err|realloc|rep_ucor|cmd_to|pend_sec|off_ucor"
+ elif [ "$type" = "nvme" ] ; then
+ scripts="temp|health|nvme_err"
fi
;;
smartx)
# Print some other interesting stats
- if [ "$type" = "SAS" ] ; then
+ if [ "$type" = "sas" ] ; then
scripts="hours_on|defect|nonmed|r_proc|w_proc"
- elif [ "$type" = "ATA" ] ; then
+ elif [ "$type" = "sata" ] ; then
+ scripts="hours_on|pwr_cyc"
+ elif [ "$type" = "nvme" ] ; then
scripts="hours_on|pwr_cyc"
fi
;;
+smart_test)
+ scripts="test_type|test_status|test_progress|test_ended"
+ ;;
*)
scripts="$script"
esac
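The new columns are consumed through `zpool status -c`/`zpool iostat -c` like the existing ones; a short sketch with a hypothetical pool name and sample directory:

    # show self-test results for every vdev in the pool
    zpool status -c smart_test,test_ended tank

    # developer hack: set $samples near the top of the script, e.g.
    #   samples=/var/tmp/smartctl-samples
    # and each vdev is fed one of the captured smartctl output files,
    # with an extra 'file' column naming which one was used.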
diff --git a/cmd/zpool/zpool.d/smart_test b/cmd/zpool/zpool.d/smart_test
new file mode 120000
index 000000000000..94f22861f0ce
--- /dev/null
+++ b/cmd/zpool/zpool.d/smart_test
@@ -0,0 +1 @@
+smart
\ No newline at end of file
diff --git a/cmd/zpool/zpool.d/test_ended b/cmd/zpool/zpool.d/test_ended
new file mode 120000
index 000000000000..94f22861f0ce
--- /dev/null
+++ b/cmd/zpool/zpool.d/test_ended
@@ -0,0 +1 @@
+smart
\ No newline at end of file
diff --git a/cmd/zpool/zpool.d/test_progress b/cmd/zpool/zpool.d/test_progress
new file mode 120000
index 000000000000..94f22861f0ce
--- /dev/null
+++ b/cmd/zpool/zpool.d/test_progress
@@ -0,0 +1 @@
+smart
\ No newline at end of file
diff --git a/cmd/zpool/zpool.d/test_status b/cmd/zpool/zpool.d/test_status
new file mode 120000
index 000000000000..94f22861f0ce
--- /dev/null
+++ b/cmd/zpool/zpool.d/test_status
@@ -0,0 +1 @@
+smart
\ No newline at end of file
diff --git a/cmd/zpool/zpool.d/test_type b/cmd/zpool/zpool.d/test_type
new file mode 120000
index 000000000000..94f22861f0ce
--- /dev/null
+++ b/cmd/zpool/zpool.d/test_type
@@ -0,0 +1 @@
+smart
\ No newline at end of file
diff --git a/config/Rules.am b/config/Rules.am
index 1d39e7779a7e..215f09c34a9c 100644
--- a/config/Rules.am
+++ b/config/Rules.am
@@ -6,6 +6,7 @@ AM_CFLAGS += ${NO_UNUSED_BUT_SET_VARIABLE}
AM_CFLAGS += ${NO_BOOL_COMPARE}
AM_CFLAGS += -fno-strict-aliasing
AM_CFLAGS += -std=gnu99
+AM_CFLAGS += $(CODE_COVERAGE_CFLAGS)
AM_CPPFLAGS = -D_GNU_SOURCE -D__EXTENSIONS__ -D_REENTRANT
AM_CPPFLAGS += -D_POSIX_PTHREAD_SEMANTICS -D_FILE_OFFSET_BITS=64
AM_CPPFLAGS += -D_LARGEFILE64_SOURCE -DHAVE_LARGE_STACKS=1
@@ -14,3 +15,4 @@ AM_CPPFLAGS += -DLIBEXECDIR=\"$(libexecdir)\"
AM_CPPFLAGS += -DRUNSTATEDIR=\"$(runstatedir)\"
AM_CPPFLAGS += -DSBINDIR=\"$(sbindir)\"
AM_CPPFLAGS += -DSYSCONFDIR=\"$(sysconfdir)\"
+AM_CPPFLAGS += $(CODE_COVERAGE_CPPFLAGS)
diff --git a/config/ax_code_coverage.m4 b/config/ax_code_coverage.m4
new file mode 100644
index 000000000000..6484f0332435
--- /dev/null
+++ b/config/ax_code_coverage.m4
@@ -0,0 +1,264 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_code_coverage.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CODE_COVERAGE()
+#
+# DESCRIPTION
+#
+# Defines CODE_COVERAGE_CPPFLAGS, CODE_COVERAGE_CFLAGS,
+# CODE_COVERAGE_CXXFLAGS and CODE_COVERAGE_LIBS which should be included
+# in the CPPFLAGS, CFLAGS CXXFLAGS and LIBS/LIBADD variables of every
+# build target (program or library) which should be built with code
+# coverage support. Also defines CODE_COVERAGE_RULES which should be
+# substituted in your Makefile; and $enable_code_coverage which can be
+# used in subsequent configure output. CODE_COVERAGE_ENABLED is defined
+# and substituted, and corresponds to the value of the
+# --enable-code-coverage option, which defaults to being disabled.
+#
+# Also tests for the gcov program and creates a GCOV variable that can be
+# substituted.
+#
+# Note that all optimization flags in CFLAGS must be disabled when code
+# coverage is enabled.
+#
+# Usage example:
+#
+# configure.ac:
+#
+# AX_CODE_COVERAGE
+#
+# Makefile.am:
+#
+# @CODE_COVERAGE_RULES@
+# my_program_LIBS = ... $(CODE_COVERAGE_LIBS) ...
+# my_program_CPPFLAGS = ... $(CODE_COVERAGE_CPPFLAGS) ...
+# my_program_CFLAGS = ... $(CODE_COVERAGE_CFLAGS) ...
+# my_program_CXXFLAGS = ... $(CODE_COVERAGE_CXXFLAGS) ...
+#
+# This results in a "check-code-coverage" rule being added to any
+# Makefile.am which includes "@CODE_COVERAGE_RULES@" (assuming the module
+# has been configured with --enable-code-coverage). Running `make
+# check-code-coverage` in that directory will run the module's test suite
+# (`make check`) and build a code coverage report detailing the code which
+# was touched, then print the URI for the report.
+#
+# In earlier versions of this macro, CODE_COVERAGE_LDFLAGS was defined
+# instead of CODE_COVERAGE_LIBS. They are both still defined, but use of
+# CODE_COVERAGE_LIBS is preferred for clarity; CODE_COVERAGE_LDFLAGS is
+# deprecated. They have the same value.
+#
+# This code was derived from Makefile.decl in GLib, originally licenced
+# under LGPLv2.1+.
+#
+# LICENSE
+#
+# Copyright (c) 2012, 2016 Philip Withnall
+# Copyright (c) 2012 Xan Lopez
+# Copyright (c) 2012 Christian Persch
+# Copyright (c) 2012 Paolo Borelli
+# Copyright (c) 2012 Dan Winship
+# Copyright (c) 2015 Bastien ROUCARIES
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or (at
+# your option) any later version.
+#
+# This library is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+# General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+#serial 25
+
+AC_DEFUN([AX_CODE_COVERAGE],[
+ dnl Check for --enable-code-coverage
+ AC_REQUIRE([AC_PROG_SED])
+
+ # allow the user to override the gcov location
+ AC_ARG_WITH([gcov],
+ [AS_HELP_STRING([--with-gcov[=GCOV]], [use given GCOV for coverage (GCOV=gcov).])],
+ [_AX_CODE_COVERAGE_GCOV_PROG_WITH=$with_gcov],
+ [_AX_CODE_COVERAGE_GCOV_PROG_WITH=gcov])
+
+ AC_MSG_CHECKING([whether to build with code coverage support])
+ AC_ARG_ENABLE([code-coverage],
+ AS_HELP_STRING([--enable-code-coverage],
+ [Whether to enable code coverage support]),,
+ enable_code_coverage=no)
+
+ AM_CONDITIONAL([CODE_COVERAGE_ENABLED], [test x$enable_code_coverage = xyes])
+ AC_SUBST([CODE_COVERAGE_ENABLED], [$enable_code_coverage])
+ AC_MSG_RESULT($enable_code_coverage)
+
+ AS_IF([ test "$enable_code_coverage" = "yes" ], [
+ # check for gcov
+ AC_CHECK_TOOL([GCOV],
+ [$_AX_CODE_COVERAGE_GCOV_PROG_WITH],
+ [:])
+ AS_IF([test "X$GCOV" = "X:"],
+ [AC_MSG_ERROR([gcov is needed to do coverage])])
+ AC_SUBST([GCOV])
+
+ dnl Check if gcc is being used
+ AS_IF([ test "$GCC" = "no" ], [
+ AC_MSG_ERROR([not compiling with gcc, which is required for gcov code coverage])
+ ])
+
+ AC_CHECK_PROG([LCOV], [lcov], [lcov])
+ AC_CHECK_PROG([GENHTML], [genhtml], [genhtml])
+
+ AS_IF([ test -z "$LCOV" ], [
+ AC_MSG_ERROR([To enable code coverage reporting you must have lcov installed])
+ ])
+
+ AS_IF([ test -z "$GENHTML" ], [
+ AC_MSG_ERROR([Could not find genhtml from the lcov package])
+ ])
+
+ dnl Build the code coverage flags
+ dnl Define CODE_COVERAGE_LDFLAGS for backwards compatibility
+ CODE_COVERAGE_CPPFLAGS="-DNDEBUG"
+ CODE_COVERAGE_CFLAGS="-O0 -g -fprofile-arcs -ftest-coverage"
+ CODE_COVERAGE_CXXFLAGS="-O0 -g -fprofile-arcs -ftest-coverage"
+ CODE_COVERAGE_LIBS="-lgcov"
+ CODE_COVERAGE_LDFLAGS="$CODE_COVERAGE_LIBS"
+
+ AC_SUBST([CODE_COVERAGE_CPPFLAGS])
+ AC_SUBST([CODE_COVERAGE_CFLAGS])
+ AC_SUBST([CODE_COVERAGE_CXXFLAGS])
+ AC_SUBST([CODE_COVERAGE_LIBS])
+ AC_SUBST([CODE_COVERAGE_LDFLAGS])
+
+ [CODE_COVERAGE_RULES_CHECK='
+ -$(A''M_V_at)$(MAKE) $(AM_MAKEFLAGS) -k check
+ $(A''M_V_at)$(MAKE) $(AM_MAKEFLAGS) code-coverage-capture
+']
+ [CODE_COVERAGE_RULES_CAPTURE='
+ $(code_coverage_v_lcov_cap)$(LCOV) $(code_coverage_quiet) $(addprefix --directory ,$(CODE_COVERAGE_DIRECTORY)) --capture --output-file "$(CODE_COVERAGE_OUTPUT_FILE).tmp" --test-name "$(call code_coverage_sanitize,$(PACKAGE_NAME)-$(PACKAGE_VERSION))" --no-checksum --compat-libtool $(CODE_COVERAGE_LCOV_SHOPTS) $(CODE_COVERAGE_LCOV_OPTIONS)
+ $(code_coverage_v_lcov_ign)$(LCOV) $(code_coverage_quiet) $(addprefix --directory ,$(CODE_COVERAGE_DIRECTORY)) --remove "$(CODE_COVERAGE_OUTPUT_FILE).tmp" "/tmp/*" $(CODE_COVERAGE_IGNORE_PATTERN) --output-file "$(CODE_COVERAGE_OUTPUT_FILE)" $(CODE_COVERAGE_LCOV_SHOPTS) $(CODE_COVERAGE_LCOV_RMOPTS)
+ -@rm -f $(CODE_COVERAGE_OUTPUT_FILE).tmp
+ $(code_coverage_v_genhtml)LANG=C $(GENHTML) $(code_coverage_quiet) $(addprefix --prefix ,$(CODE_COVERAGE_DIRECTORY)) --output-directory "$(CODE_COVERAGE_OUTPUT_DIRECTORY)" --title "$(PACKAGE_NAME)-$(PACKAGE_VERSION) Code Coverage" --legend --show-details "$(CODE_COVERAGE_OUTPUT_FILE)" $(CODE_COVERAGE_GENHTML_OPTIONS)
+ @echo "file://$(abs_builddir)/$(CODE_COVERAGE_OUTPUT_DIRECTORY)/index.html"
+']
+ [CODE_COVERAGE_RULES_CLEAN='
+clean: code-coverage-clean
+distclean: code-coverage-clean
+code-coverage-clean:
+ -$(LCOV) --directory $(top_builddir) -z
+ -rm -rf $(CODE_COVERAGE_OUTPUT_FILE) $(CODE_COVERAGE_OUTPUT_FILE).tmp $(CODE_COVERAGE_OUTPUT_DIRECTORY)
+ -find . \( -name "*.gcda" -o -name "*.gcno" -o -name "*.gcov" \) -delete
+']
+ ], [
+ [CODE_COVERAGE_RULES_CHECK='
+ @echo "Need to reconfigure with --enable-code-coverage"
+']
+ CODE_COVERAGE_RULES_CAPTURE="$CODE_COVERAGE_RULES_CHECK"
+ CODE_COVERAGE_RULES_CLEAN=''
+ ])
+
+[CODE_COVERAGE_RULES='
+# Code coverage
+#
+# Optional:
+# - CODE_COVERAGE_DIRECTORY: Top-level directory for code coverage reporting.
+# Multiple directories may be specified, separated by whitespace.
+# (Default: $(top_builddir))
+# - CODE_COVERAGE_OUTPUT_FILE: Filename and path for the .info file generated
+# by lcov for code coverage. (Default:
+# $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage.info)
+# - CODE_COVERAGE_OUTPUT_DIRECTORY: Directory for generated code coverage
+# reports to be created. (Default:
+# $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage)
+# - CODE_COVERAGE_BRANCH_COVERAGE: Set to 1 to enforce branch coverage,
+# set to 0 to disable it and leave empty to stay with the default.
+# (Default: empty)
+# - CODE_COVERAGE_LCOV_SHOPTS_DEFAULT: Extra options shared between both lcov
+# instances. (Default: based on $CODE_COVERAGE_BRANCH_COVERAGE)
+# - CODE_COVERAGE_LCOV_SHOPTS: Extra options to shared between both lcov
+# instances. (Default: $CODE_COVERAGE_LCOV_SHOPTS_DEFAULT)
+# - CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH: --gcov-tool pathtogcov
+# - CODE_COVERAGE_LCOV_OPTIONS_DEFAULT: Extra options to pass to the
+# collecting lcov instance. (Default: $CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH)
+# - CODE_COVERAGE_LCOV_OPTIONS: Extra options to pass to the collecting lcov
+# instance. (Default: $CODE_COVERAGE_LCOV_OPTIONS_DEFAULT)
+# - CODE_COVERAGE_LCOV_RMOPTS_DEFAULT: Extra options to pass to the filtering
+# lcov instance. (Default: empty)
+# - CODE_COVERAGE_LCOV_RMOPTS: Extra options to pass to the filtering lcov
+# instance. (Default: $CODE_COVERAGE_LCOV_RMOPTS_DEFAULT)
+# - CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT: Extra options to pass to the
+# genhtml instance. (Default: based on $CODE_COVERAGE_BRANCH_COVERAGE)
+# - CODE_COVERAGE_GENHTML_OPTIONS: Extra options to pass to the genhtml
+# instance. (Default: $CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT)
+# - CODE_COVERAGE_IGNORE_PATTERN: Extra glob pattern of files to ignore
+#
+# The generated report will be titled using the $(PACKAGE_NAME) and
+# $(PACKAGE_VERSION). In order to add the current git hash to the title,
+# use the git-version-gen script, available online.
+
+# Optional variables
+CODE_COVERAGE_DIRECTORY ?= $(top_builddir)
+CODE_COVERAGE_OUTPUT_FILE ?= $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage.info
+CODE_COVERAGE_OUTPUT_DIRECTORY ?= $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage
+CODE_COVERAGE_BRANCH_COVERAGE ?=
+CODE_COVERAGE_LCOV_SHOPTS_DEFAULT ?= $(if $(CODE_COVERAGE_BRANCH_COVERAGE),\
+--rc lcov_branch_coverage=$(CODE_COVERAGE_BRANCH_COVERAGE))
+CODE_COVERAGE_LCOV_SHOPTS ?= $(CODE_COVERAGE_LCOV_SHOPTS_DEFAULT)
+CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH ?= --gcov-tool "$(GCOV)"
+CODE_COVERAGE_LCOV_OPTIONS_DEFAULT ?= $(CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH)
+CODE_COVERAGE_LCOV_OPTIONS ?= $(CODE_COVERAGE_LCOV_OPTIONS_DEFAULT)
+CODE_COVERAGE_LCOV_RMOPTS_DEFAULT ?=
+CODE_COVERAGE_LCOV_RMOPTS ?= $(CODE_COVERAGE_LCOV_RMOPTS_DEFAULT)
+CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT ?=\
+$(if $(CODE_COVERAGE_BRANCH_COVERAGE),\
+--rc genhtml_branch_coverage=$(CODE_COVERAGE_BRANCH_COVERAGE))
+CODE_COVERAGE_GENHTML_OPTIONS ?= $(CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT)
+CODE_COVERAGE_IGNORE_PATTERN ?=
+
+GITIGNOREFILES ?=
+GITIGNOREFILES += $(CODE_COVERAGE_OUTPUT_FILE) $(CODE_COVERAGE_OUTPUT_DIRECTORY)
+
+code_coverage_v_lcov_cap = $(code_coverage_v_lcov_cap_$(V))
+code_coverage_v_lcov_cap_ = $(code_coverage_v_lcov_cap_$(AM_DEFAULT_VERBOSITY))
+code_coverage_v_lcov_cap_0 = @echo " LCOV --capture"\
+ $(CODE_COVERAGE_OUTPUT_FILE);
+code_coverage_v_lcov_ign = $(code_coverage_v_lcov_ign_$(V))
+code_coverage_v_lcov_ign_ = $(code_coverage_v_lcov_ign_$(AM_DEFAULT_VERBOSITY))
+code_coverage_v_lcov_ign_0 = @echo " LCOV --remove /tmp/*"\
+ $(CODE_COVERAGE_IGNORE_PATTERN);
+code_coverage_v_genhtml = $(code_coverage_v_genhtml_$(V))
+code_coverage_v_genhtml_ = $(code_coverage_v_genhtml_$(AM_DEFAULT_VERBOSITY))
+code_coverage_v_genhtml_0 = @echo " GEN " $(CODE_COVERAGE_OUTPUT_DIRECTORY);
+code_coverage_quiet = $(code_coverage_quiet_$(V))
+code_coverage_quiet_ = $(code_coverage_quiet_$(AM_DEFAULT_VERBOSITY))
+code_coverage_quiet_0 = --quiet
+
+# sanitize the test name: replace dashes and dots with underscores
+code_coverage_sanitize = $(subst -,_,$(subst .,_,$(1)))
+
+# Use recursive makes in order to ignore errors during check
+check-code-coverage:'"$CODE_COVERAGE_RULES_CHECK"'
+
+# Capture code coverage data
+code-coverage-capture: code-coverage-capture-hook'"$CODE_COVERAGE_RULES_CAPTURE"'
+
+# Hook rule executed before code-coverage-capture, overridable by the user
+code-coverage-capture-hook:
+
+'"$CODE_COVERAGE_RULES_CLEAN"'
+
+A''M_DISTCHECK_CONFIGURE_FLAGS ?=
+A''M_DISTCHECK_CONFIGURE_FLAGS += --disable-code-coverage
+
+.PHONY: check-code-coverage code-coverage-capture code-coverage-capture-hook code-coverage-clean
+']
+
+ AC_SUBST([CODE_COVERAGE_RULES])
+ m4_ifdef([_AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE([CODE_COVERAGE_RULES])])
+])
diff --git a/config/deb.am b/config/deb.am
index 1b51f93163a0..58ab96e186ad 100644
--- a/config/deb.am
+++ b/config/deb.am
@@ -2,16 +2,16 @@ deb-local:
@(if test "${HAVE_DPKGBUILD}" = "no"; then \
echo -e "\n" \
"*** Required util ${DPKGBUILD} missing. Please install the\n" \
- "*** package for your distribution which provides ${DPKGBUILD},\n" \
+ "*** package for your distribution which provides ${DPKGBUILD},\n" \
"*** re-run configure, and try again.\n"; \
- exit 1; \
+ exit 1; \
fi; \
if test "${HAVE_ALIEN}" = "no"; then \
echo -e "\n" \
"*** Required util ${ALIEN} missing. Please install the\n" \
- "*** package for your distribution which provides ${ALIEN},\n" \
+ "*** package for your distribution which provides ${ALIEN},\n" \
"*** re-run configure, and try again.\n"; \
- exit 1; \
+ exit 1; \
fi)
deb-kmod: deb-local rpm-kmod
diff --git a/config/kernel-acl-refcount.m4 b/config/kernel-acl-refcount.m4
new file mode 100644
index 000000000000..43e3c442dcd4
--- /dev/null
+++ b/config/kernel-acl-refcount.m4
@@ -0,0 +1,20 @@
+dnl #
+dnl # 4.16 kernel: check if struct posix_acl acl.a_refcount is a refcount_t.
+dnl # It's an atomic_t on older kernels.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_ACL_HAS_REFCOUNT], [
+ AC_MSG_CHECKING([whether posix_acl has refcount_t])
+ ZFS_LINUX_TRY_COMPILE([
+ #include
+ #include
+ #include
+ ],[
+ struct posix_acl acl;
+ refcount_t *r __attribute__ ((unused)) = &acl.a_refcount;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_ACL_REFCOUNT, 1, [posix_acl has refcount_t])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
diff --git a/config/kernel-acl.m4 b/config/kernel-acl.m4
index 311484349d4a..02cc020e5c90 100644
--- a/config/kernel-acl.m4
+++ b/config/kernel-acl.m4
@@ -184,6 +184,7 @@ AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_PERMISSION_WITH_NAMEIDATA], [
AC_MSG_CHECKING([whether iops->permission() wants nameidata])
ZFS_LINUX_TRY_COMPILE([
#include
+ #include
int permission_fn(struct inode *inode, int mask,
struct nameidata *nd) { return 0; }
diff --git a/config/kernel-create-nameidata.m4 b/config/kernel-create-nameidata.m4
index a71490a004a6..d4c155c57fc9 100644
--- a/config/kernel-create-nameidata.m4
+++ b/config/kernel-create-nameidata.m4
@@ -5,6 +5,7 @@ AC_DEFUN([ZFS_AC_KERNEL_CREATE_NAMEIDATA], [
AC_MSG_CHECKING([whether iops->create() passes nameidata])
ZFS_LINUX_TRY_COMPILE([
#include
+ #include
#ifdef HAVE_MKDIR_UMODE_T
int inode_create(struct inode *inode ,struct dentry *dentry,
diff --git a/config/kernel-dentry-operations.m4 b/config/kernel-dentry-operations.m4
index 3182490c908c..61f5a27af5a7 100644
--- a/config/kernel-dentry-operations.m4
+++ b/config/kernel-dentry-operations.m4
@@ -5,6 +5,7 @@ AC_DEFUN([ZFS_AC_KERNEL_D_REVALIDATE_NAMEIDATA], [
AC_MSG_CHECKING([whether dops->d_revalidate() takes struct nameidata])
ZFS_LINUX_TRY_COMPILE([
#include
+ #include
int revalidate (struct dentry *dentry,
struct nameidata *nidata) { return 0; }
diff --git a/config/kernel-get-disk-and-module.m4 b/config/kernel-get-disk-and-module.m4
new file mode 100644
index 000000000000..2a51a5af7dc1
--- /dev/null
+++ b/config/kernel-get-disk-and-module.m4
@@ -0,0 +1,19 @@
+dnl #
+dnl # 4.16 API change
+dnl # Verify if get_disk_and_module() symbol is available.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_GET_DISK_AND_MODULE],
+ [AC_MSG_CHECKING([whether get_disk_and_module() is available])
+ ZFS_LINUX_TRY_COMPILE_SYMBOL([
+ #include <linux/genhd.h>
+ ], [
+ struct gendisk *disk = NULL;
+ (void) get_disk_and_module(disk);
+ ], [get_disk_and_module], [block/genhd.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_GET_DISK_AND_MODULE,
+ 1, [get_disk_and_module() is available])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
diff --git a/config/kernel-get-link.m4 b/config/kernel-get-link.m4
index 022c49c54de7..3cda08c1b4d5 100644
--- a/config/kernel-get-link.m4
+++ b/config/kernel-get-link.m4
@@ -41,7 +41,7 @@ AC_DEFUN([ZFS_AC_KERNEL_FOLLOW_LINK], [
AC_DEFINE(HAVE_FOLLOW_LINK_NAMEIDATA, 1,
[iops->follow_link() nameidata])
],[
- AC_MSG_ERROR(no; please file a bug report)
+ AC_MSG_ERROR(no; please file a bug report)
])
])
])
diff --git a/config/kernel-global_page_state.m4 b/config/kernel-global_page_state.m4
new file mode 100644
index 000000000000..f4a40011f6f8
--- /dev/null
+++ b/config/kernel-global_page_state.m4
@@ -0,0 +1,109 @@
+dnl #
+dnl # 4.8 API change
+dnl #
+dnl # 75ef71840539 mm, vmstat: add infrastructure for per-node vmstats
+dnl # 599d0c954f91 mm, vmscan: move LRU lists to node
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_NODE_PAGE_STATE], [
+ AC_MSG_CHECKING([whether global_node_page_state() exists])
+ ZFS_LINUX_TRY_COMPILE([
+ #include <linux/mm.h>
+ #include <linux/vmstat.h>
+ ],[
+ (void) global_node_page_state(0);
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(ZFS_GLOBAL_NODE_PAGE_STATE, 1, [global_node_page_state() exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
+
+dnl #
+dnl # 4.14 API change
+dnl #
+dnl # c41f012ade0b mm: rename global_page_state to global_zone_page_state
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE], [
+ AC_MSG_CHECKING([whether global_zone_page_state() exists])
+ ZFS_LINUX_TRY_COMPILE([
+ #include <linux/mm.h>
+ #include <linux/vmstat.h>
+ ],[
+ (void) global_zone_page_state(0);
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(ZFS_GLOBAL_ZONE_PAGE_STATE, 1, [global_zone_page_state() exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
+
+dnl #
+dnl # Create a define and autoconf variable for an enum member
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_ENUM_MEMBER], [
+ AC_MSG_CHECKING([whether enum $2 contains $1])
+ AS_IF([AC_TRY_COMMAND("${srcdir}/scripts/enum-extract.pl" "$2" "$3" | egrep -qx $1)],[
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(m4_join([_], [ZFS_ENUM], m4_toupper($2), $1), 1, [enum $2 contains $1])
+ m4_join([_], [ZFS_ENUM], m4_toupper($2), $1)=1
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
+dnl # Sanity check helpers
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_ERROR],[
+ AC_MSG_RESULT(no)
+ AC_MSG_RESULT([$1 in either node_stat_item or zone_stat_item: $2])
+ AC_MSG_RESULT([configure needs updating, see: config/kernel-global_page_state.m4])
+ AC_MSG_FAILURE([SHUT 'ER DOWN CLANCY, SHE'S PUMPIN' MUD!])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK], [
+ enum_check_a="m4_join([_], [$ZFS_ENUM_NODE_STAT_ITEM], $1)"
+ enum_check_b="m4_join([_], [$ZFS_ENUM_ZONE_STAT_ITEM], $1)"
+ AS_IF([test -n "$enum_check_a" -a -n "$enum_check_b"],[
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_ERROR([$1], [DUPLICATE])
+ ])
+ AS_IF([test -z "$enum_check_a" -a -z "$enum_check_b"],[
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_ERROR([$1], [NOT FOUND])
+ ])
+])
+
+dnl #
+dnl # Ensure the config tests are finding one and only one of each enum of interest
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE_SANITY], [
+ AC_MSG_CHECKING([global_page_state enums are sane])
+
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_FILE_PAGES])
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_INACTIVE_ANON])
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_INACTIVE_FILE])
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_SLAB_RECLAIMABLE])
+
+ AC_MSG_RESULT(yes)
+])
+
+dnl #
+dnl # enum members in which we're interested
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE], [
+ ZFS_AC_KERNEL_GLOBAL_NODE_PAGE_STATE
+ ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE
+
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_FILE_PAGES], [node_stat_item], [$LINUX/include/linux/mmzone.h])
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_ANON], [node_stat_item], [$LINUX/include/linux/mmzone.h])
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_FILE], [node_stat_item], [$LINUX/include/linux/mmzone.h])
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_SLAB_RECLAIMABLE], [node_stat_item], [$LINUX/include/linux/mmzone.h])
+
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_FILE_PAGES], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_ANON], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_FILE], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
+ ZFS_AC_KERNEL_ENUM_MEMBER([NR_SLAB_RECLAIMABLE], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
+
+ ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE_SANITY
+])
diff --git a/config/kernel-inode-set-iversion.m4 b/config/kernel-inode-set-iversion.m4
new file mode 100644
index 000000000000..9a7d7890e54e
--- /dev/null
+++ b/config/kernel-inode-set-iversion.m4
@@ -0,0 +1,19 @@
+dnl #
+dnl # 4.16 API change
+dnl # inode_set_iversion introduced to set i_version
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_INODE_SET_IVERSION], [
+ AC_MSG_CHECKING([whether inode_set_iversion() exists])
+ ZFS_LINUX_TRY_COMPILE([
+ #include <linux/iversion.h>
+ ],[
+ struct inode inode;
+ inode_set_iversion(&inode, 1);
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_INODE_SET_IVERSION, 1,
+ [inode_set_iversion() exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
diff --git a/config/kernel-lookup-nameidata.m4 b/config/kernel-lookup-nameidata.m4
index 43f5fb4cbc7b..5453be5e8e38 100644
--- a/config/kernel-lookup-nameidata.m4
+++ b/config/kernel-lookup-nameidata.m4
@@ -5,6 +5,7 @@ AC_DEFUN([ZFS_AC_KERNEL_LOOKUP_NAMEIDATA], [
AC_MSG_CHECKING([whether iops->lookup() passes nameidata])
ZFS_LINUX_TRY_COMPILE([
#include
+ #include
struct dentry *inode_lookup(struct inode *inode,
struct dentry *dentry, struct nameidata *nidata)
diff --git a/config/kernel-vm_node_stat.m4 b/config/kernel-vm_node_stat.m4
deleted file mode 100644
index e1c42f884b0b..000000000000
--- a/config/kernel-vm_node_stat.m4
+++ /dev/null
@@ -1,22 +0,0 @@
-dnl #
-dnl # 4.8 API change
-dnl # kernel vm counters change
-dnl #
-AC_DEFUN([ZFS_AC_KERNEL_VM_NODE_STAT], [
- AC_MSG_CHECKING([whether to use vm_node_stat based fn's])
- ZFS_LINUX_TRY_COMPILE([
- #include <linux/mm.h>
- #include <linux/vmstat.h>
- ],[
- int a __attribute__ ((unused)) = NR_VM_NODE_STAT_ITEMS;
- long x __attribute__ ((unused)) =
- atomic_long_read(&vm_node_stat[0]);
- (void) global_node_page_state(0);
- ],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(ZFS_GLOBAL_NODE_PAGE_STATE, 1,
- [using global_node_page_state()])
- ],[
- AC_MSG_RESULT(no)
- ])
-])
diff --git a/config/kernel.m4 b/config/kernel.m4
index b759ccd39a29..419ed1a2c76f 100644
--- a/config/kernel.m4
+++ b/config/kernel.m4
@@ -40,6 +40,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
ZFS_AC_KERNEL_BLK_QUEUE_HAVE_BIO_RW_UNPLUG
ZFS_AC_KERNEL_BLK_QUEUE_HAVE_BLK_PLUG
+ ZFS_AC_KERNEL_GET_DISK_AND_MODULE
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GET_GENDISK
ZFS_AC_KERNEL_HAVE_BIO_SET_OP_ATTRS
@@ -65,6 +66,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL
ZFS_AC_KERNEL_INODE_OPERATIONS_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
+ ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_GET_ACL_HANDLE_CACHE
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
@@ -122,7 +124,8 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_RENAME_WANTS_FLAGS
ZFS_AC_KERNEL_HAVE_GENERIC_SETXATTR
ZFS_AC_KERNEL_CURRENT_TIME
- ZFS_AC_KERNEL_VM_NODE_STAT
+ ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
+ ZFS_AC_KERNEL_ACL_HAS_REFCOUNT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNELMAKE_PARAMS="$KERNELMAKE_PARAMS O=$LINUX_OBJ"
@@ -721,7 +724,7 @@ AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
modpost_flag=''
test "x$enable_linux_builtin" = xyes && modpost_flag='modpost=true' # fake modpost stage
AS_IF(
- [AC_TRY_COMMAND(cp conftest.c conftest.h build && make [$2] -C $LINUX_OBJ EXTRA_CFLAGS="-Werror $EXTRA_KCFLAGS" $ARCH_UM M=$PWD/build $modpost_flag) >/dev/null && AC_TRY_COMMAND([$3])],
+ [AC_TRY_COMMAND(cp conftest.c conftest.h build && make [$2] -C $LINUX_OBJ EXTRA_CFLAGS="-Werror $FRAME_LARGER_THAN $EXTRA_KCFLAGS" $ARCH_UM M=$PWD/build $modpost_flag) >/dev/null && AC_TRY_COMMAND([$3])],
[$4],
[_AC_MSG_LOG_CONFTEST m4_ifvaln([$5],[$5])]
)
diff --git a/config/tgz.am b/config/tgz.am
index 2997b1de2306..0657d045d164 100644
--- a/config/tgz.am
+++ b/config/tgz.am
@@ -2,9 +2,9 @@ tgz-local:
@(if test "${HAVE_ALIEN}" = "no"; then \
echo -e "\n" \
"*** Required util ${ALIEN} missing. Please install the\n" \
- "*** package for your distribution which provides ${ALIEN},\n" \
+ "*** package for your distribution which provides ${ALIEN},\n" \
"*** re-run configure, and try again.\n"; \
- exit 1; \
+ exit 1; \
fi)
tgz-kmod: tgz-local rpm-kmod
diff --git a/config/user-libblkid.m4 b/config/user-libblkid.m4
index 5bc7f466ae7a..88e6f990b74a 100644
--- a/config/user-libblkid.m4
+++ b/config/user-libblkid.m4
@@ -6,7 +6,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_LIBBLKID], [
LIBBLKID=
AC_CHECK_HEADER([blkid/blkid.h], [], [AC_MSG_FAILURE([
- *** blkid.h missing, libblkid-devel package required])])
+ *** blkid.h missing, libblkid-devel package required])])
AC_SUBST([LIBBLKID], ["-lblkid"])
AC_DEFINE([HAVE_LIBBLKID], 1, [Define if you have libblkid])
diff --git a/config/user-systemd.m4 b/config/user-systemd.m4
index c2105abce0ec..de2a44f10c6b 100644
--- a/config/user-systemd.m4
+++ b/config/user-systemd.m4
@@ -2,7 +2,8 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_SYSTEMD], [
AC_ARG_ENABLE(systemd,
AC_HELP_STRING([--enable-systemd],
[install systemd unit/preset files [[default: yes]]]),
- [],enable_systemd=yes)
+ [enable_systemd=$enableval],
+ [enable_systemd=check])
AC_ARG_WITH(systemdunitdir,
AC_HELP_STRING([--with-systemdunitdir=DIR],
@@ -19,16 +20,27 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_SYSTEMD], [
[install systemd module load files into dir [[/usr/lib/modules-load.d]]]),
systemdmoduleloaddir=$withval,systemdmodulesloaddir=/usr/lib/modules-load.d)
+ AS_IF([test "x$enable_systemd" = xcheck], [
+ AS_IF([systemctl --version >/dev/null 2>&1],
+ [enable_systemd=yes],
+ [enable_systemd=no])
+ ])
- AS_IF([test "x$enable_systemd" = xyes],
- [
+ AC_MSG_CHECKING(for systemd support)
+ AC_MSG_RESULT([$enable_systemd])
+
+ AS_IF([test "x$enable_systemd" = xyes], [
ZFS_INIT_SYSTEMD=systemd
ZFS_MODULE_LOAD=modules-load.d
+ DEFINE_SYSTEMD='--with systemd --define "_unitdir $(systemdunitdir)" --define "_presetdir $(systemdpresetdir)"'
modulesloaddir=$systemdmodulesloaddir
- ])
+ ],[
+ DEFINE_SYSTEMD='--without systemd'
+ ])
AC_SUBST(ZFS_INIT_SYSTEMD)
AC_SUBST(ZFS_MODULE_LOAD)
+ AC_SUBST(DEFINE_SYSTEMD)
AC_SUBST(systemdunitdir)
AC_SUBST(systemdpresetdir)
AC_SUBST(modulesloaddir)
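With systemd support now defaulting to "check", configure probes for systemctl and falls back cleanly; roughly:

    ./configure                     # auto-detects systemd via `systemctl --version`
    ./configure --disable-systemd   # never install unit/preset files
    # DEFINE_SYSTEMD then carries '--with systemd ...' or '--without systemd'
    # into the rpm/deb packaging defines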
diff --git a/config/zfs-build.m4 b/config/zfs-build.m4
index 7651dc2c12e4..fa913e815f2e 100644
--- a/config/zfs-build.m4
+++ b/config/zfs-build.m4
@@ -6,37 +6,76 @@ AC_DEFUN([ZFS_AC_LICENSE], [
AC_MSG_RESULT([$ZFS_META_LICENSE])
])
+AC_DEFUN([ZFS_AC_DEBUG_ENABLE], [
+ KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG -Werror"
+ HOSTCFLAGS="${HOSTCFLAGS} -DDEBUG -Werror"
+ DEBUG_CFLAGS="-DDEBUG -Werror"
+ DEBUG_STACKFLAGS="-fstack-check"
+ DEBUG_ZFS="_with_debug"
+ AC_DEFINE(ZFS_DEBUG, 1, [zfs debugging enabled])
+])
+
+AC_DEFUN([ZFS_AC_DEBUG_DISABLE], [
+ KERNELCPPFLAGS="${KERNELCPPFLAGS} -DNDEBUG "
+ HOSTCFLAGS="${HOSTCFLAGS} -DNDEBUG "
+ DEBUG_CFLAGS="-DNDEBUG"
+ DEBUG_STACKFLAGS=""
+ DEBUG_ZFS="_without_debug"
+])
+
AC_DEFUN([ZFS_AC_DEBUG], [
- AC_MSG_CHECKING([whether debugging is enabled])
+ AC_MSG_CHECKING([whether assertion support will be enabled])
AC_ARG_ENABLE([debug],
[AS_HELP_STRING([--enable-debug],
- [Enable generic debug support @<:@default=no@:>@])],
+ [Enable assertion support @<:@default=no@:>@])],
[],
[enable_debug=no])
- AS_IF([test "x$enable_debug" = xyes],
- [
- KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG -Werror"
- HOSTCFLAGS="${HOSTCFLAGS} -DDEBUG -Werror"
- DEBUG_CFLAGS="-DDEBUG -Werror"
- DEBUG_STACKFLAGS="-fstack-check"
- DEBUG_ZFS="_with_debug"
- AC_DEFINE(ZFS_DEBUG, 1, [zfs debugging enabled])
- ],
- [
- KERNELCPPFLAGS="${KERNELCPPFLAGS} -DNDEBUG "
- HOSTCFLAGS="${HOSTCFLAGS} -DNDEBUG "
- DEBUG_CFLAGS="-DNDEBUG"
- DEBUG_STACKFLAGS=""
- DEBUG_ZFS="_without_debug"
- ])
+ AS_CASE(["x$enable_debug"],
+ ["xyes"],
+ [ZFS_AC_DEBUG_ENABLE],
+ ["xno"],
+ [ZFS_AC_DEBUG_DISABLE],
+ [AC_MSG_ERROR([Unknown option $enable_debug])])
- AC_SUBST(DEBUG_CFLAGS)
AC_SUBST(DEBUG_STACKFLAGS)
AC_SUBST(DEBUG_ZFS)
AC_MSG_RESULT([$enable_debug])
])
+AC_DEFUN([ZFS_AC_DEBUGINFO_KERNEL], [
+ KERNELMAKE_PARAMS="$KERNELMAKE_PARAMS CONFIG_DEBUG_INFO=y"
+ KERNELCPPFLAGS="${KERNELCPPFLAGS} -fno-inline"
+])
+
+AC_DEFUN([ZFS_AC_DEBUGINFO_USER], [
+ DEBUG_CFLAGS="${DEBUG_CFLAGS} -g -fno-inline"
+])
+
+AC_DEFUN([ZFS_AC_DEBUGINFO], [
+ AC_MSG_CHECKING([whether debuginfo support will be forced])
+ AC_ARG_ENABLE([debuginfo],
+ [AS_HELP_STRING([--enable-debuginfo],
+ [Force generation of debuginfo @<:@default=no@:>@])],
+ [],
+ [enable_debuginfo=no])
+
+ AS_CASE(["x$enable_debuginfo"],
+ ["xyes"],
+ [ZFS_AC_DEBUGINFO_KERNEL
+ ZFS_AC_DEBUGINFO_USER],
+ ["xkernel"],
+ [ZFS_AC_DEBUGINFO_KERNEL],
+ ["xuser"],
+ [ZFS_AC_DEBUGINFO_USER],
+ ["xno"],
+ [],
+ [AC_MSG_ERROR([Unknown option $enable_debug])])
+
+ AC_SUBST(DEBUG_CFLAGS)
+ AC_MSG_RESULT([$enable_debuginfo])
+])
+
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
ZFS_AC_CONFIG_ALWAYS_NO_UNUSED_BUT_SET_VARIABLE
ZFS_AC_CONFIG_ALWAYS_NO_BOOL_COMPARE
@@ -121,9 +160,36 @@ AC_DEFUN([ZFS_AC_RPM], [
])
RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1"'
- RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)" --define "_initconfdir $(DEFAULT_INITCONF_DIR)" $(DEFINE_INITRAMFS)'
- RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)" --define "require_spldir $(SPL)" --define "require_splobj $(SPL_OBJ)" --define "ksrc $(LINUX)" --define "kobj $(LINUX_OBJ)"'
- RPM_DEFINE_DKMS=
+ RPM_DEFINE_COMMON+=' --define "$(DEBUGINFO_ZFS) 1"'
+ RPM_DEFINE_COMMON+=' --define "$(ASAN_ZFS) 1"'
+
+ RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)"'
+ RPM_DEFINE_UTIL+=' --define "_udevdir $(udevdir)"'
+ RPM_DEFINE_UTIL+=' --define "_udevruledir $(udevruledir)"'
+ RPM_DEFINE_UTIL+=' --define "_initconfdir $(DEFAULT_INITCONF_DIR)"'
+ RPM_DEFINE_UTIL+=' $(DEFINE_INITRAMFS)'
+ RPM_DEFINE_UTIL+=' $(DEFINE_SYSTEMD)'
+
+ dnl # Override default lib directory on Debian/Ubuntu systems. The provided
+ dnl # /usr/lib/rpm/platform/<arch>/macros files do not specify the correct
+ dnl # path for multiarch systems as described by the packaging guidelines.
+ dnl #
+ dnl # https://wiki.ubuntu.com/MultiarchSpec
+ dnl # https://wiki.debian.org/Multiarch/Implementation
+ dnl #
+ AS_IF([test "$DEFAULT_PACKAGE" = "deb"], [
+ MULTIARCH_LIBDIR="lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)"
+ RPM_DEFINE_UTIL+=' --define "_lib $(MULTIARCH_LIBDIR)"'
+ AC_SUBST(MULTIARCH_LIBDIR)
+ ])
+
+ RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)"'
+ RPM_DEFINE_KMOD+=' --define "require_spldir $(SPL)"'
+ RPM_DEFINE_KMOD+=' --define "require_splobj $(SPL_OBJ)"'
+ RPM_DEFINE_KMOD+=' --define "ksrc $(LINUX)"'
+ RPM_DEFINE_KMOD+=' --define "kobj $(LINUX_OBJ)"'
+
+ RPM_DEFINE_DKMS=''
SRPM_DEFINE_COMMON='--define "build_src_rpm 1"'
SRPM_DEFINE_UTIL=
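Taken together, the switches split out above are driven from configure roughly as follows (a sketch; the accepted values come straight from the AS_CASE branches):

    ./configure --enable-debug              # assertions: -DDEBUG -Werror
    ./configure --enable-debuginfo          # kernel and user debuginfo
    ./configure --enable-debuginfo=kernel   # CONFIG_DEBUG_INFO=y, -fno-inline
    ./configure --enable-debuginfo=user     # add -g -fno-inline to user builds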
diff --git a/configure.ac b/configure.ac
index 0c7977ef80ea..d71712e4cd12 100644
--- a/configure.ac
+++ b/configure.ac
@@ -50,11 +50,13 @@ AC_PROG_CC
AC_PROG_LIBTOOL
AM_PROG_AS
AM_PROG_CC_C_O
+AX_CODE_COVERAGE
ZFS_AC_LICENSE
ZFS_AC_PACKAGE
ZFS_AC_CONFIG
ZFS_AC_DEBUG
+ZFS_AC_DEBUGINFO
AC_CONFIG_FILES([
Makefile
diff --git a/contrib/initramfs/Makefile.am b/contrib/initramfs/Makefile.am
index 998e588aba72..b2294082180e 100644
--- a/contrib/initramfs/Makefile.am
+++ b/contrib/initramfs/Makefile.am
@@ -1,8 +1,10 @@
initrddir = $(datarootdir)/initramfs-tools
-initrd_SCRIPTS = conf-hooks.d/zfs hooks/zfs scripts/zfs scripts/local-top/zfs
+initrd_SCRIPTS = \
+ conf.d/zfs conf-hooks.d/zfs hooks/zfs scripts/zfs scripts/local-top/zfs
EXTRA_DIST = \
+ $(top_srcdir)/contrib/initramfs/conf.d/zfs \
$(top_srcdir)/contrib/initramfs/conf-hooks.d/zfs \
$(top_srcdir)/contrib/initramfs/hooks/zfs \
$(top_srcdir)/contrib/initramfs/scripts/zfs \
@@ -10,7 +12,7 @@ EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/README.initramfs.markdown
install-initrdSCRIPTS: $(EXTRA_DIST)
- for d in conf-hooks.d hooks scripts scripts/local-top; do \
+ for d in conf.d conf-hooks.d hooks scripts scripts/local-top; do \
$(MKDIR_P) $(DESTDIR)$(initrddir)/$$d; \
cp $(top_srcdir)/contrib/initramfs/$$d/zfs \
$(DESTDIR)$(initrddir)/$$d/; \
diff --git a/contrib/initramfs/conf.d/zfs b/contrib/initramfs/conf.d/zfs
new file mode 100644
index 000000000000..c67d75ba8672
--- /dev/null
+++ b/contrib/initramfs/conf.d/zfs
@@ -0,0 +1,8 @@
+for x in $(cat /proc/cmdline)
+do
+ case $x in
+ root=ZFS=*|root=zfs:*)
+ BOOT=zfs
+ ;;
+ esac
+done
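This snippet only sets BOOT=zfs when the kernel command line already names a ZFS root; for illustration, either of these (hypothetical) command lines matches the case patterns above:

    linux /vmlinuz root=ZFS=rpool/ROOT/debian ro
    linux /vmlinuz root=zfs:AUTO ro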
diff --git a/contrib/initramfs/scripts/zfs b/contrib/initramfs/scripts/zfs
index 8770a2e8e1b7..86329e764f06 100644
--- a/contrib/initramfs/scripts/zfs
+++ b/contrib/initramfs/scripts/zfs
@@ -478,7 +478,7 @@ destroy_fs()
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
- echo "Failed to destroy '$fs'. Please make sure that '$fs' is not availible."
+ echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
echo "Hint: Try: zfs destroy -Rfn $fs"
echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
/bin/sh
diff --git a/etc/systemd/system/zfs-import-cache.service.in b/etc/systemd/system/zfs-import-cache.service.in
index 9d677f8dda38..726c468ca30c 100644
--- a/etc/systemd/system/zfs-import-cache.service.in
+++ b/etc/systemd/system/zfs-import-cache.service.in
@@ -12,7 +12,7 @@ ConditionPathExists=@sysconfdir@/zfs/zpool.cache
[Service]
Type=oneshot
RemainAfterExit=yes
-ExecStartPre=/sbin/modprobe zfs
+ExecStartPre=-/sbin/modprobe zfs
ExecStart=@sbindir@/zpool import -c @sysconfdir@/zfs/zpool.cache -aN
[Install]
diff --git a/etc/systemd/system/zfs-import-scan.service.in b/etc/systemd/system/zfs-import-scan.service.in
index 227f5b74f36e..abc8e8e6d94a 100644
--- a/etc/systemd/system/zfs-import-scan.service.in
+++ b/etc/systemd/system/zfs-import-scan.service.in
@@ -11,7 +11,7 @@ ConditionPathExists=!@sysconfdir@/zfs/zpool.cache
[Service]
Type=oneshot
RemainAfterExit=yes
-ExecStartPre=/sbin/modprobe zfs
+ExecStartPre=-/sbin/modprobe zfs
ExecStart=@sbindir@/zpool import -aN -o cachefile=none
[Install]
diff --git a/include/linux/Makefile.am b/include/linux/Makefile.am
index 9bb0b3493e5d..89c2689f6f0c 100644
--- a/include/linux/Makefile.am
+++ b/include/linux/Makefile.am
@@ -9,7 +9,8 @@ KERNEL_H = \
$(top_srcdir)/include/linux/kmap_compat.h \
$(top_srcdir)/include/linux/simd_x86.h \
$(top_srcdir)/include/linux/simd_aarch64.h \
- $(top_srcdir)/include/linux/mod_compat.h
+ $(top_srcdir)/include/linux/mod_compat.h \
+ $(top_srcdir)/include/linux/page_compat.h
USER_H =
diff --git a/include/linux/blkdev_compat.h b/include/linux/blkdev_compat.h
index c8a8e856dee5..4406493e4caa 100644
--- a/include/linux/blkdev_compat.h
+++ b/include/linux/blkdev_compat.h
@@ -139,6 +139,14 @@ blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
#endif
}
+#ifndef HAVE_GET_DISK_AND_MODULE
+static inline struct kobject *
+get_disk_and_module(struct gendisk *disk)
+{
+ return (get_disk(disk));
+}
+#endif
+
#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
diff --git a/include/linux/page_compat.h b/include/linux/page_compat.h
new file mode 100644
index 000000000000..95acb7d53647
--- /dev/null
+++ b/include/linux/page_compat.h
@@ -0,0 +1,78 @@
+#ifndef _ZFS_PAGE_COMPAT_H
+#define _ZFS_PAGE_COMPAT_H
+
+/*
+ * We have various enum members moving between two separate enum types,
+ * and accessed by different functions at various times. Centralise the
+ * insanity.
+ *
+ * < v4.8: all enums in zone_stat_item, via global_page_state()
+ * v4.8: some enums moved to node_stat_item, global_node_page_state() introduced
+ * v4.13: some enums moved from zone_stat_item to node_stat_item
+ * v4.14: global_page_state() renamed to global_zone_page_state()
+ *
+ * The defines used here are created by config/kernel-global_page_state.m4
+ */
+
+/*
+ * Create our own accessor functions to follow the Linux API changes
+ */
+#if defined(ZFS_GLOBAL_ZONE_PAGE_STATE)
+
+/* global_zone_page_state() introduced */
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES)
+#define nr_file_pages() global_node_page_state(NR_FILE_PAGES)
+#else
+#define nr_file_pages() global_zone_page_state(NR_FILE_PAGES)
+#endif
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON)
+#define nr_inactive_anon_pages() global_node_page_state(NR_INACTIVE_ANON)
+#else
+#define nr_inactive_anon_pages() global_zone_page_state(NR_INACTIVE_ANON)
+#endif
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE)
+#define nr_inactive_file_pages() global_node_page_state(NR_INACTIVE_FILE)
+#else
+#define nr_inactive_file_pages() global_zone_page_state(NR_INACTIVE_FILE)
+#endif
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_SLAB_RECLAIMABLE)
+#define nr_slab_reclaimable_pages() global_node_page_state(NR_SLAB_RECLAIMABLE)
+#else
+#define nr_slab_reclaimable_pages() global_zone_page_state(NR_SLAB_RECLAIMABLE)
+#endif
+
+#elif defined(ZFS_GLOBAL_NODE_PAGE_STATE)
+
+/* global_node_page_state() introduced */
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES)
+#define nr_file_pages() global_node_page_state(NR_FILE_PAGES)
+#else
+#define nr_file_pages() global_page_state(NR_FILE_PAGES)
+#endif
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON)
+#define nr_inactive_anon_pages() global_node_page_state(NR_INACTIVE_ANON)
+#else
+#define nr_inactive_anon_pages() global_page_state(NR_INACTIVE_ANON)
+#endif
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE)
+#define nr_inactive_file_pages() global_node_page_state(NR_INACTIVE_FILE)
+#else
+#define nr_inactive_file_pages() global_page_state(NR_INACTIVE_FILE)
+#endif
+#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_SLAB_RECLAIMABLE)
+#define nr_slab_reclaimable_pages() global_node_page_state(NR_SLAB_RECLAIMABLE)
+#else
+#define nr_slab_reclaimable_pages() global_page_state(NR_SLAB_RECLAIMABLE)
+#endif
+
+#else
+
+/* global_page_state() only */
+#define nr_file_pages() global_page_state(NR_FILE_PAGES)
+#define nr_inactive_anon_pages() global_page_state(NR_INACTIVE_ANON)
+#define nr_inactive_file_pages() global_page_state(NR_INACTIVE_FILE)
+#define nr_slab_reclaimable_pages() global_page_state(NR_SLAB_RECLAIMABLE)
+
+#endif /* ZFS_GLOBAL_ZONE_PAGE_STATE */
+
+#endif /* _ZFS_PAGE_COMPAT_H */
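The wrappers above are meant to be consumed the way arc.c does later in this patch: callers only ever use the nr_*_pages() names, and the header maps them at compile time onto whichever kernel accessor the configure checks detected. A compilable userspace sketch of that compile-time dispatch idea, with made-up demo_* names standing in for the kernel functions (not part of the patch):

    #include <stdio.h>

    /*
     * Stand-ins for the kernel page-state accessors; in the real header the
     * selection is driven by ZFS_GLOBAL_ZONE_PAGE_STATE /
     * ZFS_GLOBAL_NODE_PAGE_STATE and the ZFS_ENUM_* defines generated by
     * config/kernel-global_page_state.m4.
     */
    static unsigned long demo_zone_page_state(int item) { (void) item; return (1000); }
    static unsigned long demo_node_page_state(int item) { (void) item; return (2000); }

    #define DEMO_NR_FILE_PAGES 0

    #ifdef DEMO_HAVE_NODE_STATE
    #define demo_nr_file_pages() demo_node_page_state(DEMO_NR_FILE_PAGES)
    #else
    #define demo_nr_file_pages() demo_zone_page_state(DEMO_NR_FILE_PAGES)
    #endif

    int
    main(void)
    {
        /* Callers never need to know which backing API was selected. */
        printf("file pages: %lu\n", demo_nr_file_pages());
        return (0);
    }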
diff --git a/include/linux/vfs_compat.h b/include/linux/vfs_compat.h
index 6111f0afca1d..6347268af9ed 100644
--- a/include/linux/vfs_compat.h
+++ b/include/linux/vfs_compat.h
@@ -272,6 +272,10 @@ lseek_execute(
* This is several orders of magnitude larger than expected grace period.
* At 60 seconds the kernel will also begin issuing RCU stall warnings.
*/
+#ifdef refcount_t
+#undef refcount_t
+#endif
+
 #include <linux/posix_acl.h>
#if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
@@ -284,9 +288,13 @@ zpl_posix_acl_release(struct posix_acl *acl)
{
if ((acl == NULL) || (acl == ACL_NOT_CACHED))
return;
-
+#ifdef HAVE_ACL_REFCOUNT
+ if (refcount_dec_and_test(&acl->a_refcount))
+ zpl_posix_acl_release_impl(acl);
+#else
if (atomic_dec_and_test(&acl->a_refcount))
zpl_posix_acl_release_impl(acl);
+#endif
}
#endif /* HAVE_POSIX_ACL_RELEASE */
@@ -397,6 +405,8 @@ typedef mode_t zpl_equivmode_t;
#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
#endif
+#define refcount_t zfs_refcount_t
+
#endif /* CONFIG_FS_POSIX_ACL */
/*
@@ -578,4 +588,18 @@ current_time(struct inode *ip)
}
#endif
+/*
+ * 4.16 API change
+ * Added iversion interface for managing inode version field.
+ */
+#ifdef HAVE_INODE_SET_IVERSION
+#include <linux/iversion.h>
+#else
+static inline void
+inode_set_iversion(struct inode *ip, u64 val)
+{
+ ip->i_version = val;
+}
+#endif
+
#endif /* _ZFS_VFS_H */
diff --git a/include/sys/mmp.h b/include/sys/mmp.h
index 5b2fea1a66b1..1ce685f9c38e 100644
--- a/include/sys/mmp.h
+++ b/include/sys/mmp.h
@@ -42,6 +42,7 @@ typedef struct mmp_thread {
uint64_t mmp_delay; /* decaying avg ns between MMP writes */
uberblock_t mmp_ub; /* last ub written by sync */
zio_t *mmp_zio_root; /* root of mmp write zios */
+ uint64_t mmp_kstat_id; /* unique id for next MMP write kstat */
} mmp_thread_t;
diff --git a/include/sys/spa.h b/include/sys/spa.h
index 67235871fb7f..53fa5514a856 100644
--- a/include/sys/spa.h
+++ b/include/sys/spa.h
@@ -759,8 +759,10 @@ extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
struct dsl_pool *);
extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
+extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
+ hrtime_t duration);
extern void spa_mmp_history_add(uint64_t txg, uint64_t timestamp,
- uint64_t mmp_delay, vdev_t *vd, int label);
+ uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id);
/* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);
diff --git a/include/sys/trace_acl.h b/include/sys/trace_acl.h
index 1057e560ba6c..610bbe29c25c 100644
--- a/include/sys/trace_acl.h
+++ b/include/sys/trace_acl.h
@@ -68,7 +68,6 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__field(uint32_t, i_gid)
__field(unsigned long, i_ino)
__field(unsigned int, i_nlink)
- __field(u64, i_version)
__field(loff_t, i_size)
__field(unsigned int, i_blkbits)
__field(unsigned short, i_bytes)
@@ -103,7 +102,6 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__entry->i_gid = KGID_TO_SGID(ZTOI(zn)->i_gid);
__entry->i_ino = zn->z_inode.i_ino;
__entry->i_nlink = zn->z_inode.i_nlink;
- __entry->i_version = zn->z_inode.i_version;
__entry->i_size = zn->z_inode.i_size;
__entry->i_blkbits = zn->z_inode.i_blkbits;
__entry->i_bytes = zn->z_inode.i_bytes;
@@ -121,7 +119,7 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
"mapcnt %llu size %llu pflags %llu "
"sync_cnt %u mode 0x%x is_sa %d "
"is_mapped %d is_ctldir %d is_stale %d inode { "
- "uid %u gid %u ino %lu nlink %u version %llu size %lli "
+ "uid %u gid %u ino %lu nlink %u size %lli "
"blkbits %u bytes %u mode 0x%x generation %x } } "
"ace { type %u flags %u access_mask %u } mask_matched %u",
__entry->z_id, __entry->z_unlinked, __entry->z_atime_dirty,
@@ -131,7 +129,7 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__entry->z_is_sa, __entry->z_is_mapped,
__entry->z_is_ctldir, __entry->z_is_stale, __entry->i_uid,
__entry->i_gid, __entry->i_ino, __entry->i_nlink,
- __entry->i_version, __entry->i_size, __entry->i_blkbits,
+ __entry->i_size, __entry->i_blkbits,
__entry->i_bytes, __entry->i_mode, __entry->i_generation,
__entry->z_type, __entry->z_flags, __entry->z_access_mask,
__entry->mask_matched)
diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h
index 4c2e3cd2e0af..4f9f1a903b5a 100644
--- a/include/sys/vdev_impl.h
+++ b/include/sys/vdev_impl.h
@@ -238,6 +238,7 @@ struct vdev {
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
hrtime_t vdev_mmp_pending; /* 0 if write finished */
+ uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
/*
* For DTrace to work in userland (libzpool) context, these fields must
@@ -254,8 +255,6 @@ struct vdev {
* We rate limit ZIO delay and ZIO checksum events, since they
* can flood ZED with tons of events when a drive is acting up.
*/
-#define DELAYS_PER_SECOND 5
-#define CHECKSUMS_PER_SECOND 5
zfs_ratelimit_t vdev_delay_rl;
zfs_ratelimit_t vdev_checksum_rl;
};
diff --git a/include/sys/zap_leaf.h b/include/sys/zap_leaf.h
index e784c5963b2e..a3da1036a5ee 100644
--- a/include/sys/zap_leaf.h
+++ b/include/sys/zap_leaf.h
@@ -46,10 +46,15 @@ struct zap_stats;
 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
* entries - header space (2*chunksize)
*/
-#define ZAP_LEAF_NUMCHUNKS(l) \
- (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
+#define ZAP_LEAF_NUMCHUNKS_BS(bs) \
+ (((1<<(bs)) - 2*ZAP_LEAF_HASH_NUMENTRIES_BS(bs)) / \
ZAP_LEAF_CHUNKSIZE - 2)
+#define ZAP_LEAF_NUMCHUNKS(l) (ZAP_LEAF_NUMCHUNKS_BS(((l)->l_bs)))
+
+#define ZAP_LEAF_NUMCHUNKS_DEF \
+ (ZAP_LEAF_NUMCHUNKS_BS(fzap_default_block_shift))
+
/*
* The amount of space within the chunk available for the array is:
* chunk size - space for type (1) - space for next pointer (2)
@@ -74,8 +79,10 @@ struct zap_stats;
* which is less than block size / CHUNKSIZE (24) / minimum number of
* chunks per entry (3).
*/
-#define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
-#define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))
+#define ZAP_LEAF_HASH_SHIFT_BS(bs) ((bs) - 5)
+#define ZAP_LEAF_HASH_NUMENTRIES_BS(bs) (1 << ZAP_LEAF_HASH_SHIFT_BS(bs))
+#define ZAP_LEAF_HASH_SHIFT(l) (ZAP_LEAF_HASH_SHIFT_BS(((l)->l_bs)))
+#define ZAP_LEAF_HASH_NUMENTRIES(l) (ZAP_LEAF_HASH_NUMENTRIES_BS(((l)->l_bs)))
/*
* The chunks start immediately after the hash table. The end of the
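As a rough sanity check on the new *_BS macros (assuming the usual fzap_default_block_shift of 14, i.e. 16 KiB leaf blocks, and ZAP_LEAF_CHUNKSIZE of 24):

    ZAP_LEAF_HASH_NUMENTRIES_BS(14) = 1 << (14 - 5)            = 512
    ZAP_LEAF_NUMCHUNKS_BS(14)       = (16384 - 2*512) / 24 - 2 = 638

so ZAP_LEAF_NUMCHUNKS_DEF evaluates to 638 chunks per default-sized leaf, which is the budget that mze_canfit_fzap_leaf() in zap_micro.c checks against further down in this patch.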
diff --git a/include/sys/zfs_ratelimit.h b/include/sys/zfs_ratelimit.h
index f36e07841f92..012825fadb22 100644
--- a/include/sys/zfs_ratelimit.h
+++ b/include/sys/zfs_ratelimit.h
@@ -25,13 +25,19 @@
typedef struct {
hrtime_t start;
unsigned int count;
- unsigned int burst; /* Number to allow per interval */
- unsigned int interval; /* Interval length in seconds */
+
+ /*
+ * Pointer to number of events per interval. We do this to
+ * allow the burst to be a (changeable) module parameter.
+ */
+ unsigned int *burst;
+
+ unsigned int interval; /* Interval length in seconds */
kmutex_t lock;
} zfs_ratelimit_t;
int zfs_ratelimit(zfs_ratelimit_t *rl);
-void zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int burst,
+void zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int *burst,
unsigned int interval);
void zfs_ratelimit_fini(zfs_ratelimit_t *rl);
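Making the burst a pointer lets the limit track a module parameter that can change at runtime; the ratelimit state always dereferences the live tunable rather than a value captured at init time, as vdev.c does further down in this patch. A self-contained userspace sketch of that pattern (illustrative demo_* names, no locking):

    #include <stdio.h>
    #include <time.h>

    /* Tunable, analogous to a 0644 module parameter such as zfs_delays_per_second. */
    static unsigned int demo_events_per_second = 20;

    typedef struct {
        time_t start;          /* start of the current interval */
        unsigned int count;    /* events seen in the current interval */
        unsigned int *burst;   /* pointer to the live tunable */
        unsigned int interval; /* interval length in seconds */
    } demo_ratelimit_t;

    static void
    demo_ratelimit_init(demo_ratelimit_t *rl, unsigned int *burst,
        unsigned int interval)
    {
        rl->count = 0;
        rl->start = 0;
        rl->burst = burst;
        rl->interval = interval;
    }

    /* Returns 1 if the event is allowed, 0 if it is rate limited. */
    static int
    demo_ratelimit(demo_ratelimit_t *rl)
    {
        time_t now = time(NULL);

        if (now - rl->start >= rl->interval) {
            rl->start = now;
            rl->count = 0;
        }
        if (rl->count >= *rl->burst) /* dereference: always the current limit */
            return (0);
        rl->count++;
        return (1);
    }

    int
    main(void)
    {
        demo_ratelimit_t rl;
        int dropped = 0;

        demo_ratelimit_init(&rl, &demo_events_per_second, 1);
        for (int i = 0; i < 100; i++)
            dropped += !demo_ratelimit(&rl);
        printf("dropped %d of 100 events\n", dropped); /* typically 80 with burst 20 */

        /* Raising the tunable takes effect without re-initializing. */
        demo_events_per_second = 100;
        return (0);
    }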
diff --git a/include/sys/zio.h b/include/sys/zio.h
index 4eaabc38c86f..0d741f8e2ee4 100644
--- a/include/sys/zio.h
+++ b/include/sys/zio.h
@@ -215,6 +215,9 @@ enum zio_flag {
(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
ZIO_FLAG_CANFAIL)
+#define ZIO_CHILD_BIT(x) (1 << (x))
+#define ZIO_CHILD_BIT_IS_SET(val, x) ((val) & (1 << (x)))
+
enum zio_child {
ZIO_CHILD_VDEV = 0,
ZIO_CHILD_GANG,
@@ -223,6 +226,14 @@ enum zio_child {
ZIO_CHILD_TYPES
};
+#define ZIO_CHILD_VDEV_BIT ZIO_CHILD_BIT(ZIO_CHILD_VDEV)
+#define ZIO_CHILD_GANG_BIT ZIO_CHILD_BIT(ZIO_CHILD_GANG)
+#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
+#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
+#define ZIO_CHILD_ALL_BITS \
+ (ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
+ ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
+
enum zio_wait_type {
ZIO_WAIT_READY = 0,
ZIO_WAIT_DONE,
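The new bit macros let one zio_wait_for_children() call cover several child types while acquiring io_lock only once, instead of the previous one-call-per-type pattern (the zio.c hunks below switch every caller over). A small self-contained sketch of the membership test, with demo_* names that are not part of the patch:

    #include <stdio.h>

    enum demo_child {
        DEMO_CHILD_VDEV = 0,
        DEMO_CHILD_GANG,
        DEMO_CHILD_DDT,
        DEMO_CHILD_LOGICAL,
        DEMO_CHILD_TYPES
    };

    #define DEMO_CHILD_BIT(x)             (1 << (x))
    #define DEMO_CHILD_BIT_IS_SET(val, x) ((val) & (1 << (x)))

    /* Pending-children counts indexed by child type. */
    static unsigned long demo_children[DEMO_CHILD_TYPES] = { 0, 2, 0, 1 };

    /* Return 1 if any selected child type still has outstanding children. */
    static int
    demo_wait_needed(unsigned char childbits)
    {
        for (int c = 0; c < DEMO_CHILD_TYPES; c++) {
            if (!DEMO_CHILD_BIT_IS_SET(childbits, c))
                continue;
            if (demo_children[c] != 0)
                return (1);
        }
        return (0);
    }

    int
    main(void)
    {
        unsigned char mask = DEMO_CHILD_BIT(DEMO_CHILD_GANG) |
            DEMO_CHILD_BIT(DEMO_CHILD_LOGICAL);

        printf("%d\n", demo_wait_needed(mask));                            /* 1 */
        printf("%d\n", demo_wait_needed(DEMO_CHILD_BIT(DEMO_CHILD_VDEV))); /* 0 */
        return (0);
    }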
diff --git a/lib/libzfs/libzfs_sendrecv.c b/lib/libzfs/libzfs_sendrecv.c
index ec190022f0af..66d89067bcee 100644
--- a/lib/libzfs/libzfs_sendrecv.c
+++ b/lib/libzfs/libzfs_sendrecv.c
@@ -3254,6 +3254,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
zfs_type_t type;
boolean_t toplevel = B_FALSE;
boolean_t zoned = B_FALSE;
+ boolean_t hastoken = B_FALSE;
begin_time = time(NULL);
bzero(origin, MAXNAMELEN);
@@ -3535,6 +3536,11 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
/* we want to know if we're zoned when validating -o|-x props */
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+ /* may need this info later; get it now that we have zhp around */
+ if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
+ NULL, NULL, 0, B_TRUE) == 0)
+ hastoken = B_TRUE;
+
/* gather existing properties on destination */
origprops = fnvlist_alloc();
fnvlist_merge(origprops, zhp->zfs_props);
@@ -3741,9 +3747,19 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
break;
case EDQUOT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "destination %s space quota exceeded"), name);
+ "destination %s space quota exceeded."), name);
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
break;
+ case EBUSY:
+ if (hastoken) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination %s contains "
+ "partially-complete state from "
+ "\"zfs receive -s\"."), name);
+ (void) zfs_error(hdl, EZFS_BUSY, errbuf);
+ break;
+ }
+ /* fallthru */
default:
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
}
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index 19e6becfd1c6..8d5ac2576141 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -674,17 +674,6 @@ max arc_p
Default value: \fB0\fR.
.RE
-.sp
-.ne 2
-.na
-\fBzfs_arc_p_aggressive_disable\fR (int)
-.ad
-.RS 12n
-Disable aggressive arc_p growth
-.sp
-Use \fB1\fR for yes (default) and \fB0\fR to disable.
-.RE
-
.sp
.ne 2
.na
@@ -750,6 +739,34 @@ Disable pool import at module load by ignoring the cache file (typically \fB/etc
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
+.sp
+.ne 2
+.na
+\fBzfs_checksums_per_second\fR (int)
+.ad
+.RS 12n
+Rate limit checksum events to this many per second. Note that this should
+not be set below the zed thresholds (currently 10 checksums over 10 sec)
+or else zed may not trigger any action.
+.sp
+Default value: \fB20\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_commit_timeout_pct\fR (int)
+.ad
+.RS 12n
+This controls the amount of time that a ZIL block (lwb) will remain "open"
+when it isn't "full", and it has a thread waiting for it to be committed to
+stable storage. The timeout is scaled based on a percentage of the last lwb
+latency to avoid significantly impacting the latency of each individual
+transaction record (itx).
+.sp
+Default value: \fB5\fR%.
+.RE
+
.sp
.ne 2
.na
@@ -877,6 +894,17 @@ Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
Default value: \fB500,000\fR.
.RE
+.sp
+.ne 2
+.na
+\fBzfs_delays_per_second\fR (int)
+.ad
+.RS 12n
+Rate limit IO delay events to this many per second.
+.sp
+Default value: \fB20\fR.
+.RE
+
.sp
.ne 2
.na
diff --git a/man/man8/zdb.8 b/man/man8/zdb.8
index 4e47de7bef8d..d991aae4caac 100644
--- a/man/man8/zdb.8
+++ b/man/man8/zdb.8
@@ -246,7 +246,9 @@ and, optionally,
.It Sy b Ar offset
Print block pointer
.It Sy d
-Decompress the block
+Decompress the block. Set environment variable
+.Nm ZDB_NO_ZLE
+to skip zle when guessing.
.It Sy e
Byte swap the block
.It Sy g
diff --git a/man/man8/zed.8.in b/man/man8/zed.8.in
index 2ab088d98a3c..645e91795aaa 100644
--- a/man/man8/zed.8.in
+++ b/man/man8/zed.8.in
@@ -27,6 +27,7 @@ ZED \- ZFS Event Daemon
[\fB\-L\fR]
[\fB\-M\fR]
[\fB\-p\fR \fIpidfile\fR]
+[\fB\-P\fR \fIpath\fR]
[\fB\-s\fR \fIstatefile\fR]
[\fB\-v\fR]
[\fB\-V\fR]
@@ -78,9 +79,16 @@ Read the enabled ZEDLETs from the specified directory.
.BI \-p\ pidfile
Write the daemon's process ID to the specified file.
.TP
+.BI \-P\ path
+Custom $PATH for zedlets to use. Normally zedlets run in a locked-down
+environment, with hardcoded paths to the ZFS commands ($ZFS, $ZPOOL, $ZED, ...),
+and a hardcoded $PATH. This is done for security reasons. However, the
+ZFS test suite uses a custom PATH for its ZFS commands, and passes it to zed
+with -P. In short, -P is only to be used by the ZFS test suite; never use
+it in production!
+.TP
.BI \-s\ statefile
Write the daemon's state to the specified file.
-
.SH ZEVENTS
.PP
A zevent is comprised of a list of nvpairs (name/value pairs). Each zevent
diff --git a/man/man8/zinject.8 b/man/man8/zinject.8
index 50fecfb64364..7f363974b3bb 100644
--- a/man/man8/zinject.8
+++ b/man/man8/zinject.8
@@ -111,6 +111,9 @@ Specify
.BR "dtl" " for an ECHILD error,"
.BR "io" " for an EIO error where reopening the device will succeed, or"
.BR "nxio" " for an ENXIO error where reopening the device will fail."
+For EIO and ENXIO, the "failed" reads or writes still occur. The probe simply
+sets the error value reported by the I/O pipeline so it appears the read or
+write failed.
.TP
.BI "\-f" " frequency"
Only inject errors a fraction of the time. Expressed as a real number
diff --git a/man/man8/zpool.8 b/man/man8/zpool.8
index 328ba3dce6a9..6d7c2271cd2c 100644
--- a/man/man8/zpool.8
+++ b/man/man8/zpool.8
@@ -655,7 +655,7 @@ Because the kernel destroys and recreates this file when pools are added and
removed, care should be taken when attempting to access this file.
When the last pool using a
.Sy cachefile
-is exported or destroyed, the file is removed.
+is exported or destroyed, the file will be empty.
.It Sy comment Ns = Ns Ar text
A text string consisting of printable ASCII characters that will be stored
such that it is available even if the pool becomes faulted.
@@ -733,7 +733,7 @@ man page. In order to enable this property each host must set a unique hostid.
See
.Xr genhostid 1
.Xr zgenhostid 8
-.Xr spl-module-paramters 5
+.Xr spl-module-parameters 5
for additional details. The default value is
.Sy off .
.It Sy version Ns = Ns Ar version
@@ -2087,10 +2087,10 @@ is faulted due to a missing device.
The results from this command are similar to the following:
.Bd -literal
# zpool list
-NAME SIZE ALLOC FREE FRAG EXPANDSZ CAP DEDUP HEALTH ALTROOT
-rpool 19.9G 8.43G 11.4G 33% - 42% 1.00x ONLINE -
-tank 61.5G 20.0G 41.5G 48% - 32% 1.00x ONLINE -
-zion - - - - - - - FAULTED -
+NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+rpool 19.9G 8.43G 11.4G - 33% 42% 1.00x ONLINE -
+tank 61.5G 20.0G 41.5G - 48% 32% 1.00x ONLINE -
+zion - - - - - - - FAULTED -
.Ed
.It Sy Example 7 No Destroying a ZFS Storage Pool
The following command destroys the pool
@@ -2215,12 +2215,12 @@ In this example, the pool will not be able to utilize this extra capacity until
all the devices under the raidz vdev have been expanded.
.Bd -literal
# zpool list -v data
-NAME SIZE ALLOC FREE FRAG EXPANDSZ CAP DEDUP HEALTH ALTROOT
-data 23.9G 14.6G 9.30G 48% - 61% 1.00x ONLINE -
- raidz1 23.9G 14.6G 9.30G 48% -
- sda - - - - -
- sdb - - - - 10G
- sdc - - - - -
+NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+data 23.9G 14.6G 9.30G - 48% 61% 1.00x ONLINE -
+ raidz1 23.9G 14.6G 9.30G - 48%
+ sda - - - - -
+ sdb - - - 10G -
+ sdc - - - - -
.Ed
.It Sy Example 16 No Adding output columns
Additional columns can be added to the
diff --git a/module/zcommon/zfs_comutil.c b/module/zcommon/zfs_comutil.c
index 52cb7e365559..44cdc852345c 100644
--- a/module/zcommon/zfs_comutil.c
+++ b/module/zcommon/zfs_comutil.c
@@ -215,7 +215,7 @@ const char *zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS] = {
* interval: Interval time in seconds
*/
void
-zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int burst,
+zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int *burst,
unsigned int interval)
{
rl->count = 0;
@@ -270,7 +270,7 @@ zfs_ratelimit(zfs_ratelimit_t *rl)
rl->start = now;
rl->count = 0;
} else {
- if (rl->count >= rl->burst) {
+ if (rl->count >= *rl->burst) {
rc = 0; /* We're ratelimiting */
}
}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 264e67735743..236794672e2e 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -280,6 +280,7 @@
#include
#include
#include
+#include <linux/page_compat.h>
#endif
#include
#include
@@ -391,7 +392,6 @@ unsigned long zfs_arc_dnode_limit_percent = 10;
*/
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_lifespan = 0;
-int zfs_arc_p_aggressive_disable = 1;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
@@ -4017,17 +4017,11 @@ arc_free_memory(void)
si_meminfo(&si);
return (ptob(si.freeram - si.freehigh));
#else
-#ifdef ZFS_GLOBAL_NODE_PAGE_STATE
return (ptob(nr_free_pages() +
- global_node_page_state(NR_INACTIVE_FILE) +
- global_node_page_state(NR_INACTIVE_ANON) +
- global_node_page_state(NR_SLAB_RECLAIMABLE)));
-#else
- return (ptob(nr_free_pages() +
- global_page_state(NR_INACTIVE_FILE) +
- global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_SLAB_RECLAIMABLE)));
-#endif /* ZFS_GLOBAL_NODE_PAGE_STATE */
+ nr_inactive_file_pages() +
+ nr_inactive_anon_pages() +
+ nr_slab_reclaimable_pages()));
+
#endif /* CONFIG_HIGHMEM */
#else
return (spa_get_random(arc_all_memory() * 20 / 100));
@@ -4438,13 +4432,7 @@ arc_evictable_memory(void)
* Scale reported evictable memory in proportion to page cache, cap
* at specified min/max.
*/
-#ifdef ZFS_GLOBAL_NODE_PAGE_STATE
- uint64_t min = (ptob(global_node_page_state(NR_FILE_PAGES)) / 100) *
- zfs_arc_pc_percent;
-#else
- uint64_t min = (ptob(global_page_state(NR_FILE_PAGES)) / 100) *
- zfs_arc_pc_percent;
-#endif
+ uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
min = MAX(arc_c_min, MIN(arc_c_max, min));
if (arc_dirty >= min)
@@ -7928,9 +7916,6 @@ MODULE_PARM_DESC(zfs_arc_meta_strategy, "Meta reclaim strategy");
module_param(zfs_arc_grow_retry, int, 0644);
MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size");
-module_param(zfs_arc_p_aggressive_disable, int, 0644);
-MODULE_PARM_DESC(zfs_arc_p_aggressive_disable, "disable aggressive arc_p grow");
-
module_param(zfs_arc_p_dampener_disable, int, 0644);
MODULE_PARM_DESC(zfs_arc_p_dampener_disable, "disable arc_p adapt dampener");
diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c
index c78228d74588..b494bef35831 100644
--- a/module/zfs/dmu_traverse.c
+++ b/module/zfs/dmu_traverse.c
@@ -599,19 +599,27 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
/* See comment on ZIL traversal in dsl_scan_visitds. */
if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
+ enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
uint32_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
- err = arc_read(NULL, td->td_spa, rootbp,
- arc_getbuf_func, &buf,
- ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, czb);
- if (err != 0)
- return (err);
-
- osp = buf->b_data;
- traverse_zil(td, &osp->os_zil_header);
- arc_buf_destroy(buf, &buf);
+ err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
+ &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
+ if (err != 0) {
+ /*
+ * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
+ * continue to visitbp so that td_func can be called
+ * in the pre stage, and err will be reset to zero.
+ */
+ if (!(td->td_flags & TRAVERSE_HARD) ||
+ !(td->td_flags & TRAVERSE_PRE))
+ return (err);
+ } else {
+ osp = buf->b_data;
+ traverse_zil(td, &osp->os_zil_header);
+ arc_buf_destroy(buf, &buf);
+ }
}
if (!(flags & TRAVERSE_PREFETCH_DATA) ||
diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c
index 6f2aa3f59315..ee8e9201bfb8 100644
--- a/module/zfs/mmp.c
+++ b/module/zfs/mmp.c
@@ -26,6 +26,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -124,6 +125,7 @@ uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
static void mmp_thread(spa_t *spa);
+char *mmp_tag = "mmp_write_uberblock";
void
mmp_init(spa_t *spa)
@@ -133,6 +135,7 @@ mmp_init(spa_t *spa)
mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
+ mmp->mmp_kstat_id = 1;
}
void
@@ -242,7 +245,8 @@ mmp_write_done(zio_t *zio)
mmp_thread_t *mts = zio->io_private;
mutex_enter(&mts->mmp_io_lock);
- vd->vdev_mmp_pending = 0;
+ uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
+ hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;
if (zio->io_error)
goto unlock;
@@ -276,8 +280,14 @@ mmp_write_done(zio_t *zio)
mts->mmp_last_write = gethrtime();
unlock:
+ vd->vdev_mmp_pending = 0;
+ vd->vdev_mmp_kstat_id = 0;
+
mutex_exit(&mts->mmp_io_lock);
- spa_config_exit(spa, SCL_STATE, FTAG);
+ spa_config_exit(spa, SCL_STATE, mmp_tag);
+
+ spa_mmp_history_set(spa, mmp_kstat_id, zio->io_error,
+ mmp_write_duration);
abd_free(zio->io_abd);
}
@@ -313,7 +323,7 @@ mmp_write_uberblock(spa_t *spa)
int label;
uint64_t offset;
- spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+ spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
vd = mmp_random_leaf(spa->spa_root_vdev);
if (vd == NULL) {
spa_config_exit(spa, SCL_STATE, FTAG);
@@ -331,6 +341,7 @@ mmp_write_uberblock(spa_t *spa)
ub->ub_mmp_magic = MMP_MAGIC;
ub->ub_mmp_delay = mmp->mmp_delay;
vd->vdev_mmp_pending = gethrtime();
+ vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id++;
zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
@@ -348,7 +359,7 @@ mmp_write_uberblock(spa_t *spa)
flags | ZIO_FLAG_DONT_PROPAGATE);
spa_mmp_history_add(ub->ub_txg, ub->ub_timestamp, ub->ub_mmp_delay, vd,
- label);
+ label, vd->vdev_mmp_kstat_id);
zio_nowait(zio);
}
@@ -428,10 +439,14 @@ mmp_thread(spa_t *spa)
*/
if (!suspended && mmp_fail_intervals && multihost &&
(start - mmp->mmp_last_write) > max_fail_ns) {
+ cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
+ "succeeded in over %llus; suspending pool",
+ spa_name(spa),
+ NSEC2SEC(start - mmp->mmp_last_write));
zio_suspend(spa, NULL);
}
- if (multihost)
+ if (multihost && !suspended)
mmp_write_uberblock(spa);
CALLB_CPR_SAFE_BEGIN(&cpr);
diff --git a/module/zfs/qat_compress.c b/module/zfs/qat_compress.c
index 4d17d7ac9a18..62655f56db67 100644
--- a/module/zfs/qat_compress.c
+++ b/module/zfs/qat_compress.c
@@ -364,10 +364,6 @@ qat_compress(qat_compress_dir_t dir, char *src, int src_len,
Cpa32U dst_buffer_list_mem_size = sizeof (CpaBufferList) +
(num_dst_buf * sizeof (CpaFlatBuffer));
- if (!is_vmalloc_addr(src) || !is_vmalloc_addr(src + src_len - 1) ||
- !is_vmalloc_addr(dst) || !is_vmalloc_addr(dst + dst_len - 1))
- return (-1);
-
if (PHYS_CONTIG_ALLOC(&in_pages,
num_src_buf * sizeof (struct page *)) != CPA_STATUS_SUCCESS)
goto fail;
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 5b792b868455..fea239014db4 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -147,6 +147,26 @@ spa_config_load(void)
kobj_close_file(file);
}
+static int
+spa_config_remove(spa_config_dirent_t *dp)
+{
+#if defined(__linux__) && defined(_KERNEL)
+ int error, flags = FWRITE | FTRUNC;
+ uio_seg_t seg = UIO_SYSSPACE;
+ vnode_t *vp;
+
+ error = vn_open(dp->scd_path, seg, flags, 0644, &vp, 0, 0);
+ if (error == 0) {
+ (void) VOP_FSYNC(vp, FSYNC, kcred, NULL);
+ (void) VOP_CLOSE(vp, 0, 1, 0, kcred, NULL);
+ }
+
+ return (error);
+#else
+ return (vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE));
+#endif
+}
+
static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
@@ -161,7 +181,10 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
* If the nvlist is empty (NULL), then remove the old cachefile.
*/
if (nvl == NULL) {
- err = vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
+ err = spa_config_remove(dp);
+ if (err == ENOENT)
+ err = 0;
+
return (err);
}
@@ -174,9 +197,9 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
#if defined(__linux__) && defined(_KERNEL)
/*
* Write the configuration to disk. Due to the complexity involved
- * in performing a rename from within the kernel the file is truncated
- * and overwritten in place. In the event of an error the file is
- * unlinked to make sure we always have a consistent view of the data.
+ * in performing a rename and remove from within the kernel the file
+ * is instead truncated and overwritten in place. This way we always
+ * have a consistent view of the data or a zero length file.
*/
err = vn_open(dp->scd_path, UIO_SYSSPACE, oflags, 0644, &vp, 0, 0);
if (err == 0) {
@@ -186,9 +209,8 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
err = VOP_FSYNC(vp, FSYNC, kcred, NULL);
(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
-
if (err)
- (void) vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
+ (void) spa_config_remove(dp);
}
#else
/*
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index 7ca359806174..58967e9fcbd8 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -718,21 +718,24 @@ spa_io_history_destroy(spa_t *spa)
*/
typedef struct spa_mmp_history {
+ uint64_t mmp_kstat_id; /* unique # for updates */
uint64_t txg; /* txg of last sync */
 uint64_t timestamp; /* UTC time of last sync */
uint64_t mmp_delay; /* nanosec since last MMP write */
uint64_t vdev_guid; /* unique ID of leaf vdev */
char *vdev_path;
uint64_t vdev_label; /* vdev label */
+ int io_error; /* error status of MMP write */
+ hrtime_t duration; /* time from submission to completion */
list_node_t smh_link;
} spa_mmp_history_t;
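The new mmp_kstat_id ties the two halves of a record together: spa_mmp_history_add() creates the entry when the uberblock write is issued, and spa_mmp_history_set() later fills in the error and duration once the zio completes (see the mmp.c hunks earlier in this patch). A compact userspace sketch of that submit/complete pattern, with hypothetical demo_* names and no locking:

    #include <stdio.h>
    #include <string.h>

    #define DEMO_HISTORY 8

    typedef struct {
        unsigned long id;   /* 0 means the slot is unused */
        int io_error;       /* filled in at completion */
        long duration_ns;   /* filled in at completion */
    } demo_mmp_rec_t;

    static demo_mmp_rec_t demo_history[DEMO_HISTORY];
    static unsigned long demo_next_id = 1;

    /* Called when the write is issued: record what is known so far. */
    static unsigned long
    demo_history_add(void)
    {
        demo_mmp_rec_t *r = &demo_history[demo_next_id % DEMO_HISTORY];

        memset(r, 0, sizeof (*r));
        r->id = demo_next_id;
        return (demo_next_id++);
    }

    /* Called from the completion path: find the entry by id and finish it. */
    static int
    demo_history_set(unsigned long id, int io_error, long duration_ns)
    {
        for (int i = 0; i < DEMO_HISTORY; i++) {
            if (demo_history[i].id == id) {
                demo_history[i].io_error = io_error;
                demo_history[i].duration_ns = duration_ns;
                return (0);
            }
        }
        return (-1); /* entry already rotated out of the history */
    }

    int
    main(void)
    {
        unsigned long id = demo_history_add();

        printf("set returned %d\n", demo_history_set(id, 0, 1234567L));
        return (0);
    }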
static int
spa_mmp_history_headers(char *buf, size_t size)
{
- (void) snprintf(buf, size, "%-10s %-10s %-12s %-24s %-10s %s\n",
- "txg", "timestamp", "mmp_delay", "vdev_guid", "vdev_label",
- "vdev_path");
+ (void) snprintf(buf, size, "%-10s %-10s %-10s %-6s %-10s %-12s %-24s "
+ "%-10s %s\n", "id", "txg", "timestamp", "error", "duration",
+ "mmp_delay", "vdev_guid", "vdev_label", "vdev_path");
return (0);
}
@@ -741,11 +744,12 @@ spa_mmp_history_data(char *buf, size_t size, void *data)
{
spa_mmp_history_t *smh = (spa_mmp_history_t *)data;
- (void) snprintf(buf, size, "%-10llu %-10llu %-12llu %-24llu %-10llu "
- "%s\n",
- (u_longlong_t)smh->txg, (u_longlong_t)smh->timestamp,
- (u_longlong_t)smh->mmp_delay, (u_longlong_t)smh->vdev_guid,
- (u_longlong_t)smh->vdev_label,
+ (void) snprintf(buf, size, "%-10llu %-10llu %-10llu %-6lld %-10lld "
+ "%-12llu %-24llu %-10llu %s\n",
+ (u_longlong_t)smh->mmp_kstat_id, (u_longlong_t)smh->txg,
+ (u_longlong_t)smh->timestamp, (longlong_t)smh->io_error,
+ (longlong_t)smh->duration, (u_longlong_t)smh->mmp_delay,
+ (u_longlong_t)smh->vdev_guid, (u_longlong_t)smh->vdev_label,
(smh->vdev_path ? smh->vdev_path : "-"));
return (0);
@@ -861,11 +865,40 @@ spa_mmp_history_destroy(spa_t *spa)
}
/*
- * Add a new MMP update to historical record.
+ * Set MMP write duration and error status in existing record.
+ */
+int
+spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
+ hrtime_t duration)
+{
+ spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
+ spa_mmp_history_t *smh;
+ int error = ENOENT;
+
+ if (zfs_multihost_history == 0 && ssh->size == 0)
+ return (0);
+
+ mutex_enter(&ssh->lock);
+ for (smh = list_head(&ssh->list); smh != NULL;
+ smh = list_next(&ssh->list, smh)) {
+ if (smh->mmp_kstat_id == mmp_kstat_id) {
+ smh->io_error = io_error;
+ smh->duration = duration;
+ error = 0;
+ break;
+ }
+ }
+ mutex_exit(&ssh->lock);
+
+ return (error);
+}
+
+/*
+ * Add a new MMP write to historical record.
*/
void
spa_mmp_history_add(uint64_t txg, uint64_t timestamp, uint64_t mmp_delay,
- vdev_t *vd, int label)
+ vdev_t *vd, int label, uint64_t mmp_kstat_id)
{
spa_t *spa = vd->vdev_spa;
spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
@@ -882,6 +915,7 @@ spa_mmp_history_add(uint64_t txg, uint64_t timestamp, uint64_t mmp_delay,
if (vd->vdev_path)
smh->vdev_path = strdup(vd->vdev_path);
smh->vdev_label = label;
+ smh->mmp_kstat_id = mmp_kstat_id;
mutex_enter(&ssh->lock);
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index df07d893dba2..0786fbb834d7 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -56,6 +56,16 @@
*/
int metaslabs_per_vdev = 200;
+/*
+ * Rate limit delay events to this many IO delays per second.
+ */
+unsigned int zfs_delays_per_second = 20;
+
+/*
+ * Rate limit checksum events after this many checksum errors per second.
+ */
+unsigned int zfs_checksums_per_second = 20;
+
/*
* Virtual device management.
*/
@@ -357,8 +367,8 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
- zfs_ratelimit_init(&vd->vdev_delay_rl, DELAYS_PER_SECOND, 1);
- zfs_ratelimit_init(&vd->vdev_checksum_rl, CHECKSUMS_PER_SECOND, 1);
+ zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_delays_per_second, 1);
+ zfs_ratelimit_init(&vd->vdev_checksum_rl, &zfs_checksums_per_second, 1);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
@@ -3776,5 +3786,14 @@ module_param(metaslabs_per_vdev, int, 0644);
MODULE_PARM_DESC(metaslabs_per_vdev,
"Divide added vdev into approximately (but no more than) this number "
"of metaslabs");
+
+module_param(zfs_delays_per_second, uint, 0644);
+MODULE_PARM_DESC(zfs_delays_per_second, "Rate limit delay events to this many "
+ "IO delays per second");
+
+module_param(zfs_checksums_per_second, uint, 0644);
+MODULE_PARM_DESC(zfs_checksums_per_second, "Rate limit checksum events "
+ "to this many checksum errors per second (do not set below zed "
+ "threshold).");
/* END CSTYLED */
#endif
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index ee9962bff394..9843d8c500e5 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -819,15 +819,19 @@ fzap_lookup(zap_name_t *zn,
return (err);
}
+#define MAX_EXPAND_RETRIES 2
+
int
fzap_add_cd(zap_name_t *zn,
uint64_t integer_size, uint64_t num_integers,
const void *val, uint32_t cd, void *tag, dmu_tx_t *tx)
{
zap_leaf_t *l;
+ zap_leaf_t *prev_l = NULL;
int err;
zap_entry_handle_t zeh;
zap_t *zap = zn->zn_zap;
+ int expand_retries = 0;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(!zap->zap_ismicro);
@@ -851,10 +855,29 @@ fzap_add_cd(zap_name_t *zn,
if (err == 0) {
zap_increment_num_entries(zap, 1, tx);
} else if (err == EAGAIN) {
+ /*
+ * If the last two expansions did not help, there is no point
+ * trying to expand again
+ */
+ if (expand_retries > MAX_EXPAND_RETRIES && prev_l == l) {
+ err = SET_ERROR(ENOSPC);
+ goto out;
+ }
+
err = zap_expand_leaf(zn, l, tag, tx, &l);
zap = zn->zn_zap; /* zap_expand_leaf() may change zap */
- if (err == 0)
+ if (err == 0) {
+ prev_l = l;
+ expand_retries++;
goto retry;
+ } else if (err == ENOSPC) {
+ /*
+ * If we failed to expand the leaf, then bail out;
+ * there is no point in trying
+ * zap_put_leaf_maybe_grow_ptrtbl().
+ */
+ return (err);
+ }
}
out:
diff --git a/module/zfs/zap_leaf.c b/module/zfs/zap_leaf.c
index c342695c7f42..526e4660651f 100644
--- a/module/zfs/zap_leaf.c
+++ b/module/zfs/zap_leaf.c
@@ -53,7 +53,7 @@ static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry);
((h) >> \
(64 - ZAP_LEAF_HASH_SHIFT(l) - zap_leaf_phys(l)->l_hdr.lh_prefix_len)))
-#define LEAF_HASH_ENTPTR(l, h) (&zap_leaf_phys(l)->l_hash[LEAF_HASH(l, h)])
+#define LEAF_HASH_ENTPTR(l, h) (&zap_leaf_phys(l)->l_hash[LEAF_HASH(l, h)])
extern inline zap_leaf_phys_t *zap_leaf_phys(zap_leaf_t *l);
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index 3ebf995c6780..34bef3e63da8 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -363,6 +363,41 @@ mze_find_unused_cd(zap_t *zap, uint64_t hash)
return (cd);
}
+/*
+ * Each mzap entry, once moved into a fatzap leaf, requires at most:
+ * 1 chunk for the entry itself plus 3 chunks for the name and 1 for the value.
+ */
+#define MZAP_ENT_CHUNKS (1 + ZAP_LEAF_ARRAY_NCHUNKS(MZAP_NAME_LEN) + \
+ ZAP_LEAF_ARRAY_NCHUNKS(sizeof (uint64_t)))
+
+/*
+ * Check whether, after adding this entry, all entries sharing its hash
+ * (colliding entries) would still fit within a single fatzap leaf block.
+ */
+static boolean_t
+mze_canfit_fzap_leaf(zap_name_t *zn, uint64_t hash)
+{
+ zap_t *zap = zn->zn_zap;
+ mzap_ent_t mze_tofind;
+ mzap_ent_t *mze;
+ avl_index_t idx;
+ avl_tree_t *avl = &zap->zap_m.zap_avl;
+ uint32_t mzap_ents = 0;
+
+ mze_tofind.mze_hash = hash;
+ mze_tofind.mze_cd = 0;
+
+ for (mze = avl_find(avl, &mze_tofind, &idx);
+ mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
+ mzap_ents++;
+ }
+
+ /* Include the new entry being added */
+ mzap_ents++;
+
+ return (ZAP_LEAF_NUMCHUNKS_DEF > (mzap_ents * MZAP_ENT_CHUNKS));
+}
+
static void
mze_remove(zap_t *zap, mzap_ent_t *mze)
{
@@ -1191,7 +1226,8 @@ zap_add_impl(zap_t *zap, const char *key,
err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
} else if (integer_size != 8 || num_integers != 1 ||
- strlen(key) >= MZAP_NAME_LEN) {
+ strlen(key) >= MZAP_NAME_LEN ||
+ !mze_canfit_fzap_leaf(zn, zn->zn_hash)) {
err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
if (err == 0) {
err = fzap_add(zn, integer_size, num_integers, val,
diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c
index 7ddedeaafb03..1fcfca0c7268 100644
--- a/module/zfs/zfs_acl.c
+++ b/module/zfs/zfs_acl.c
@@ -1323,6 +1323,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
+ zfs_acl_phys_t acl_phys;
mode = zp->z_mode;
@@ -1369,7 +1370,6 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
- zfs_acl_phys_t acl_phys;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
diff --git a/module/zfs/zfs_dir.c b/module/zfs/zfs_dir.c
index 9a8bbccd92d7..6398a1d155e2 100644
--- a/module/zfs/zfs_dir.c
+++ b/module/zfs/zfs_dir.c
@@ -742,7 +742,11 @@ zfs_dirent(znode_t *zp, uint64_t mode)
}
/*
- * Link zp into dl. Can only fail if zp has been unlinked.
+ * Link zp into dl. Can fail in the following cases :
+ * - if zp has been unlinked.
+ * - if the number of entries with the same hash (aka. colliding entries)
+ * exceed the capacity of a leaf-block of fatzap and splitting of the
+ * leaf-block does not help.
*/
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
@@ -776,6 +780,24 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
NULL, &links, sizeof (links));
}
}
+
+ value = zfs_dirent(zp, zp->z_mode);
+ error = zap_add(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name, 8, 1,
+ &value, tx);
+
+ /*
+ * zap_add could fail to add the entry if it exceeds the capacity of the
+ * leaf-block and zap_leaf_split() failed to help.
+ * The caller of this routine is responsible for failing the transaction,
+ * which will roll back the SA updates done above.
+ */
+ if (error != 0) {
+ if (!(flag & ZRENAMING) && !(flag & ZNEW))
+ drop_nlink(ZTOI(zp));
+ mutex_exit(&zp->z_lock);
+ return (error);
+ }
+
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&dzp->z_id, sizeof (dzp->z_id));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
@@ -813,11 +835,6 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
ASSERT(error == 0);
mutex_exit(&dzp->z_lock);
- value = zfs_dirent(zp, zp->z_mode);
- error = zap_add(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name,
- 8, 1, &value, tx);
- ASSERT(error == 0);
-
return (0);
}
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 6f6ce79db20e..8a7ad702ca21 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -1443,10 +1443,22 @@ zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
+ error = zfs_link_create(dl, zp, tx, ZNEW);
+ if (error != 0) {
+ /*
+ * Since we failed to add the directory entry for it,
+ * delete the newly created dnode.
+ */
+ zfs_znode_delete(zp, tx);
+ remove_inode_hash(ZTOI(zp));
+ zfs_acl_ids_free(&acl_ids);
+ dmu_tx_commit(tx);
+ goto out;
+ }
+
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
- (void) zfs_link_create(dl, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
if (flag & FIGNORECASE)
txtype |= TX_CI;
@@ -2037,13 +2049,18 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
- if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
-
/*
* Now put new name in parent dir.
*/
- (void) zfs_link_create(dl, zp, tx, ZNEW);
+ error = zfs_link_create(dl, zp, tx, ZNEW);
+ if (error != 0) {
+ zfs_znode_delete(zp, tx);
+ remove_inode_hash(ZTOI(zp));
+ goto out;
+ }
+
+ if (fuid_dirtied)
+ zfs_fuid_sync(zfsvfs, tx);
*ipp = ZTOI(zp);
@@ -2053,6 +2070,7 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
acl_ids.z_fuidp, vap);
+out:
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
@@ -2062,10 +2080,14 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- zfs_inode_update(dzp);
- zfs_inode_update(zp);
+ if (error != 0) {
+ iput(ZTOI(zp));
+ } else {
+ zfs_inode_update(dzp);
+ zfs_inode_update(zp);
+ }
ZFS_EXIT(zfsvfs);
- return (0);
+ return (error);
}
/*
@@ -3683,6 +3705,13 @@ zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
VERIFY3U(zfs_link_destroy(tdl, szp, tx,
ZRENAMING, NULL), ==, 0);
}
+ } else {
+ /*
+ * If we had removed the existing target, the subsequent
+ * call to zfs_link_create() to add back the same entry,
+ * but with the new dnode (szp), should not fail.
+ */
+ ASSERT(tzp == NULL);
}
}
@@ -3853,14 +3882,18 @@ zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
/*
* Insert the new object into the directory.
*/
- (void) zfs_link_create(dl, zp, tx, ZNEW);
-
- if (flags & FIGNORECASE)
- txtype |= TX_CI;
- zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
+ error = zfs_link_create(dl, zp, tx, ZNEW);
+ if (error != 0) {
+ zfs_znode_delete(zp, tx);
+ remove_inode_hash(ZTOI(zp));
+ } else {
+ if (flags & FIGNORECASE)
+ txtype |= TX_CI;
+ zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
- zfs_inode_update(dzp);
- zfs_inode_update(zp);
+ zfs_inode_update(dzp);
+ zfs_inode_update(zp);
+ }
zfs_acl_ids_free(&acl_ids);
@@ -3868,10 +3901,14 @@ zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
zfs_dirent_unlock(dl);
- *ipp = ZTOI(zp);
+ if (error == 0) {
+ *ipp = ZTOI(zp);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ zil_commit(zilog, 0);
+ } else {
+ iput(ZTOI(zp));
+ }
ZFS_EXIT(zfsvfs);
return (error);
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 4d714cefc758..645b1d4d80ba 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -1167,8 +1167,7 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
- ZIL_STAT_INCR(zil_itx_needcopy_bytes,
- lrw->lr_length);
+ ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow);
} else {
ASSERT(itx->itx_wr_state == WR_INDIRECT);
dbuf = NULL;
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 1d69d8d8ded9..cd0a473e0e13 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -491,21 +491,26 @@ zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
}
static boolean_t
-zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
+zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
- uint64_t *countp = &zio->io_children[child][wait];
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
- if (*countp != 0) {
- zio->io_stage >>= 1;
- ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
- zio->io_stall = countp;
- waiting = B_TRUE;
+ for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
+ if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
+ continue;
+
+ uint64_t *countp = &zio->io_children[c][wait];
+ if (*countp != 0) {
+ zio->io_stage >>= 1;
+ ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
+ zio->io_stall = countp;
+ waiting = B_TRUE;
+ break;
+ }
}
mutex_exit(&zio->io_lock);
-
return (waiting);
}
@@ -1296,9 +1301,10 @@ zio_write_compress(zio_t *zio)
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
- if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
- zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
+ if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
+ ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (ZIO_PIPELINE_STOP);
+ }
if (!IO_IS_ALLOCATING(zio))
return (ZIO_PIPELINE_CONTINUE);
@@ -2229,8 +2235,9 @@ zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
- if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
+ if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
+ }
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
@@ -2561,8 +2568,9 @@ zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
- if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
+ if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
+ }
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
@@ -3292,8 +3300,9 @@ zio_vdev_io_done(zio_t *zio)
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
- if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
+ if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
+ }
ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
@@ -3362,8 +3371,9 @@ zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
- if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
+ if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
+ }
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
@@ -3578,9 +3588,10 @@ zio_ready(zio_t *zio)
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
- if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
- zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
+ if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
+ ZIO_WAIT_READY)) {
return (ZIO_PIPELINE_STOP);
+ }
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
@@ -3721,11 +3732,9 @@ zio_done(zio_t *zio)
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
- if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
- zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
- zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
- zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
+ if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
+ }
/*
* If the allocation throttle is enabled, then update the accounting.
diff --git a/module/zfs/zle.c b/module/zfs/zle.c
index 13c5673fbe26..613607faaa97 100644
--- a/module/zfs/zle.c
+++ b/module/zfs/zle.c
@@ -74,10 +74,14 @@ zle_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
while (src < s_end && dst < d_end) {
int len = 1 + *src++;
if (len <= n) {
+ if (src + len > s_end || dst + len > d_end)
+ return (-1);
while (len-- != 0)
*dst++ = *src++;
} else {
len -= n;
+ if (dst + len > d_end)
+ return (-1);
while (len-- != 0)
*dst++ = 0;
}
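For reference, ZLE stores each run as a single length byte: with len = 1 + *src, values of len at or below the threshold n mean the next len input bytes are copied verbatim, and larger values mean len - n zero bytes are emitted. A corrupt stream can therefore claim a run longer than the space left in either buffer, which is exactly what the new checks reject. A minimal userspace decoder with the same checks (hypothetical demo helper, assuming n = 64):

    #include <stddef.h>
    #include <stdio.h>

    /* Returns the decompressed length, or -1 if the stream would overrun. */
    static int
    demo_zle_decompress(const unsigned char *src, size_t s_len,
        unsigned char *dst, size_t d_len, int n)
    {
        const unsigned char *s_end = src + s_len;
        unsigned char *d_start = dst, *d_end = dst + d_len;

        while (src < s_end && dst < d_end) {
            int len = 1 + *src++;

            if (len <= n) {
                /* literal run: copy the next len input bytes */
                if (src + len > s_end || dst + len > d_end)
                    return (-1);
                while (len-- != 0)
                    *dst++ = *src++;
            } else {
                /* zero run: emit len - n zero bytes */
                len -= n;
                if (dst + len > d_end)
                    return (-1);
                while (len-- != 0)
                    *dst++ = 0;
            }
        }
        return ((int)(dst - d_start));
    }

    int
    main(void)
    {
        /* 0x02 -> 3 literal bytes, 0x44 -> 5 zero bytes (with n = 64). */
        unsigned char in[] = { 0x02, 'a', 'b', 'c', 0x44 };
        unsigned char out[16];

        printf("%d bytes\n", demo_zle_decompress(in, sizeof (in), out,
            sizeof (out), 64));
        return (0);
    }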
diff --git a/module/zfs/zpl_super.c b/module/zfs/zpl_super.c
index b6ef60277664..25e75a897355 100644
--- a/module/zfs/zpl_super.c
+++ b/module/zfs/zpl_super.c
@@ -36,7 +36,7 @@ zpl_inode_alloc(struct super_block *sb)
struct inode *ip;
VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
- ip->i_version = 1;
+ inode_set_iversion(ip, 1);
return (ip);
}
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index aac4942098ad..62176e1cfbbc 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1559,7 +1559,7 @@ zvol_probe(dev_t dev, int *part, void *arg)
struct kobject *kobj;
zv = zvol_find_by_dev(dev);
- kobj = zv ? get_disk(zv->zv_disk) : NULL;
+ kobj = zv ? get_disk_and_module(zv->zv_disk) : NULL;
ASSERT(zv == NULL || MUTEX_HELD(&zv->zv_state_lock));
if (zv)
mutex_exit(&zv->zv_state_lock);
diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in
index 8df57fa46e46..ce028794c05f 100644
--- a/rpm/generic/zfs.spec.in
+++ b/rpm/generic/zfs.spec.in
@@ -87,11 +87,11 @@ BuildRequires: libblkid-devel
BuildRequires: libudev-devel
BuildRequires: libattr-devel
%endif
+
%if 0%{?_systemd}
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
-BuildRequires: systemd
%endif
# The zpool iostat/status -c scripts call some utilities like lsblk and iostat
@@ -245,8 +245,15 @@ find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \;
%post
%if 0%{?_systemd}
+%if 0%{?systemd_post:1}
%systemd_post %{systemd_svcs}
%else
+if [ "$1" = "1" -o "$1" = "install" ] ; then
+ # Initial installation
+ systemctl preset %{systemd_svcs} >/dev/null || true
+fi
+%endif
+%else
if [ -x /sbin/chkconfig ]; then
/sbin/chkconfig --add zfs-import
/sbin/chkconfig --add zfs-mount
@@ -258,9 +265,17 @@ exit 0
%preun
%if 0%{?_systemd}
+%if 0%{?systemd_preun:1}
%systemd_preun %{systemd_svcs}
%else
-if [ "$1" = "0" ] && [ -x /sbin/chkconfig ]; then
+if [ "$1" = "0" -o "$1" = "remove" ] ; then
+ # Package removal, not upgrade
+ systemctl --no-reload disable %{systemd_svcs} >/dev/null || true
+ systemctl stop %{systemd_svcs} >/dev/null || true
+fi
+%endif
+%else
+if [ "$1" = "0" -o "$1" = "remove" ] && [ -x /sbin/chkconfig ]; then
/sbin/chkconfig --del zfs-import
/sbin/chkconfig --del zfs-mount
/sbin/chkconfig --del zfs-share
@@ -271,7 +286,11 @@ exit 0
%postun
%if 0%{?_systemd}
+%if 0%{?systemd_postun:1}
%systemd_postun %{systemd_svcs}
+%else
+systemctl --system daemon-reload >/dev/null || true
+%endif
%endif
%files
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 74b8b31a50d9..5a8abd1354b3 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -5,6 +5,7 @@ EXTRA_DIST = dkms.mkconf dkms.postbuild kmodtool zfs2zol-patch.sed cstyle.pl
pkgdatadir = $(datadir)/@PACKAGE@
dist_pkgdata_SCRIPTS = \
$(top_builddir)/scripts/common.sh \
+ $(top_srcdir)/scripts/enum-extract.pl \
$(top_srcdir)/scripts/zimport.sh \
$(top_srcdir)/scripts/zfs.sh \
$(top_srcdir)/scripts/zfs-tests.sh \
@@ -15,3 +16,4 @@ dist_pkgdata_SCRIPTS = \
$(top_srcdir)/scripts/zpios-survey.sh \
$(top_srcdir)/scripts/smb.sh \
$(top_srcdir)/scripts/zfs-helpers.sh
+
diff --git a/scripts/enum-extract.pl b/scripts/enum-extract.pl
new file mode 100755
index 000000000000..5112cc807f67
--- /dev/null
+++ b/scripts/enum-extract.pl
@@ -0,0 +1,58 @@
+#!/usr/bin/perl -w
+
+my $usage = "usage: enum-extract.pl <enum> [file ...]\n";
+
+my $enum = shift or die $usage;
+
+my $in_enum = 0;
+
+while (<>) {
+ # comments
+ s/\/\*.*\*\///;
+ if (m/\/\*/) {
+ while ($_ .= <>) {
+ last if s/\/\*.*\*\///s;
+ }
+ }
+
+ # preprocessor stuff
+ next if /^#/;
+
+ # find our enum
+ $in_enum = 1 if s/^\s*enum\s+${enum}(?:\s|$)//;
+ next unless $in_enum;
+
+ # remove explicit values
+ s/\s*=[^,]+,/,/g;
+
+ # extract each identifier
+ while (m/\b([a-z_][a-z0-9_]*)\b/ig) {
+ print $1, "\n";
+ }
+
+ #
+ # don't exit: there may be multiple versions of the same enum, e.g.
+ # inside different #ifdef blocks. Let's explicitly return all of
+ # them and let external tooling deal with it.
+ #
+ $in_enum = 0 if m/}\s*;/;
+}
+
+exit 0;
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index 303c275299d8..89c923db1841 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -55,7 +55,7 @@ tags = ['functional', 'cachefile']
# 'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
# 'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
[tests/functional/casenorm]
-tests = ['case_all_values', 'norm_all_values']
+tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure']
tags = ['functional', 'casenorm']
[tests/functional/chattr]
@@ -73,7 +73,7 @@ tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_001_neg', 'zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos',
- 'zdb_005_pos']
+ 'zdb_005_pos', 'zdb_006_pos']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
@@ -421,7 +421,7 @@ tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/fault]
-tests = ['auto_online_001_pos', 'auto_replace_001_pos']
+tests = ['auto_online_001_pos', 'auto_replace_001_pos', 'scrub_after_resilver']
tags = ['functional', 'fault']
[tests/functional/features/async_destroy]
diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg
index f6fd239de262..936e54c1a065 100644
--- a/tests/zfs-tests/include/commands.cfg
+++ b/tests/zfs-tests/include/commands.cfg
@@ -83,6 +83,7 @@ export SYSTEM_FILES='arp
pgrep
ping
pkill
+ printenv
printf
ps
pwd
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 86f172a6d348..48fb5e7c5021 100644
--- a/tests/zfs-tests/include/libtest.shlib
+++ b/tests/zfs-tests/include/libtest.shlib
@@ -3339,9 +3339,32 @@ function wait_replacing #pool
done
}
+#
+# Wait for a pool to be scrubbed
+#
+# $1 pool name
+# $2 number of seconds to wait (optional)
+#
+# Returns true when pool has been scrubbed, or false if there's a timeout or if
+# no scrub was done.
+#
+function wait_scrubbed
+{
+ typeset pool=${1:-$TESTPOOL}
+ typeset iter=${2:-10}
+ for i in {1..$iter} ; do
+ if is_pool_scrubbed $pool ; then
+ return 0
+ fi
+ sleep 1
+ done
+ return 1
+}
+
#
# Setup custom environment for the ZED.
#
+# $@ Optional list of zedlets to run under zed.
function zed_setup
{
if ! is_linux; then
@@ -3359,6 +3382,7 @@ function zed_setup
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
fi
+ EXTRA_ZEDLETS=$@
# Create a symlink for /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
@@ -3368,32 +3392,44 @@ function zed_setup
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
+ # Scripts must only be user writable.
+ if [[ -n "$EXTRA_ZEDLETS" ]] ; then
+ saved_umask=$(umask)
+ log_must umask 0022
+ for i in $EXTRA_ZEDLETS ; do
+ log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
+ done
+ log_must umask $saved_umask
+ fi
+
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
- # Scripts must only be user writable.
- saved_umask=$(umask)
- log_must umask 0022
- log_must cp ${ZEDLET_LIBEXEC_DIR}/all-syslog.sh $ZEDLET_DIR
- log_must cp ${ZEDLET_LIBEXEC_DIR}/all-debug.sh $ZEDLET_DIR
- log_must umask $saved_umask
}
#
# Cleanup custom ZED environment.
#
+# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
if ! is_linux; then
return
fi
+ EXTRA_ZEDLETS=$@
log_must rm -f ${ZEDLET_DIR}/zed.rc
log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
log_must rm -f ${ZEDLET_DIR}/all-debug.sh
log_must rm -f ${ZEDLET_DIR}/state
+
+ if [[ -n "$EXTRA_ZEDLETS" ]] ; then
+ for i in $EXTRA_ZEDLETS ; do
+ log_must rm -f ${ZEDLET_DIR}/$i
+ done
+ fi
log_must rm -f $ZED_LOG
log_must rm -f $ZED_DEBUG_LOG
log_must rm -f $VDEVID_CONF_ETC
@@ -3425,7 +3461,7 @@ function zed_start
# run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
- log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
+ log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
"-s $ZEDLET_DIR/state 2>$ZED_LOG &"
return 0
diff --git a/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh b/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh
index ae54a9365f54..e0b81e166279 100755
--- a/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh
@@ -98,13 +98,13 @@ log_must zpool set cachefile=$CPATH2 $TESTPOOL1
log_must pool_in_cache $TESTPOOL1 $CPATH2
log_must zpool set cachefile=$CPATH2 $TESTPOOL2
log_must pool_in_cache $TESTPOOL2 $CPATH2
-if [[ -f $CPATH1 ]]; then
+if [[ -s $CPATH1 ]]; then
log_fail "Verify set when cachefile is set on pool."
fi
log_must zpool export $TESTPOOL1
log_must zpool export $TESTPOOL2
-if [[ -f $CPATH2 ]]; then
+if [[ -s $CPATH2 ]]; then
log_fail "Verify export when cachefile is set on pool."
fi
@@ -117,7 +117,7 @@ log_must pool_in_cache $TESTPOOL2 $CPATH2
log_must zpool destroy $TESTPOOL1
log_must zpool destroy $TESTPOOL2
-if [[ -f $CPATH2 ]]; then
+if [[ -s $CPATH2 ]]; then
log_fail "Verify destroy when cachefile is set on pool."
fi
diff --git a/tests/zfs-tests/tests/functional/casenorm/Makefile.am b/tests/zfs-tests/tests/functional/casenorm/Makefile.am
index 00a19c7ff77d..00cb59074ed2 100644
--- a/tests/zfs-tests/tests/functional/casenorm/Makefile.am
+++ b/tests/zfs-tests/tests/functional/casenorm/Makefile.am
@@ -9,6 +9,7 @@ dist_pkgdata_SCRIPTS = \
insensitive_formd_lookup.ksh \
insensitive_none_delete.ksh \
insensitive_none_lookup.ksh \
+ mixed_create_failure.ksh \
mixed_formd_delete.ksh \
mixed_formd_lookup_ci.ksh \
mixed_formd_lookup.ksh \
diff --git a/tests/zfs-tests/tests/functional/casenorm/mixed_create_failure.ksh b/tests/zfs-tests/tests/functional/casenorm/mixed_create_failure.ksh
new file mode 100755
index 000000000000..51b5bb3f6584
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/casenorm/mixed_create_failure.ksh
@@ -0,0 +1,136 @@
+#!/bin/ksh -p
+#
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+#
+# Copyright 2018 Nutanix Inc. All rights reserved.
+#
+
+. $STF_SUITE/tests/functional/casenorm/casenorm.kshlib
+
+# DESCRIPTION:
+# For a filesystem with casesensitivity=mixed and normalization=none,
+# when multiple files with the same name (differing only in case) are created,
+# the number of such files is limited to what fits in a fatzap leaf-block;
+# beyond that, creation fails with ENOSPC.
+#
+# Ensure that the create/rename operations fail gracefully and do not trigger
+# an ASSERT.
+#
+# STRATEGY:
+# Repeat the steps below for each object type: files, directories, symlinks and
+# hardlinks.
+# 1. Create objects with the same name, varying only in case.
+# E.g. 'abcdefghijklmnop', 'Abcdefghijklmnop', 'ABcdefghijklmnop' etc.
+# Eventually the create should fail with ENOSPC.
+# 2. Create an object named 'tmp_obj' and try to rename it to a name that we
+# failed to add in step 1 above.
+# This should fail as well.
+
+verify_runnable "global"
+
+function cleanup
+{
+ destroy_testfs
+}
+
+log_onexit cleanup
+log_assert "With mixed mode: ensure create fails with ENOSPC beyond a certain limit"
+
+create_testfs "-o casesensitivity=mixed -o normalization=none"
+
+# Different object types
+obj_type=('file' 'dir' 'symlink' 'hardlink')
+
+# Commands to create different object types
+typeset -A ops
+ops['file']='touch'
+ops['dir']='mkdir'
+ops['symlink']='ln -s'
+ops['hardlink']='ln'
+
+# This function tests the following for a given object type:
+# - Create multiple objects with the same name (varying only in case).
+# Ensure that this eventually fails once the leaf-block limit is exceeded.
+# - Create another object with a different name and attempt to rename it to
+# the name for which the create failed in the previous step.
+# This should fail as well.
+# Args :
+# $1 - object type (file/dir/symlink/hardlink)
+# $2 - test directory
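+# e.g. test_ops file $TESTDIR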
+#
+function test_ops
+{
+ typeset obj_type=$1
+ typeset testdir=$2
+
+ target_obj='target-file'
+
+ op="${ops[$obj_type]}"
+
+ log_note "The op : $op"
+ log_note "testdir=$testdir obj_type=$obj_type"
+
+ test_path="$testdir/$obj_type"
+ mkdir $test_path
+ log_note "Created test dir $test_path"
+
+ if [[ $obj_type = "symlink" || $obj_type = "hardlink" ]]; then
+ touch $test_path/$target_obj
+ log_note "Created target: $test_path/$target_obj"
+ op="$op $test_path/$target_obj"
+ fi
+
+ log_note "op : $op"
+ names='{a,A}{b,B}{c,C}{d,D}{e,E}{f,F}{g,G}{h,H}{i,I}{j,J}{k,K}{l,L}'
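+ # NOTE: the 12 case pairs above expand to 2^12 = 4096 colliding names,
+ # more than a single fatzap leaf-block is expected to hold.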
+ for name in $names; do
+ cmd="$op $test_path/$name"
+ out=$($cmd 2>&1)
+ ret=$?
+ log_note "cmd: $cmd ret: $ret out=$out"
+ if (($ret != 0)); then
+ if [[ $out = *@(No space left on device)* ]]; then
+ save_name="$test_path/$name"
+ break;
+ else
+ log_err "$cmd failed with unexpected error : $out"
+ fi
+ fi
+ done
+
+ log_note "Test renaming a new object to the name that failed above"
+ TMP_OBJ="$test_path/tmp_obj"
+ cmd="$op $TMP_OBJ"
+ out=$($cmd 2>&1)
+ ret=$?
+ if (($ret != 0)); then
+ log_err "cmd:$cmd failed out:$out"
+ fi
+
+ # Now, try to rename the tmp_obj to the name which we failed to add earlier.
+ # This should fail as well.
+ out=$(mv $TMP_OBJ $save_name 2>&1)
+ ret=$?
+ if (($ret != 0)); then
+ if [[ $out = *@(No space left on device)* ]]; then
+ log_note "$cmd failed as expected : $out"
+ else
+ log_err "$cmd failed with : $out"
+ fi
+ fi
+}
+
+for obj_type in ${obj_type[*]};
+do
+ log_note "Testing create of $obj_type"
+ test_ops $obj_type $TESTDIR
+done
+
+log_pass "Mixed mode FS: Ops on large number of colliding names fail gracefully"
diff --git a/tests/zfs-tests/tests/functional/clean_mirror/cleanup.ksh b/tests/zfs-tests/tests/functional/clean_mirror/cleanup.ksh
index ac3bfbca8940..fb0db312ebba 100755
--- a/tests/zfs-tests/tests/functional/clean_mirror/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/clean_mirror/cleanup.ksh
@@ -38,10 +38,10 @@ df -F zfs -h | grep "$TESTFS " >/dev/null
[[ $? == 0 ]] && log_must zfs umount -f $TESTDIR
destroy_pool $TESTPOOL
-if is_mpath_device $MIRROR_PRIMARY; then
+if ( is_mpath_device $MIRROR_PRIMARY || is_loop_device $MIRROR_PRIMARY ); then
parted $DEV_DSKDIR/$MIRROR_PRIMARY -s rm 1
fi
-if is_mpath_device $MIRROR_SECONDARY; then
+if ( is_mpath_device $MIRROR_SECONDARY || is_loop_device $MIRROR_SECONDARY ); then
parted $DEV_DSKDIR/$MIRROR_SECONDARY -s rm 1
fi
# recreate and destroy a zpool over the disks to restore the partitions to
diff --git a/tests/zfs-tests/tests/functional/cli_root/zdb/Makefile.am b/tests/zfs-tests/tests/functional/cli_root/zdb/Makefile.am
index 51170fbc894d..d37bcf607f46 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zdb/Makefile.am
+++ b/tests/zfs-tests/tests/functional/cli_root/zdb/Makefile.am
@@ -4,4 +4,5 @@ dist_pkgdata_SCRIPTS = \
zdb_002_pos.ksh \
zdb_003_pos.ksh \
zdb_004_pos.ksh \
- zdb_005_pos.ksh
+ zdb_005_pos.ksh \
+ zdb_006_pos.ksh
diff --git a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_006_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_006_pos.ksh
new file mode 100755
index 000000000000..97b00e9e1996
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_006_pos.ksh
@@ -0,0 +1,64 @@
+#!/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2018 by Nutanix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# Description:
+# zdb -d will work on imported/exported pool with pool/dataset argument
+#
+# Strategy:
+# 1. Create a pool
+# 2. Run zdb -d with pool and dataset arguments.
+# 3. Export the pool
+# 4. Run zdb -ed with pool and dataset arguments.
+#
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
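+ # Clear any leftover labels so the disks are clean for subsequent tests.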
+ for DISK in $DISKS; do
+ zpool labelclear -f $DEV_RDSKDIR/$DISK
+ done
+}
+
+log_assert "Verify zdb -d works on imported/exported pool with pool/dataset argument"
+log_onexit cleanup
+
+verify_runnable "global"
+verify_disk_count "$DISKS" 2
+
+default_mirror_setup_noexit $DISKS
+log_must zfs snap $TESTPOOL/$TESTFS@snap
+
+log_must zdb -d $TESTPOOL
+log_must zdb -d $TESTPOOL/
+log_must zdb -d $TESTPOOL/$TESTFS
+log_must zdb -d $TESTPOOL/$TESTFS@snap
+
+log_must zpool export $TESTPOOL
+
+log_must zdb -ed $TESTPOOL
+log_must zdb -ed $TESTPOOL/
+log_must zdb -ed $TESTPOOL/$TESTFS
+log_must zdb -ed $TESTPOOL/$TESTFS@snap
+
+log_must zpool import $TESTPOOL
+
+cleanup
+
+log_pass "zdb -d works on imported/exported pool with pool/dataset argument"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/create-o_ashift.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/create-o_ashift.ksh
index 6449c8a913ed..6a9c3e28c3a4 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/create-o_ashift.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/create-o_ashift.ksh
@@ -44,47 +44,45 @@ verify_runnable "global"
function cleanup
{
- poolexists $TESTPOOL && destroy_pool $TESTPOOL
+ destroy_pool $TESTPOOL
log_must rm -f $disk
}
#
-# Commit the specified number of TXGs to the provided pool
-# We use 'zpool sync' here because we can't force it via sync(1) like on illumos
-# $1 pool name
-# $2 number of txg syncs
+# Fill the uberblock ring in every label: we do this by committing
+# TXGs to the provided pool until every slot contains a valid uberblock.
+# NOTE: We use 'zpool sync' here because we can't force it via sync(1) like on
+# illumos
#
-function txg_sync
+function write_device_uberblocks # <device> <pool>
{
- typeset pool=$1
- typeset -i count=$2
- typeset -i i=0;
+ typeset device=$1
+ typeset pool=$2
- while [ $i -lt $count ]
+ while [ "$(zdb -quuul $device | grep -c 'invalid')" -ne 0 ]
do
- log_must sync_pool $pool true
- ((i = i + 1))
+ sync_pool $pool true
done
}
#
-# Verify device $1 labels contains $2 valid uberblocks in every label
-# $1 device
-# $2 uberblocks count
+# Verify that every label on the device contains the expected number of
+# (valid) uberblocks
#
-function verify_device_uberblocks
+function verify_device_uberblocks # <device> <count>
{
typeset device=$1
typeset ubcount=$2
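+ # Each valid uberblock is expected to show up exactly 4 times (once per
+ # label), and the number of distinct valid uberblocks must equal ubcount.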
zdb -quuul $device | egrep '^(\s+)?Uberblock' |
- egrep -v 'invalid$' | awk \
- -v ubcount=$ubcount '{ uberblocks[$0]++; }
- END { for (i in uberblocks) {
- count++;
- if (uberblocks[i] != 4) { exit 1; }
- }
- if (count != ubcount) { exit 1; } }'
+ awk -v ubcount=$ubcount 'BEGIN { count=0 } { uberblocks[$0]++; }
+ END {
+ for (i in uberblocks) {
+ if (i ~ /invalid/) { continue; }
+ if (uberblocks[i] != 4) { exit 1; }
+ count++;
+ }
+ if (count != ubcount) { exit 1; }
+ }'
return $?
}
@@ -110,8 +108,7 @@ do
log_fail "Pool was created without setting ashift value to "\
"$ashift (current = $pprop)"
fi
- # force 128 txg sync to fill the uberblock ring
- txg_sync $TESTPOOL 128
+ write_device_uberblocks $disk $TESTPOOL
verify_device_uberblocks $disk ${ubcount[$i]}
if [[ $? -ne 0 ]]
then
diff --git a/tests/zfs-tests/tests/functional/ctime/.gitignore b/tests/zfs-tests/tests/functional/ctime/.gitignore
index ead826c04b9c..9e4539d5fee0 100644
--- a/tests/zfs-tests/tests/functional/ctime/.gitignore
+++ b/tests/zfs-tests/tests/functional/ctime/.gitignore
@@ -1 +1 @@
-/ctime_001_pos
+/ctime
diff --git a/tests/zfs-tests/tests/functional/ctime/ctime_001_pos.ksh b/tests/zfs-tests/tests/functional/ctime/ctime_001_pos.ksh
old mode 100644
new mode 100755
diff --git a/tests/zfs-tests/tests/functional/events/cleanup.ksh b/tests/zfs-tests/tests/functional/events/cleanup.ksh
index bc536e260f97..4905342b713b 100755
--- a/tests/zfs-tests/tests/functional/events/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/events/cleanup.ksh
@@ -26,6 +26,6 @@
. $STF_SUITE/include/libtest.shlib
-zed_cleanup
+zed_cleanup all-debug.sh all-syslog.sh
default_cleanup
diff --git a/tests/zfs-tests/tests/functional/events/setup.ksh b/tests/zfs-tests/tests/functional/events/setup.ksh
index 7113c1f39fd7..2f81d16b1814 100755
--- a/tests/zfs-tests/tests/functional/events/setup.ksh
+++ b/tests/zfs-tests/tests/functional/events/setup.ksh
@@ -28,6 +28,6 @@
DISK=${DISKS%% *}
-zed_setup
+zed_setup all-debug.sh all-syslog.sh
default_setup $DISK
diff --git a/tests/zfs-tests/tests/functional/fault/Makefile.am b/tests/zfs-tests/tests/functional/fault/Makefile.am
index eeff31261080..abe28501d026 100644
--- a/tests/zfs-tests/tests/functional/fault/Makefile.am
+++ b/tests/zfs-tests/tests/functional/fault/Makefile.am
@@ -4,4 +4,5 @@ dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
auto_online_001_pos.ksh \
- auto_replace_001_pos.ksh
+ auto_replace_001_pos.ksh \
+ scrub_after_resilver.ksh
diff --git a/tests/zfs-tests/tests/functional/fault/cleanup.ksh b/tests/zfs-tests/tests/functional/fault/cleanup.ksh
index f39f05d6fe8e..d3de742b37b6 100755
--- a/tests/zfs-tests/tests/functional/fault/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/fault/cleanup.ksh
@@ -31,7 +31,7 @@ verify_runnable "global"
cleanup_devices $DISKS
zed_stop
-zed_cleanup
+zed_cleanup resilver_finish-start-scrub.sh
SD=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}')
SDDEVICE=$(echo $SD | nawk -F / '{print $3}')
diff --git a/tests/zfs-tests/tests/functional/fault/scrub_after_resilver.ksh b/tests/zfs-tests/tests/functional/fault/scrub_after_resilver.ksh
new file mode 100755
index 000000000000..558cb065f7f9
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/fault/scrub_after_resilver.ksh
@@ -0,0 +1,65 @@
+#!/bin/ksh -p
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
+# All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/fault/fault.cfg
+
+#
+# DESCRIPTION:
+# Test the scrub after resilver zedlet
+#
+# STRATEGY:
+# 1. Create a mirrored pool
+# 2. Fault a disk
+# 3. Replace the disk, starting a resilver
+# 4. Verify that a scrub happens after the resilver finishes
+#
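+# The zedlet under test, resilver_finish-start-scrub.sh, is installed by
+# zed_setup in this suite's setup.ksh and removed again in cleanup.ksh.
+#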
+
+log_assert "Testing the scrub after resilver zedlet"
+
+# Backup our zed.rc
+zedrc_backup="$(mktemp)"
+log_must cp $ZEDLET_DIR/zed.rc $zedrc_backup
+
+# Enable ZED_SCRUB_AFTER_RESILVER
+eval "sed -i 's/\#ZED_SCRUB_AFTER_RESILVER/ZED_SCRUB_AFTER_RESILVER/g' $ZEDLET_DIR/zed.rc"
+
+function cleanup
+{
+ # Restore our zed.rc
+ log_must mv $zedrc_backup $ZEDLET_DIR/zed.rc
+ default_cleanup_noexit
+}
+
+log_onexit cleanup
+
+verify_disk_count "$DISKS" 3
+default_mirror_setup_noexit $DISK1 $DISK2
+
+log_must zpool offline -f $TESTPOOL $DISK1
+
+# Write to our degraded pool so we have some data to resilver
+log_must mkfile 16M $TESTDIR/file1
+
+# Replace the failed disk, forcing a resilver
+log_must zpool replace $TESTPOOL $DISK1 $DISK3
+
+# Wait for the resilver to finish, and then the subsequent scrub to finish.
+# Waiting for the scrub has the effect of waiting for both. Timeout after 10
+# seconds if nothing is happening.
+log_must wait_scrubbed $TESTPOOL 10
+log_pass "Successfully ran the scrub after resilver zedlet"
diff --git a/tests/zfs-tests/tests/functional/fault/setup.ksh b/tests/zfs-tests/tests/functional/fault/setup.ksh
index 3d54d4f21754..484bc4587519 100755
--- a/tests/zfs-tests/tests/functional/fault/setup.ksh
+++ b/tests/zfs-tests/tests/functional/fault/setup.ksh
@@ -28,7 +28,7 @@
verify_runnable "global"
-zed_setup
+zed_setup resilver_finish-start-scrub.sh
zed_start
# Create a scsi_debug device to be used with auto-online (if using loop devices)
diff --git a/tests/zfs-tests/tests/functional/migration/migration_001_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_001_pos.ksh
index 4d5fbb9ff5ed..875d2f7c78be 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_001_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "tar cf $TESTDIR/tar$$.tar $BNAME"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $TESTDIR $SUMA $SUMB "tar xf $TESTDIR/tar$$.tar"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from ZFS fs to ZFS fs".
+log_pass "Successfully migrated test file from ZFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_002_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_002_pos.ksh
index e0655248dee5..6b97e2a4071b 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_002_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_002_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "tar cf $TESTDIR/tar$$.tar $BNAME"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $NONZFS_TESTDIR $SUMA $SUMB "tar xf $TESTDIR/tar$$.tar"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to UFS fs"
-log_pass "Successully migrated test file from ZFS fs to UFS fs".
+log_pass "Successfully migrated test file from ZFS fs to UFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_003_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_003_pos.ksh
index 904a2b1a0427..dd0baeaa9b78 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_003_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_003_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "tar cf $NONZFS_TESTDIR/tar$$.tar $BNAME"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $TESTDIR $SUMA $SUMB "tar xvf $NONZFS_TESTDIR/tar$$.tar"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"UFS fs to ZFS fs"
-log_pass "Successully migrated test file from UFS fs to ZFS fs".
+log_pass "Successfully migrated test file from UFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_004_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_004_pos.ksh
index 6d33dd5b321c..00a6cc172ab6 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_004_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_004_pos.ksh
@@ -67,7 +67,7 @@ cd $cwd
(( $? != 0 )) && log_untested "Could not change directory to $cwd"
migrate_cpio $TESTDIR "$TESTDIR/cpio$$.cpio" $SUMA $SUMB
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from ZFS fs to ZFS fs".
+log_pass "Successfully migrated test file from ZFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_005_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_005_pos.ksh
index a41b19b5fa08..4386596f777d 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_005_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_005_pos.ksh
@@ -67,7 +67,7 @@ cd $cwd
(( $? != 0 )) && log_untested "Could not change directory to $cwd"
migrate_cpio $NONZFS_TESTDIR "$TESTDIR/cpio$$.cpio" $SUMA $SUMB
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to UFS fs"
-log_pass "Successully migrated test file from ZFS fs to UFS fs".
+log_pass "Successfully migrated test file from ZFS fs to UFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_006_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_006_pos.ksh
index 5b444421a323..9b5c9166ed97 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_006_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_006_pos.ksh
@@ -67,7 +67,7 @@ cd $cwd
(( $? != 0 )) && log_untested "Could not change directory to $cwd"
migrate_cpio $TESTDIR "$NONZFS_TESTDIR/cpio$$.cpio" $SUMA $SUMB
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from UFS fs to ZFS fs".
+log_pass "Successfully migrated test file from UFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_007_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_007_pos.ksh
index c3197052ce08..0d136550f740 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_007_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_007_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "dd if=$BNAME obs=128k of=$TESTDIR/dd$$.dd"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $TESTDIR $SUMA $SUMB "dd if=$TESTDIR/dd$$.dd obs=128k of=$BNAME"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from ZFS fs to ZFS fs".
+log_pass "Successfully migrated test file from ZFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_008_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_008_pos.ksh
index 2e51eef369b3..f62b1f33a3e5 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_008_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_008_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "dd if=$BNAME obs=128k of=$TESTDIR/dd$$.dd"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $NONZFS_TESTDIR $SUMA $SUMB "dd if=$TESTDIR/dd$$.dd obs=128k of=$BNAME"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from ZFS fs to UFS fs".
+log_pass "Successfully migrated test file from ZFS fs to UFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_009_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_009_pos.ksh
index 7749494e58e4..907be39eb4dd 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_009_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_009_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "dd if=$BNAME obs=128k of=$NONZFS_TESTDIR/dd$$.dd"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $TESTDIR $SUMA $SUMB "dd if=$NONZFS_TESTDIR/dd$$.dd obs=128k of=$BNAME"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from UFS fs to ZFS fs".
+log_pass "Successfully migrated test file from UFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_010_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_010_pos.ksh
index a11ab72dff17..e80dd67cdc21 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_010_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_010_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "cp $BNAME $TESTDIR/cp$$.cp"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $TESTDIR $SUMA $SUMB "cp $TESTDIR/cp$$.cp $BNAME"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to ZFS fs"
-log_pass "Successully migrated test file from ZFS fs to ZFS fs".
+log_pass "Successfully migrated test file from ZFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_011_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_011_pos.ksh
index 17e1c78f987b..2d7ecb45eadb 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_011_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_011_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "cp $BNAME $TESTDIR/cp$$.cp"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $NONZFS_TESTDIR $SUMA $SUMB "cp $TESTDIR/cp$$.cp $BNAME"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"ZFS fs to UFS fs"
-log_pass "Successully migrated test file from ZFS fs to UFS fs".
+log_pass "Successfully migrated test file from ZFS fs to UFS fs".
diff --git a/tests/zfs-tests/tests/functional/migration/migration_012_pos.ksh b/tests/zfs-tests/tests/functional/migration/migration_012_pos.ksh
index 823dabeae4b5..fd9c4549164a 100755
--- a/tests/zfs-tests/tests/functional/migration/migration_012_pos.ksh
+++ b/tests/zfs-tests/tests/functional/migration/migration_012_pos.ksh
@@ -60,7 +60,7 @@ prepare $DNAME "cp $BNAME $NONZFS_TESTDIR/cp$$.cp"
(( $? != 0 )) && log_fail "Unable to create src archive"
migrate $TESTDIR $SUMA $SUMB "cp $NONZFS_TESTDIR/cp$$.cp $BNAME"
-(( $? != 0 )) && log_fail "Uable to successfully migrate test file from" \
+(( $? != 0 )) && log_fail "Unable to successfully migrate test file from" \
"UFS fs to ZFS fs"
-log_pass "Successully migrated test file from UFS fs to ZFS fs".
+log_pass "Successfully migrated test file from UFS fs to ZFS fs".
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib
index 4c46ae7a2add..1f8d66fd962c 100644
--- a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib
+++ b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib
@@ -198,6 +198,6 @@ function count_uberblocks # pool duration
typeset -i duration=$2
typeset hist_path="/proc/spl/kstat/zfs/$pool/multihost"
- log_must sleep $duration
+ sleep $duration
echo $(cat "$hist_path" | sed '1,2d' | wc -l)
}