+ combrec=1
++ dirname zfs/scripts/raidz_expand_test.sh
+ BASE_DIR=zfs/scripts/..
+ echo 1
+ zpool destroy test
cannot open 'test': no such pool
+ zpool create filepool sdb
+ zfs destroy -R filepool/files
cannot open 'filepool/files': dataset does not exist
+ zfs create -o compression=on filepool/files
+ dir=/filepool/files
+ (( i=0 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/0
+ (( i=i+1 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/1
+ (( i=i+1 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/2
+ (( i=i+1 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/3
+ (( i=i+1 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/4
+ (( i=i+1 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/5
+ (( i=i+1 ))
+ (( i<7 ))
+ truncate -s 512M /filepool/files/6
+ (( i=i+1 ))
+ (( i<7 ))
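The unrolled (( i<7 )) / truncate trace above corresponds to a plain setup loop; a minimal sketch reconstructed from the trace, with the path and size taken verbatim from it:

# create seven sparse 512 MiB files to act as backing "disks" for the test pool
for ((i = 0; i < 7; i++)); do
        truncate -s 512M /filepool/files/$i
done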
+ dotest 2
+ nparity=2
+ zpool create -o cachefile=none test raidz2 /filepool/files/0 /filepool/files/1 /filepool/files/2 /filepool/files/3 /filepool/files/4 /filepool/files/5
+ zfs set primarycache=metadata test
+ zfs create test/fs
+ dd if=/dev/urandom of=/test/fs/file bs=1024k count=1
1+0 records in
1+0 records out
1048576 bytes (1.0 MB, 1.0 MiB) copied, 0.0048697 s, 215 MB/s
+ zfs create -o compress=on test/fs2
+ cp -r zfs/scripts/.. /test/fs2/
+ zfs create -o compress=on -o recordsize=8k test/fs3
+ cp -r zfs/scripts/.. /test/fs3/
+ zfs snapshot filepool/files@pre-attach
+ sum /test/fs/file
01704 1024
+ sum /test/fs2/file
sum: /test/fs2/file: No such file or directory
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zfs list test
NAME  USED  AVAIL  REFER  MOUNTPOINT
test  378M  1.34G  46.0K  /test
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  568M  2.20G  -  -  0%  20%  1.00x  ONLINE  -
  raidz2  2.75G  568M  2.20G  -  -  0%  20.1%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ sleep 2
+ zpool attach test raidz2-0 /filepool/files/6
cannot attach /filepool/files/6 to raidz2-0: can only attach to mirrors and top-level disks
*****************************************************
* Thank you for testing this alpha-quality release  *
* of RAID-Z expansion. This feature should only     *
* be used on test pools. The pool will eventually   *
* need to be DESTROYED, because the on-disk format  *
* will not be compatible with the final release.    *
* Additionally, there are currently bugs in RAID-Z  *
* expansion which can occasionally cause data loss. *
* Please report bugs to mahrens@delphix.com.        *
*****************************************************
You have 5 seconds to abort by pressing ^C (control-C)
You have 4 seconds to abort by pressing ^C (control-C)
You have 3 seconds to abort by pressing ^C (control-C)
You have 2 seconds to abort by pressing ^C (control-C)
You have 1 seconds to abort by pressing ^C (control-C)
+ wait_completion
+ zpool status test
+ grep 'in progress'
+ zfs list test
NAME  USED  AVAIL  REFER  MOUNTPOINT
test  385M  1.33G  46.0K  /test
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  579M  2.18G  -  -  0%  20%  1.00x  ONLINE  -
  raidz2  2.75G  579M  2.18G  -  -  0%  20.6%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ zfs snapshot filepool/files@post-attach
+ zpool export test
+ zpool import -o cachefile=none -d /filepool/files test
+ zfs snapshot filepool/files@post-import
+ sum /test/fs/file
01704 1024
+ sum /test/fs2/file
sum: /test/fs2/file: No such file or directory
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zfs list -r test
NAME      USED   AVAIL  REFER  MOUNTPOINT
test      385M   1.33G  46.0K  /test
test/fs   1.04M  1.33G  1.04M  /test/fs
test/fs2  174M   1.33G  174M   /test/fs2
test/fs3  210M   1.33G  210M   /test/fs3
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  579M  2.19G  -  -  0%  20%  1.00x  ONLINE  -
  raidz2  2.75G  579M  2.19G  -  -  0%  20.5%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: none requested
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:39:56 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:39:58 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool export test
+ zpool import -o cachefile=none -d /filepool/files test
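wait_completion appears throughout the trace as a `zpool status | grep 'in progress'` poll followed by `sleep 5`; a minimal reconstruction implied by that pattern (the hard-coded pool name matches the trace, everything else is an assumption):

wait_completion() {
        # keep polling while zpool status still reports a scan in progress;
        # the 5-second interval matches the sleeps in the trace
        while zpool status test | grep 'in progress'; do
                sleep 5
        done
}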
+ (( i=0 ))
+ (( i<2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/0 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.209113 s, 2.5 GB/s
+ (( i=i+1 ))
+ (( i<2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/1 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.194468 s, 2.7 GB/s
+ (( i=i+1 ))
+ (( i<2 ))
+ sum /test/fs/file
01704 1024
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:39:58 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  9
        /filepool/files/1  ONLINE  0  0  8
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ [[ -n 1 ]]
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:40:04 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 183M in 0 days 00:00:04 with 0 errors on Tue Mar 31 10:40:08 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  50.6K
        /filepool/files/1  ONLINE  0  0  51.6K
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool clear test
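The damage pattern above overwrites nparity backing files at a time with zeros. The seek=4 / count=500 arguments on the 512 MiB files appear chosen so that the ZFS vdev labels survive: the first 4 MiB (the two front labels plus the boot-block region) are skipped and the last 8 MiB, which contain the two trailing labels, are never touched, so the pool still imports and only data and parity sectors are damaged. A sketch of the first corruption round implied by the trace (loop bounds and variable names are assumptions):

# zero out $nparity of the backing files, leaving the vdev labels at both
# ends of each file intact so the pool remains importable
for ((i = 0; i < nparity; i++)); do
        dd conv=notrunc if=/dev/zero of=/filepool/files/$i \
            bs=1024k seek=4 count=500
done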
+ (( i=2 ))
+ (( i<2*2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/2 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.17767 s, 3.0 GB/s
+ (( i=i+1 ))
+ (( i<2*2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/3 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.210454 s, 2.5 GB/s
+ (( i=i+1 ))
+ (( i<2*2 ))
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 183M in 0 days 00:00:04 with 0 errors on Tue Mar 31 10:40:08 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ [[ -n 1 ]]
+ zpool scrub test
+ wait_completion
+ grep 'in progress'
+ zpool status test
  scan: scrub in progress since Tue Mar 31 10:40:10 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 186M in 0 days 00:00:04 with 0 errors on Tue Mar 31 10:40:14 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  52.5K
        /filepool/files/3  ONLINE  0  0  50.8K
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool clear test
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:40:15 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:40:17 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz2-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool destroy test
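Each `dotest <nparity>` pass that follows repeats the same shape as the raidz2 pass above. A condensed, hypothetical reconstruction of that driver, assuming only the function and variable names visible in the trace and omitting the per-step verification output:

dotest() {
        local nparity=$1
        zpool create -o cachefile=none test raidz$nparity \
            /filepool/files/0 /filepool/files/1 /filepool/files/2 \
            /filepool/files/3 /filepool/files/4 /filepool/files/5
        zfs set primarycache=metadata test
        zfs create test/fs
        dd if=/dev/urandom of=/test/fs/file bs=1024k count=1
        zfs create -o compress=on test/fs2
        cp -r $BASE_DIR /test/fs2/
        zfs create -o compress=on -o recordsize=8k test/fs3
        cp -r $BASE_DIR /test/fs3/

        # expand the raidz vdev by one disk, then re-verify the pool
        zpool attach test raidz${nparity}-0 /filepool/files/6
        wait_completion
        zpool export test
        zpool import -o cachefile=none -d /filepool/files test
        zpool scrub test
        wait_completion

        # two rounds of corrupting nparity disks followed by a repairing
        # scrub (see the corruption sketch above), then a final clean scrub
        zpool destroy test
}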
+ dotest 3
+ nparity=3
+ zpool create -o cachefile=none test raidz3 /filepool/files/0 /filepool/files/1 /filepool/files/2 /filepool/files/3 /filepool/files/4 /filepool/files/5
+ zfs set primarycache=metadata test
+ zfs create test/fs
+ dd if=/dev/urandom of=/test/fs/file bs=1024k count=1
1+0 records in
1+0 records out
1048576 bytes (1.0 MB, 1.0 MiB) copied, 0.00601748 s, 174 MB/s
+ zfs create -o compress=on test/fs2
+ cp -r zfs/scripts/.. /test/fs2/
+ zfs create -o compress=on -o recordsize=8k test/fs3
+ cp -r zfs/scripts/.. /test/fs3/
+ zfs snapshot filepool/files@pre-attach
cannot create snapshot 'filepool/files@pre-attach': dataset already exists
+ sum /test/fs/file
28597 1024
+ sum /test/fs2/file
sum: /test/fs2/file: No such file or directory
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zfs list test
NAME  USED  AVAIL  REFER  MOUNTPOINT
test  391M  878M  45.6K  /test
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  789M  1.98G  -  -  0%  28%  1.00x  ONLINE  -
  raidz3  2.75G  789M  1.98G  -  -  0%  28.0%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ sleep 2
+ zpool attach test raidz3-0 /filepool/files/6
cannot attach /filepool/files/6 to raidz3-0: can only attach to mirrors and top-level disks
*****************************************************
* Thank you for testing this alpha-quality release  *
* of RAID-Z expansion. This feature should only     *
* be used on test pools. The pool will eventually   *
* need to be DESTROYED, because the on-disk format  *
* will not be compatible with the final release.    *
* Additionally, there are currently bugs in RAID-Z  *
* expansion which can occasionally cause data loss. *
* Please report bugs to mahrens@delphix.com.        *
*****************************************************
You have 5 seconds to abort by pressing ^C (control-C)
You have 4 seconds to abort by pressing ^C (control-C)
You have 3 seconds to abort by pressing ^C (control-C)
You have 2 seconds to abort by pressing ^C (control-C)
You have 1 seconds to abort by pressing ^C (control-C)
+ wait_completion
+ zpool status test
+ grep 'in progress'
+ zfs list test
NAME  USED  AVAIL  REFER  MOUNTPOINT
test  393M  876M  45.6K  /test
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  792M  1.98G  -  -  0%  28%  1.00x  ONLINE  -
  raidz3  2.75G  792M  1.98G  -  -  0%  28.1%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ zfs snapshot filepool/files@post-attach
cannot create snapshot 'filepool/files@post-attach': dataset already exists
+ zpool export test
+ zpool import -o cachefile=none -d /filepool/files test
+ zfs snapshot filepool/files@post-import
cannot create snapshot 'filepool/files@post-import': dataset already exists
+ sum /test/fs/file
28597 1024
+ sum /test/fs2/file
sum: /test/fs2/file: No such file or directory
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zfs list -r test
NAME      USED   AVAIL  REFER  MOUNTPOINT
test      393M   876M   45.6K  /test
test/fs   1.04M  876M   1.04M  /test/fs
test/fs2  174M   876M   174M   /test/fs2
test/fs3  217M   876M   217M   /test/fs3
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  792M  1.98G  -  -  0%  28%  1.00x  ONLINE  -
  raidz3  2.75G  792M  1.98G  -  -  0%  28.1%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: none requested
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:40:47 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:40:49 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool export test
+ zpool import -o cachefile=none -d /filepool/files test
+ (( i=0 ))
+ (( i<3 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/0 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.214293 s, 2.4 GB/s
+ (( i=i+1 ))
+ (( i<3 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/1 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.168997 s, 3.1 GB/s
+ (( i=i+1 ))
+ (( i<3 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/2 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.180286 s, 2.9 GB/s
+ (( i=i+1 ))
+ (( i<3 ))
+ sum /test/fs/file
28597 1024
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:40:49 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ [[ -n 1 ]]
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:40:55 2020
+ sleep 5
+ grep 'in progress'
+ zpool status test
  scan: scrub in progress since Tue Mar 31 10:40:55 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 377M in 0 days 00:00:08 with 0 errors on Tue Mar 31 10:41:03 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  55.8K
        /filepool/files/1  ONLINE  0  0  53.0K
        /filepool/files/2  ONLINE  0  0  55.7K
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool clear test
+ (( i=3 ))
+ (( i<3*2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/3 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.1314 s, 4.0 GB/s
+ (( i=i+1 ))
+ (( i<3*2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/4 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.151901 s, 3.5 GB/s
+ (( i=i+1 ))
+ (( i<3*2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/5 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.149647 s, 3.5 GB/s
+ (( i=i+1 ))
+ (( i<3*2 ))
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 377M in 0 days 00:00:08 with 0 errors on Tue Mar 31 10:41:03 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ [[ -n 1 ]]
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:41:06 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:41:06 2020
+ sleep 5
+ grep 'in progress'
+ zpool status test
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 372M in 0 days 00:00:09 with 0 errors on Tue Mar 31 10:41:15 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  53.0K
        /filepool/files/4  ONLINE  0  0  55.8K
        /filepool/files/5  ONLINE  0  0  53.1K
errors: No known data errors
+ zpool clear test
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:41:16 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:41:18 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz3-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool destroy test
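After each corruption round the trace repeats the same verify-and-repair sequence: read the test file back, inspect the CKSUM counters, scrub, confirm the repaired byte count, then reset the counters. A hypothetical helper capturing that pattern; the name verify_and_repair is an assumption, the individual commands are taken from the trace:

verify_and_repair() {
        sum /test/fs/file      # should still match the pre-attach checksum
        zpool status -v test   # CKSUM column shows the injected damage
        zpool scrub test
        wait_completion
        zpool status -v test   # scrub report shows how much was repaired
        zpool clear test       # reset error counters for the next round
}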
+ dotest 1
+ nparity=1
+ zpool create -o cachefile=none test raidz1 /filepool/files/0 /filepool/files/1 /filepool/files/2 /filepool/files/3 /filepool/files/4 /filepool/files/5
+ zfs set primarycache=metadata test
+ zfs create test/fs
+ dd if=/dev/urandom of=/test/fs/file bs=1024k count=1
1+0 records in
1+0 records out
1048576 bytes (1.0 MB, 1.0 MiB) copied, 0.00394107 s, 266 MB/s
+ zfs create -o compress=on test/fs2
+ cp -r zfs/scripts/.. /test/fs2/
+ zfs create -o compress=on -o recordsize=8k test/fs3
+ cp -r zfs/scripts/.. /test/fs3/
+ zfs snapshot filepool/files@pre-attach
cannot create snapshot 'filepool/files@pre-attach': dataset already exists
+ sum /test/fs/file
09626 1024
+ sum /test/fs2/file
sum: /test/fs2/file: No such file or directory
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zfs list test
NAME  USED  AVAIL  REFER  MOUNTPOINT
test  377M  1.79G  38.2K  /test
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  454M  2.31G  -  -  0%  16%  1.00x  ONLINE  -
  raidz1  2.75G  454M  2.31G  -  -  0%  16.1%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ sleep 2
+ zpool attach test raidz1-0 /filepool/files/6
cannot attach /filepool/files/6 to raidz1-0: can only attach to mirrors and top-level disks
*****************************************************
* Thank you for testing this alpha-quality release  *
* of RAID-Z expansion. This feature should only     *
* be used on test pools. The pool will eventually   *
* need to be DESTROYED, because the on-disk format  *
* will not be compatible with the final release.    *
* Additionally, there are currently bugs in RAID-Z  *
* expansion which can occasionally cause data loss. *
* Please report bugs to mahrens@delphix.com.        *
*****************************************************
You have 5 seconds to abort by pressing ^C (control-C)
You have 4 seconds to abort by pressing ^C (control-C)
You have 3 seconds to abort by pressing ^C (control-C)
You have 2 seconds to abort by pressing ^C (control-C)
You have 1 seconds to abort by pressing ^C (control-C)
+ wait_completion
+ zpool status test
+ grep 'in progress'
+ zfs list test
NAME  USED  AVAIL  REFER  MOUNTPOINT
test  377M  1.79G  38.2K  /test
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  455M  2.31G  -  -  0%  16%  1.00x  ONLINE  -
  raidz1  2.75G  455M  2.31G  -  -  0%  16.1%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ zfs snapshot filepool/files@post-attach
cannot create snapshot 'filepool/files@post-attach': dataset already exists
+ zpool export test
+ zpool import -o cachefile=none -d /filepool/files test
+ zfs snapshot filepool/files@post-import
cannot create snapshot 'filepool/files@post-import': dataset already exists
+ sum /test/fs/file
09626 1024
+ sum /test/fs2/file
sum: /test/fs2/file: No such file or directory
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zfs list -r test
NAME      USED   AVAIL  REFER  MOUNTPOINT
test      377M   1.79G  38.2K  /test
test/fs   1.04M  1.79G  1.04M  /test/fs
test/fs2  172M   1.79G  172M   /test/fs2
test/fs3  205M   1.79G  205M   /test/fs3
+ zpool list -v test
NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
test  2.75G  455M  2.31G  -  -  0%  16%  1.00x  ONLINE  -
  raidz1  2.75G  455M  2.31G  -  -  0%  16.1%  -  ONLINE
    /filepool/files/0  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/1  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/2  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/3  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/4  -  -  -  -  -  -  -  -  ONLINE
    /filepool/files/5  -  -  -  -  -  -  -  -  ONLINE
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: none requested
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:41:42 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:41:44 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool export test
+ zpool import -o cachefile=none -d /filepool/files test
+ (( i=0 ))
+ (( i<1 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/0 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.221945 s, 2.4 GB/s
+ (( i=i+1 ))
+ (( i<1 ))
+ sum /test/fs/file
09626 1024
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:41:44 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  7
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ [[ -n 1 ]]
+ zpool scrub test
+ wait_completion
+ grep 'in progress'
+ zpool status test
  scan: scrub in progress since Tue Mar 31 10:41:49 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 75.6M in 0 days 00:00:03 with 0 errors on Tue Mar 31 10:41:52 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  50.1K
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool clear test
+ (( i=1 ))
+ (( i<1*2 ))
+ [[ ! -n 1 ]]
+ dd conv=notrunc if=/dev/zero of=/filepool/files/1 bs=1024k seek=4 count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 0.332798 s, 1.6 GB/s
+ (( i=i+1 ))
+ (( i<1*2 ))
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 75.6M in 0 days 00:00:03 with 0 errors on Tue Mar 31 10:41:52 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ [[ -n 1 ]]
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:41:55 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
status: One or more devices has experienced an unrecoverable error. An
        attempt was made to correct the error. Applications are unaffected.
action: Determine if the device needs to be replaced, and clear the errors
        using 'zpool clear' or replace the device with 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-9P
  scan: scrub repaired 72.2M in 0 days 00:00:03 with 0 errors on Tue Mar 31 10:41:58 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  46.7K
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool clear test
+ sum /test/fs3/file
sum: /test/fs3/file: No such file or directory
+ zpool scrub test
+ wait_completion
+ zpool status test
+ grep 'in progress'
  scan: scrub in progress since Tue Mar 31 10:42:01 2020
+ sleep 5
+ zpool status test
+ grep 'in progress'
+ zpool status -v test
  pool: test
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:00:02 with 0 errors on Tue Mar 31 10:42:03 2020
config:
    NAME  STATE  READ  WRITE  CKSUM
    test  ONLINE  0  0  0
      raidz1-0  ONLINE  0  0  0
        /filepool/files/0  ONLINE  0  0  0
        /filepool/files/1  ONLINE  0  0  0
        /filepool/files/2  ONLINE  0  0  0
        /filepool/files/3  ONLINE  0  0  0
        /filepool/files/4  ONLINE  0  0  0
        /filepool/files/5  ONLINE  0  0  0
errors: No known data errors
+ zpool destroy test
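Data integrity in the trace is checked by eyeballing the `sum` output before and after each expansion (01704 1024, 28597 1024 and 09626 1024 stay identical across attach, export/import and repair). A small, hypothetical tightening of that check; the variable names are assumptions:

# record the checksum before the expansion, then fail loudly if it changes
before=$(sum /test/fs/file)
# ... attach, export/import, corrupt and scrub as in the trace ...
after=$(sum /test/fs/file)
if [[ "$before" != "$after" ]]; then
        echo "checksum mismatch on /test/fs/file" >&2
        exit 1
fi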