Skip to content

Commit f3de482

Browse files
bwatkinson authored and lundman committed
Always validate checksums for Direct I/O reads
This fixes an oversight in the Direct I/O PR. There is nothing that stops a process from manipulating the contents of a buffer for a Direct I/O read while the I/O is in flight. This can lead checksum verify failures. However, the disk contents are still correct, and this would lead to false reporting of checksum validation failures. To remedy this, all Direct I/O reads that have a checksum verification failure are treated as suspicious. In the event a checksum validation failure occurs for a Direct I/O read, then the I/O request will be reissued though the ARC. This allows for actual validation to happen and removes any possibility of the buffer being manipulated after the I/O has been issued. Just as with Direct I/O write checksum validation failures, Direct I/O read checksum validation failures are reported though zpool status -d in the DIO column. Also the zevent has been updated to have both: 1. dio_verify_wr -> Checksum verification failure for writes 2. dio_verify_rd -> Checksum verification failure for reads. This allows for determining what I/O operation was the culprit for the checksum verification failure. All DIO errors are reported only on the top-level VDEV. Even though FreeBSD can write protect pages (stable pages) it still has the same issue as Linux with Direct I/O reads. This commit updates the following: 1. Propogates checksum failures for reads all the way up to the top-level VDEV. 2. Reports errors through zpool status -d as DIO. 3. Has two zevents for checksum verify errors with Direct I/O. One for read and one for write. 4. Updates FreeBSD ABD code to also check for ABD_FLAG_FROM_PAGES and handle ABD buffer contents validation the same as Linux. 5. Updated manipulate_user_buffer.c to also manipulate a buffer while a Direct I/O read is taking place. 6. Adds a new ZTS test case dio_read_verify that stress tests the new code. 7. Updated man pages. 8. 
Added an IMPLY statement to zio_checksum_verify() to make sure that Direct I/O reads are not issued as speculative. 9. Removed self healing through mirror, raidz, and dRAID VDEVs for Direct I/O reads. This issue was first observed when installing a Windows 11 VM on a ZFS dataset with the dataset property direct set to always. The zpool devices would report checksum failures, but running a subsequent zpool scrub would not repair any data and report no errors. Reviewed-by: Tony Hutter <[email protected]> Reviewed-by: Brian Behlendorf <[email protected]> Reviewed-by: Alexander Motin <[email protected]> Signed-off-by: Brian Atkinson <[email protected]> Closes openzfs#16598
1 parent 8ab02ca commit f3de482

24 files changed

+510
-146
lines changed

cmd/zpool/zpool_main.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9224,6 +9224,12 @@ vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
92249224
}
92259225
}
92269226

9227+
if (cb->cb_print_dio_verify) {
9228+
nice_num_str_nvlist(vds, "dio_verify_errors",
9229+
vs->vs_dio_verify_errors, cb->cb_literal,
9230+
cb->cb_json_as_int, ZFS_NICENUM_1024);
9231+
}
9232+
92279233
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
92289234
&notpresent) == 0) {
92299235
nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,

include/sys/fm/fs/zfs.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ extern "C" {
4242
#define FM_EREPORT_ZFS_DATA "data"
4343
#define FM_EREPORT_ZFS_DELAY "delay"
4444
#define FM_EREPORT_ZFS_DEADMAN "deadman"
45-
#define FM_EREPORT_ZFS_DIO_VERIFY "dio_verify"
45+
#define FM_EREPORT_ZFS_DIO_VERIFY_WR "dio_verify_wr"
46+
#define FM_EREPORT_ZFS_DIO_VERIFY_RD "dio_verify_rd"
4647
#define FM_EREPORT_ZFS_POOL "zpool"
4748
#define FM_EREPORT_ZFS_DEVICE_UNKNOWN "vdev.unknown"
4849
#define FM_EREPORT_ZFS_DEVICE_OPEN_FAILED "vdev.open_failed"

include/sys/vdev_raidz.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ void vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
5757
void vdev_raidz_child_done(zio_t *);
5858
void vdev_raidz_io_done(zio_t *);
5959
void vdev_raidz_checksum_error(zio_t *, struct raidz_col *, abd_t *);
60-
struct raidz_row *vdev_raidz_row_alloc(int);
60+
struct raidz_row *vdev_raidz_row_alloc(int, zio_t *);
6161
void vdev_raidz_reflow_copy_scratch(spa_t *);
6262
void raidz_dtl_reassessed(vdev_t *);
6363

include/sys/zio.h

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -208,25 +208,25 @@ typedef uint64_t zio_flag_t;
208208
#define ZIO_FLAG_PROBE (1ULL << 16)
209209
#define ZIO_FLAG_TRYHARD (1ULL << 17)
210210
#define ZIO_FLAG_OPTIONAL (1ULL << 18)
211-
211+
#define ZIO_FLAG_DIO_READ (1ULL << 19)
212212
#define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)
213213

214214
/*
215215
* Flags not inherited by any children.
216216
*/
217-
#define ZIO_FLAG_DONT_QUEUE (1ULL << 19) /* must be first for INHERIT */
218-
#define ZIO_FLAG_DONT_PROPAGATE (1ULL << 20)
219-
#define ZIO_FLAG_IO_BYPASS (1ULL << 21)
220-
#define ZIO_FLAG_IO_REWRITE (1ULL << 22)
221-
#define ZIO_FLAG_RAW_COMPRESS (1ULL << 23)
222-
#define ZIO_FLAG_RAW_ENCRYPT (1ULL << 24)
223-
#define ZIO_FLAG_GANG_CHILD (1ULL << 25)
224-
#define ZIO_FLAG_DDT_CHILD (1ULL << 26)
225-
#define ZIO_FLAG_GODFATHER (1ULL << 27)
226-
#define ZIO_FLAG_NOPWRITE (1ULL << 28)
227-
#define ZIO_FLAG_REEXECUTED (1ULL << 29)
228-
#define ZIO_FLAG_DELEGATED (1ULL << 30)
229-
#define ZIO_FLAG_DIO_CHKSUM_ERR (1ULL << 31)
217+
#define ZIO_FLAG_DONT_QUEUE (1ULL << 20) /* must be first for INHERIT */
218+
#define ZIO_FLAG_DONT_PROPAGATE (1ULL << 21)
219+
#define ZIO_FLAG_IO_BYPASS (1ULL << 22)
220+
#define ZIO_FLAG_IO_REWRITE (1ULL << 23)
221+
#define ZIO_FLAG_RAW_COMPRESS (1ULL << 24)
222+
#define ZIO_FLAG_RAW_ENCRYPT (1ULL << 25)
223+
#define ZIO_FLAG_GANG_CHILD (1ULL << 26)
224+
#define ZIO_FLAG_DDT_CHILD (1ULL << 27)
225+
#define ZIO_FLAG_GODFATHER (1ULL << 28)
226+
#define ZIO_FLAG_NOPWRITE (1ULL << 29)
227+
#define ZIO_FLAG_REEXECUTED (1ULL << 30)
228+
#define ZIO_FLAG_DELEGATED (1ULL << 31)
229+
#define ZIO_FLAG_DIO_CHKSUM_ERR (1ULL << 32)
230230

231231
#define ZIO_ALLOCATOR_NONE (-1)
232232
#define ZIO_HAS_ALLOCATOR(zio) ((zio)->io_allocator != ZIO_ALLOCATOR_NONE)
@@ -651,6 +651,7 @@ extern void zio_vdev_io_redone(zio_t *zio);
651651
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
652652

653653
extern void zio_checksum_verified(zio_t *zio);
654+
extern void zio_dio_chksum_verify_error_report(zio_t *zio);
654655
extern int zio_worst_error(int e1, int e2);
655656

656657
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,

man/man4/zfs.4

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -436,7 +436,7 @@ write.
436436
It can also help to identify if reported checksum errors are tied to Direct I/O
437437
writes.
438438
Each verify error causes a
439-
.Sy dio_verify
439+
.Sy dio_verify_wr
440440
zevent.
441441
Direct Write I/O checkum verify errors can be seen with
442442
.Nm zpool Cm status Fl d .

man/man8/zpool-events.8

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,10 @@ This can be an indicator of problems with the underlying storage device.
9898
The number of delay events is ratelimited by the
9999
.Sy zfs_slow_io_events_per_second
100100
module parameter.
101-
.It Sy dio_verify
101+
.It Sy dio_verify_rd
102+
Issued when there was a checksum verify error after a Direct I/O read has been
103+
issued.
104+
.It Sy dio_verify_wr
102105
Issued when there was a checksum verify error after a Direct I/O write has been
103106
issued.
104107
This event can only take place if the module parameter

man/man8/zpool-status.8

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -82,14 +82,18 @@ Specify
8282
.Sy --json-pool-key-guid
8383
to set pool GUID as key for pool objects instead of pool names.
8484
.It Fl d
85-
Display the number of Direct I/O write checksum verify errors that have occured
86-
on a top-level VDEV.
85+
Display the number of Direct I/O read/write checksum verify errors that have
86+
occured on a top-level VDEV.
8787
See
8888
.Sx zfs_vdev_direct_write_verify
8989
in
9090
.Xr zfs 4
9191
for details about the conditions that can cause Direct I/O write checksum
9292
verify failures to occur.
93+
Direct I/O reads checksum verify errors can also occur if the contents of the
94+
buffer are being manipulated after the I/O has been issued and is in flight.
95+
In the case of Direct I/O read checksum verify errors, the I/O will be reissued
96+
through the ARC.
9397
.It Fl D
9498
Display a histogram of deduplication statistics, showing the allocated
9599
.Pq physically present on disk

module/os/freebsd/zfs/abd_os.c

Lines changed: 37 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -620,9 +620,16 @@ abd_borrow_buf_copy(abd_t *abd, size_t n)
620620

621621
/*
622622
* Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
623-
* no change the contents of the ABD and will ASSERT that you didn't modify
624-
* the buffer since it was borrowed. If you want any changes you made to buf to
625-
* be copied back to abd, use abd_return_buf_copy() instead.
623+
* not change the contents of the ABD. If you want any changes you made to
624+
* buf to be copied back to abd, use abd_return_buf_copy() instead. If the
625+
* ABD is not constructed from user pages from Direct I/O then an ASSERT
626+
* checks to make sure the contents of the buffer have not changed since it was
627+
* borrowed. We can not ASSERT the contents of the buffer have not changed if
628+
* it is composed of user pages. While Direct I/O write pages are placed under
629+
* write protection and can not be changed, this is not the case for Direct I/O
630+
* reads. The pages of a Direct I/O read could be manipulated at any time.
631+
* Checksum verifications in the ZIO pipeline check for this issue and handle
632+
* it by returning an error on checksum verification failure.
626633
*/
627634
void
628635
abd_return_buf(abd_t *abd, void *buf, size_t n)
@@ -632,8 +639,34 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
632639
#ifdef ZFS_DEBUG
633640
(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
634641
#endif
635-
if (abd_is_linear(abd)) {
642+
if (abd_is_from_pages(abd)) {
643+
if (!abd_is_linear_page(abd))
644+
zio_buf_free(buf, n);
645+
} else if (abd_is_linear(abd)) {
636646
ASSERT3P(buf, ==, abd_to_buf(abd));
647+
} else if (abd_is_gang(abd)) {
648+
#ifdef ZFS_DEBUG
649+
/*
650+
* We have to be careful with gang ABD's that we do not ASSERT
651+
* for any ABD's that contain user pages from Direct I/O. See
652+
* the comment above about Direct I/O read buffers possibly
653+
* being manipulated. In order to handle this, we jsut iterate
654+
* through the gang ABD and only verify ABD's that are not from
655+
* user pages.
656+
*/
657+
void *cmp_buf = buf;
658+
659+
for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
660+
cabd != NULL;
661+
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
662+
if (!abd_is_from_pages(cabd)) {
663+
ASSERT0(abd_cmp_buf(cabd, cmp_buf,
664+
cabd->abd_size));
665+
}
666+
cmp_buf = (char *)cmp_buf + cabd->abd_size;
667+
}
668+
#endif
669+
zio_buf_free(buf, n);
637670
} else {
638671
ASSERT0(abd_cmp_buf(abd, buf, n));
639672
zio_buf_free(buf, n);

module/os/linux/zfs/abd_os.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1008,7 +1008,9 @@ abd_borrow_buf_copy(abd_t *abd, size_t n)
10081008
* borrowed. We can not ASSERT that the contents of the buffer have not changed
10091009
* if it is composed of user pages because the pages can not be placed under
10101010
* write protection and the user could have possibly changed the contents in
1011-
* the pages at any time.
1011+
* the pages at any time. This is also an issue for Direct I/O reads. Checksum
1012+
* verifications in the ZIO pipeline check for this issue and handle it by
1013+
* returning an error on checksum verification failure.
10121014
*/
10131015
void
10141016
abd_return_buf(abd_t *abd, void *buf, size_t n)

module/zcommon/zfs_valstr.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,7 @@ _VALSTR_BITFIELD_IMPL(zio_flag,
206206
{ '.', "PR", "PROBE" },
207207
{ '.', "TH", "TRYHARD" },
208208
{ '.', "OP", "OPTIONAL" },
209+
{ '.', "RD", "DIO_READ" },
209210
{ '.', "DQ", "DONT_QUEUE" },
210211
{ '.', "DP", "DONT_PROPAGATE" },
211212
{ '.', "BY", "IO_BYPASS" },

module/zfs/dmu_direct.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -330,7 +330,7 @@ dmu_read_abd(dnode_t *dn, uint64_t offset, uint64_t size,
330330
*/
331331
zio_t *cio = zio_read(rio, spa, bp, mbuf, db->db.db_size,
332332
dmu_read_abd_done, NULL, ZIO_PRIORITY_SYNC_READ,
333-
ZIO_FLAG_CANFAIL, &zb);
333+
ZIO_FLAG_CANFAIL | ZIO_FLAG_DIO_READ, &zb);
334334
mutex_exit(&db->db_mtx);
335335

336336
zfs_racct_read(spa, db->db.db_size, 1, flags);

module/zfs/vdev_draid.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1026,7 +1026,7 @@ vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
10261026

10271027
ASSERT3U(vdc->vdc_nparity, >, 0);
10281028

1029-
raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth);
1029+
raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth, zio);
10301030
rr->rr_bigcols = bc;
10311031
rr->rr_firstdatacol = vdc->vdc_nparity;
10321032
#ifdef ZFS_DEBUG

module/zfs/vdev_indirect.c

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include <sys/zap.h>
3535
#include <sys/abd.h>
3636
#include <sys/zthr.h>
37+
#include <sys/fm/fs/zfs.h>
3738

3839
/*
3940
* An indirect vdev corresponds to a vdev that has been removed. Since
@@ -1832,6 +1833,19 @@ vdev_indirect_io_done(zio_t *zio)
18321833

18331834
zio_bad_cksum_t zbc;
18341835
int ret = zio_checksum_error(zio, &zbc);
1836+
/*
1837+
* Any Direct I/O read that has a checksum error must be treated as
1838+
* suspicious as the contents of the buffer could be getting
1839+
* manipulated while the I/O is taking place. The checksum verify error
1840+
* will be reported to the top-level VDEV.
1841+
*/
1842+
if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
1843+
zio->io_error = ret;
1844+
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
1845+
zio_dio_chksum_verify_error_report(zio);
1846+
ret = 0;
1847+
}
1848+
18351849
if (ret == 0) {
18361850
zio_checksum_verified(zio);
18371851
return;

module/zfs/vdev_mirror.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -764,6 +764,27 @@ vdev_mirror_io_done(zio_t *zio)
764764

765765
ASSERT(zio->io_type == ZIO_TYPE_READ);
766766

767+
/*
768+
* Any Direct I/O read that has a checksum error must be treated as
769+
* suspicious as the contents of the buffer could be getting
770+
* manipulated while the I/O is taking place. The checksum verify error
771+
* will be reported to the top-level Mirror VDEV.
772+
*
773+
* There will be no attampt at reading any additional data copies. If
774+
* the buffer is still being manipulated while attempting to read from
775+
* another child, there exists a possibly that the checksum could be
776+
* verified as valid. However, the buffer contents could again get
777+
* manipulated after verifying the checksum. This would lead to bad data
778+
* being written out during self healing.
779+
*/
780+
if ((zio->io_flags & ZIO_FLAG_DIO_READ) &&
781+
(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
782+
zio_dio_chksum_verify_error_report(zio);
783+
zio->io_error = vdev_mirror_worst_error(mm);
784+
ASSERT3U(zio->io_error, ==, ECKSUM);
785+
return;
786+
}
787+
767788
/*
768789
* If we don't have a good copy yet, keep trying other children.
769790
*/

module/zfs/vdev_raidz.c

Lines changed: 39 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -433,7 +433,7 @@ const zio_vsd_ops_t vdev_raidz_vsd_ops = {
433433
};
434434

435435
raidz_row_t *
436-
vdev_raidz_row_alloc(int cols)
436+
vdev_raidz_row_alloc(int cols, zio_t *zio)
437437
{
438438
raidz_row_t *rr =
439439
kmem_zalloc(offsetof(raidz_row_t, rr_col[cols]), KM_SLEEP);
@@ -445,7 +445,17 @@ vdev_raidz_row_alloc(int cols)
445445
raidz_col_t *rc = &rr->rr_col[c];
446446
rc->rc_shadow_devidx = INT_MAX;
447447
rc->rc_shadow_offset = UINT64_MAX;
448-
rc->rc_allow_repair = 1;
448+
/*
449+
* We can not allow self healing to take place for Direct I/O
450+
* reads. There is nothing that stops the buffer contents from
451+
* being manipulated while the I/O is in flight. It is possible
452+
* that the checksum could be verified on the buffer and then
453+
* the contents of that buffer are manipulated afterwards. This
454+
* could lead to bad data being written out during self
455+
* healing.
456+
*/
457+
if (!(zio->io_flags & ZIO_FLAG_DIO_READ))
458+
rc->rc_allow_repair = 1;
449459
}
450460
return (rr);
451461
}
@@ -619,7 +629,7 @@ vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
619629
}
620630

621631
ASSERT3U(acols, <=, scols);
622-
rr = vdev_raidz_row_alloc(scols);
632+
rr = vdev_raidz_row_alloc(scols, zio);
623633
rm->rm_row[0] = rr;
624634
rr->rr_cols = acols;
625635
rr->rr_bigcols = bc;
@@ -765,7 +775,7 @@ vdev_raidz_map_alloc_expanded(zio_t *zio,
765775

766776
for (uint64_t row = 0; row < rows; row++) {
767777
boolean_t row_use_scratch = B_FALSE;
768-
raidz_row_t *rr = vdev_raidz_row_alloc(cols);
778+
raidz_row_t *rr = vdev_raidz_row_alloc(cols, zio);
769779
rm->rm_row[row] = rr;
770780

771781
/* The starting RAIDZ (parent) vdev sector of the row. */
@@ -2633,6 +2643,20 @@ raidz_checksum_verify(zio_t *zio)
26332643
raidz_map_t *rm = zio->io_vsd;
26342644

26352645
int ret = zio_checksum_error(zio, &zbc);
2646+
/*
2647+
* Any Direct I/O read that has a checksum error must be treated as
2648+
* suspicious as the contents of the buffer could be getting
2649+
* manipulated while the I/O is taking place. The checksum verify error
2650+
* will be reported to the top-level RAIDZ VDEV.
2651+
*/
2652+
if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
2653+
zio->io_error = ret;
2654+
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
2655+
zio_dio_chksum_verify_error_report(zio);
2656+
zio_checksum_verified(zio);
2657+
return (0);
2658+
}
2659+
26362660
if (ret != 0 && zbc.zbc_injected != 0)
26372661
rm->rm_ecksuminjected = 1;
26382662

@@ -2776,6 +2800,11 @@ vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
27762800
(rc->rc_error == 0 || rc->rc_size == 0)) {
27772801
continue;
27782802
}
2803+
/*
2804+
* We do not allow self healing for Direct I/O reads.
2805+
* See comment in vdev_raid_row_alloc().
2806+
*/
2807+
ASSERT0(zio->io_flags & ZIO_FLAG_DIO_READ);
27792808

27802809
zfs_dbgmsg("zio=%px repairing c=%u devidx=%u "
27812810
"offset=%llx",
@@ -2979,6 +3008,8 @@ raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
29793008

29803009
/* Check for success */
29813010
if (raidz_checksum_verify(zio) == 0) {
3011+
if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
3012+
return (0);
29823013

29833014
/* Reconstruction succeeded - report errors */
29843015
for (int i = 0; i < rm->rm_nrows; i++) {
@@ -3379,7 +3410,6 @@ vdev_raidz_io_done_unrecoverable(zio_t *zio)
33793410
zio_bad_cksum_t zbc;
33803411
zbc.zbc_has_cksum = 0;
33813412
zbc.zbc_injected = rm->rm_ecksuminjected;
3382-
33833413
mutex_enter(&cvd->vdev_stat_lock);
33843414
cvd->vdev_stat.vs_checksum_errors++;
33853415
mutex_exit(&cvd->vdev_stat_lock);
@@ -3444,6 +3474,9 @@ vdev_raidz_io_done(zio_t *zio)
34443474
}
34453475

34463476
if (raidz_checksum_verify(zio) == 0) {
3477+
if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
3478+
goto done;
3479+
34473480
for (int i = 0; i < rm->rm_nrows; i++) {
34483481
raidz_row_t *rr = rm->rm_row[i];
34493482
vdev_raidz_io_done_verified(zio, rr);
@@ -3538,6 +3571,7 @@ vdev_raidz_io_done(zio_t *zio)
35383571
}
35393572
}
35403573
}
3574+
done:
35413575
if (rm->rm_lr != NULL) {
35423576
zfs_rangelock_exit(rm->rm_lr);
35433577
rm->rm_lr = NULL;

0 commit comments

Comments
 (0)