From 0bfa1e37582a0976ef8bf53c4620183a1de53142 Mon Sep 17 00:00:00 2001
From: Rob Norris
Date: Fri, 8 Aug 2025 14:08:26 +1000
Subject: [PATCH 1/5] ZTS: stress test concurrent zvol create/destroy

Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Signed-off-by: Rob Norris
---
 tests/runfiles/common.run                    |  2 +-
 tests/zfs-tests/tests/Makefile.am            |  1 +
 .../zvol/zvol_stress/zvol_stress_destroy.ksh | 66 +++++++++++++++++++
 3 files changed, 68 insertions(+), 1 deletion(-)
 create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh

diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run
index 131845f5ed40..2da46458289a 100644
--- a/tests/runfiles/common.run
+++ b/tests/runfiles/common.run
@@ -1093,7 +1093,7 @@ tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
 tags = ['functional', 'zvol', 'zvol_misc']
 
 [tests/functional/zvol/zvol_stress]
-tests = ['zvol_stress']
+tests = ['zvol_stress', 'zvol_stress_destroy']
 tags = ['functional', 'zvol', 'zvol_stress']
 
 [tests/functional/zvol/zvol_swap]
diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am
index b8b8bbe45a42..41e7b45ef4ec 100644
--- a/tests/zfs-tests/tests/Makefile.am
+++ b/tests/zfs-tests/tests/Makefile.am
@@ -2244,6 +2244,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/zvol/zvol_stress/cleanup.ksh \
 	functional/zvol/zvol_stress/setup.ksh \
 	functional/zvol/zvol_stress/zvol_stress.ksh \
+	functional/zvol/zvol_stress/zvol_stress_destroy.ksh \
 	functional/zvol/zvol_swap/cleanup.ksh \
 	functional/zvol/zvol_swap/setup.ksh \
 	functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
diff --git a/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh b/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh
new file mode 100755
index 000000000000..669b59fac01f
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh
@@ -0,0 +1,66 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+verify_runnable "global"
+
+typeset -i nzvols=1000
+typeset -i parallel=$(( $(get_num_cpus) * 2 ))
+
+function cleanup {
+	for zvol in $(zfs list -Ho name -t vol) ; do
+		log_must_busy zfs destroy $zvol
+	done
+}
+
+log_onexit cleanup
+
+log_assert "stress test concurrent zvol create/destroy"
+
+function destroy_zvols_until {
+	typeset cond=$1
+	while true ; do
+		IFS='' zfs list -Ho name -t vol | read -r -d '' zvols
+		if [[ -n $zvols ]] ; then
+			echo $zvols | xargs -n 1 -P $parallel zfs destroy
+		fi
+		if ! $cond ; then
+			break
+		fi
+	done
+}
+
+( seq $nzvols | \
+    xargs -P $parallel -I % zfs create -s -V 1G $TESTPOOL/testvol% ) &
+cpid=$!
+sleep 1
+
+destroy_zvols_until "kill -0 $cpid"
+destroy_zvols_until "false"
+
+log_pass "stress test done"

From fb777450956d6cd7f916e13ccfbdb341e063e4d6 Mon Sep 17 00:00:00 2001
From: Rob Norris
Date: Tue, 5 Aug 2025 12:50:31 +1000
Subject: [PATCH 2/5] zvol: generalise zvol_remove_minors_impl() for single
 zvol case

zvol_remove_minor_impl() and zvol_remove_minors_impl() should be
identical except for how they select zvols to remove, so let's just use
the same function with a flag to indicate if we should include children
and snapshots or not.

Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Signed-off-by: Rob Norris
---
 module/zfs/zvol.c | 71 ++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 41 deletions(-)

diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 29f51e230a37..3556608dec3d 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1614,12 +1614,18 @@ zvol_free_task(void *arg)
 	zvol_os_free(arg);
 }
 
+/*
+ * Remove minors for specified dataset and, optionally, its children and
+ * snapshots.
+ */
 static void
 zvol_remove_minors_impl(zvol_task_t *task)
 {
 	zvol_state_t *zv, *zv_next;
 	const char *name = task ? task->zt_name1 : NULL;
 	int namelen = ((name) ? strlen(name) : 0);
+	boolean_t children = task ? !!task->zt_value : B_TRUE;
+
 	taskqid_t t;
 	list_t delay_list, free_list;
 
@@ -1631,20 +1637,33 @@ zvol_remove_minors_impl(zvol_task_t *task)
 	list_create(&free_list, sizeof (zvol_state_t),
 	    offsetof(zvol_state_t, zv_next));
 
+	int error = ENOENT;
+
 	rw_enter(&zvol_state_lock, RW_WRITER);
 
 	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
 		zv_next = list_next(&zvol_state_list, zv);
 
 		mutex_enter(&zv->zv_state_lock);
+
+		/*
+		 * This zvol should be removed if:
+		 * - no name was offered (ie removing all at shutdown); or
+		 * - name matches exactly; or
+		 * - we were asked to remove children, and
+		 *   - the start of the name matches, and
+		 *   - there is a '/' immediately after the matched name; or
+		 *   - there is a '@' immediately after the matched name
+		 */
 		if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
-		    (strncmp(zv->zv_name, name, namelen) == 0 &&
+		    (children && strncmp(zv->zv_name, name, namelen) == 0 &&
 		    (zv->zv_name[namelen] == '/' ||
 		    zv->zv_name[namelen] == '@'))) {
 			/*
 			 * By holding zv_state_lock here, we guarantee that no
 			 * one is currently using this zv
 			 */
+			error = 0;
 
 			/*
 			 * If in use, try to throw everyone off and try again
@@ -1697,57 +1716,26 @@ zvol_remove_minors_impl(zvol_task_t *task)
 	/* Free any that we couldn't free in parallel earlier */
 	while ((zv = list_remove_head(&free_list)) != NULL)
 		zvol_os_free(zv);
+
+	if (error && task)
+		task->zt_error = SET_ERROR(error);
 }
 
 /* Remove minor for this specific volume only */
 static int
 zvol_remove_minor_impl(const char *name)
 {
-	zvol_state_t *zv = NULL, *zv_next;
-
 	if (zvol_inhibit_dev)
 		return (0);
 
-	rw_enter(&zvol_state_lock, RW_WRITER);
-
-	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
-		zv_next = list_next(&zvol_state_list, zv);
-
-		mutex_enter(&zv->zv_state_lock);
-		if (strcmp(zv->zv_name, name) == 0)
-			/* Found, leave the the loop with zv_lock held */
-			break;
-		mutex_exit(&zv->zv_state_lock);
-	}
-
-	if (zv == NULL) {
-		rw_exit(&zvol_state_lock);
-		return (SET_ERROR(ENOENT));
-	}
+	zvol_task_t task;
+	memset(&task, 0, sizeof (zvol_task_t));
+	strlcpy(task.zt_name1, name, sizeof (task.zt_name1));
+	task.zt_value = B_FALSE;
+	zvol_remove_minors_impl(&task);
 
-	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
-
-	if (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
-		/*
-		 * In use, so try to throw everyone off, then wait
-		 * until finished.
-		 */
-		zv->zv_flags |= ZVOL_REMOVING;
-		mutex_exit(&zv->zv_state_lock);
-		rw_exit(&zvol_state_lock);
-		zvol_remove_minor_task(zv);
-		return (0);
-	}
-
-	zvol_remove(zv);
-	zvol_os_clear_private(zv);
-
-	mutex_exit(&zv->zv_state_lock);
-	rw_exit(&zvol_state_lock);
-
-	zvol_os_free(zv);
-
-	return (0);
+	return (task.zt_error);
 }
 
@@ -2067,6 +2055,7 @@ zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
 	task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
 	task->zt_op = ZVOL_ASYNC_REMOVE_MINORS;
 	strlcpy(task->zt_name1, name, sizeof (task->zt_name1));
+	task->zt_value = B_TRUE;
 	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
 	if ((async == B_FALSE) && (id != TASKQID_INVALID))
 		taskq_wait_id(spa->spa_zvol_taskq, id);

From d6c37ca8a789260108569760eb988d8f817bf0d7 Mon Sep 17 00:00:00 2001
From: Rob Norris
Date: Tue, 5 Aug 2025 13:43:17 +1000
Subject: [PATCH 3/5] zvol: remove the OS-side minor before freeing the zvol

When destroying a zvol, it is not "unpublished" from the system (that
is, /dev/zd* node removed) until zvol_os_free(). Under Linux, at the
time del_gendisk() and put_disk() are called, the device node may still
have an active hold, from a userspace program or something inside the
kernel (a partition probe).

As it currently stands, this can lead to calls to zvol_open() or
zvol_release() while the zvol_state_t is partially or fully freed.
zvol_open() has some protection against this by checking that
private_data is NULL, but zvol_release() does not.

This implements a better ordering for all of this by adding a new
OS-side method, zvol_os_remove_minor(), which is responsible for fully
decoupling the "private" (OS-side) objects from the zvol_state_t. For
Linux, that means calling put_disk(), nulling private_data, and freeing
zv_zso. This takes the place of zvol_os_clear_private(), which was a
nod in that direction but did not do enough, and did not do it early
enough.

Equivalent changes are made on the FreeBSD side to follow the API
change.

Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Signed-off-by: Rob Norris
---
 include/sys/zvol_impl.h         |  4 +-
 module/os/freebsd/zfs/zvol_os.c | 92 ++++++++++++++------------------
 module/os/linux/zfs/zvol_os.c   | 86 ++++++++++++++++++------------
 module/zfs/zvol.c               | 13 ++---
 4 files changed, 99 insertions(+), 96 deletions(-)

diff --git a/include/sys/zvol_impl.h b/include/sys/zvol_impl.h
index f3dd9f26f23c..64ab2a8118f5 100644
--- a/include/sys/zvol_impl.h
+++ b/include/sys/zvol_impl.h
@@ -20,7 +20,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
  */
 
 #ifndef _SYS_ZVOL_IMPL_H
@@ -135,7 +135,7 @@ int zvol_os_rename_minor(zvol_state_t *zv, const char *newname);
 int zvol_os_create_minor(const char *name);
 int zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize);
 boolean_t zvol_os_is_zvol(const char *path);
-void zvol_os_clear_private(zvol_state_t *zv);
+void zvol_os_remove_minor(zvol_state_t *zv);
 void zvol_os_set_disk_ro(zvol_state_t *zv, int flags);
 void zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity);
 
diff --git a/module/os/freebsd/zfs/zvol_os.c b/module/os/freebsd/zfs/zvol_os.c
index 265dfd55fc4d..493da08e3a2c 100644
--- a/module/os/freebsd/zfs/zvol_os.c
+++ b/module/os/freebsd/zfs/zvol_os.c
@@ -31,7 +31,7 @@
  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
  * Copyright (c) 2014 Integros [integros.com]
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
  */
 /* Portions Copyright 2011 Martin Matuska */
 
@@ -196,7 +196,6 @@ DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
 
 static int zvol_geom_open(struct g_provider *pp, int flag, int count);
 static int zvol_geom_close(struct g_provider *pp, int flag, int count);
-static void zvol_geom_destroy(zvol_state_t *zv);
 static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
 static void zvol_geom_bio_start(struct bio *bp);
 static int zvol_geom_bio_getattr(struct bio *bp);
@@ -408,20 +407,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
 	return (0);
 }
 
-static void
-zvol_geom_destroy(zvol_state_t *zv)
-{
-	struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
-	struct g_provider *pp = zsg->zsg_provider;
-
-	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
-
-	g_topology_assert();
-
-	zsg->zsg_provider = NULL;
-	g_wither_geom(pp->geom, ENXIO);
-}
-
 void
 zvol_wait_close(zvol_state_t *zv)
 {
@@ -1400,42 +1385,65 @@ zvol_alloc(const char *name, uint64_t volsize, uint64_t volblocksize,
  * Remove minor node for the specified volume.
  */
 void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
 {
-	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
-	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 	ASSERT0(zv->zv_open_count);
+	ASSERT0(atomic_read(&zv->zv_suspend_ref));
+	ASSERT(zv->zv_flags & ZVOL_REMOVING);
 
-	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
-
-	rw_destroy(&zv->zv_suspend_lock);
-	zfs_rangelock_fini(&zv->zv_rangelock);
+	struct zvol_state_os *zso = zv->zv_zso;
+	zv->zv_zso = NULL;
 
 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
-		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
-		struct g_provider *pp __maybe_unused = zsg->zsg_provider;
-
-		ASSERT0P(pp->private);
+		struct zvol_state_geom *zsg = &zso->zso_geom;
+		struct g_provider *pp = zsg->zsg_provider;
+		pp->private = NULL;
+		mutex_exit(&zv->zv_state_lock);
 
 		g_topology_lock();
-		zvol_geom_destroy(zv);
+		g_wither_geom(pp->geom, ENXIO);
 		g_topology_unlock();
 	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
-		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+		struct zvol_state_dev *zsd = &zso->zso_dev;
 		struct cdev *dev = zsd->zsd_cdev;
 
+		if (dev != NULL)
+			dev->si_drv2 = NULL;
+		mutex_exit(&zv->zv_state_lock);
+
 		if (dev != NULL) {
-			ASSERT0P(dev->si_drv2);
 			destroy_dev(dev);
 			knlist_clear(&zsd->zsd_selinfo.si_note, 0);
 			knlist_destroy(&zsd->zsd_selinfo.si_note);
 		}
 	}
 
+	kmem_free(zso, sizeof (struct zvol_state_os));
+
+	mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+	ASSERT0(zv->zv_open_count);
+	ASSERT0P(zv->zv_zso);
+
+	ASSERT0P(zv->zv_objset);
+	ASSERT0P(zv->zv_zilog);
+	ASSERT0P(zv->zv_dn);
+
+	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
+
+	rw_destroy(&zv->zv_suspend_lock);
+	zfs_rangelock_fini(&zv->zv_rangelock);
+
 	mutex_destroy(&zv->zv_state_lock);
 	cv_destroy(&zv->zv_removing_cv);
 	dataset_kstats_destroy(&zv->zv_kstat);
-	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
 	kmem_free(zv, sizeof (zvol_state_t));
 	zvol_minors--;
 }
@@ -1538,28 +1546,6 @@ zvol_os_create_minor(const char *name)
 	return (error);
 }
 
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
-	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
-	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
-		struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
-		struct g_provider *pp = zsg->zsg_provider;
-
-		if (pp->private == NULL) /* already cleared */
-			return;
-
-		pp->private = NULL;
-		ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
-	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
-		struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
-		struct cdev *dev = zsd->zsd_cdev;
-
-		if (dev != NULL)
-			dev->si_drv2 = NULL;
-	}
-}
-
 int
 zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
 {
diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c
index a73acdad34ae..57f5bd67d7a8 100644
--- a/module/os/linux/zfs/zvol_os.c
+++ b/module/os/linux/zfs/zvol_os.c
@@ -22,7 +22,7 @@
 /*
  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
  * Copyright (c) 2024, Rob Norris
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
  */
 
 #include
@@ -971,16 +971,6 @@ zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
 	return (0);
 }
 
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
-	/*
-	 * Cleared while holding zvol_state_lock as a writer
-	 * which will prevent zvol_open() from opening it.
-	 */
-	zv->zv_zso->zvo_disk->private_data = NULL;
-}
-
 /*
  * Provide a simple virtual geometry for legacy compatibility.  For devices
  * smaller than 1 MiB a small head and sector count is used to allow very
@@ -1417,6 +1407,54 @@ zvol_alloc(dev_t dev, const char *name, uint64_t volsize, uint64_t volblocksize,
 	return (ret);
 }
 
+void
+zvol_os_remove_minor(zvol_state_t *zv)
+{
+	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+	ASSERT0(zv->zv_open_count);
+	ASSERT0(atomic_read(&zv->zv_suspend_ref));
+	ASSERT(zv->zv_flags & ZVOL_REMOVING);
+
+	/*
+	 * Cleared while holding zvol_state_lock as a writer
+	 * which will prevent zvol_open() from opening it.
+	 */
+	struct zvol_state_os *zso = zv->zv_zso;
+	zv->zv_zso = NULL;
+
+	/* Clearing private_data will make new callers return immediately. */
+	zso->zvo_disk->private_data = NULL;
+
+	/*
+	 * Drop the state lock before calling del_gendisk(). There may be
+	 * callers waiting to acquire it, but del_gendisk() will block until
+	 * they exit, which would deadlock.
+	 */
+	mutex_exit(&zv->zv_state_lock);
+
+	del_gendisk(zso->zvo_disk);
+#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
+	(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
+#if defined(HAVE_BLK_CLEANUP_DISK)
+	blk_cleanup_disk(zso->zvo_disk);
+#else
+	put_disk(zso->zvo_disk);
+#endif
+#else
+	blk_cleanup_queue(zso->zvo_queue);
+	put_disk(zso->zvo_disk);
+#endif
+
+	if (zso->use_blk_mq)
+		blk_mq_free_tag_set(&zso->tag_set);
+
+	ida_simple_remove(&zvol_ida, MINOR(zso->zvo_dev) >> ZVOL_MINOR_BITS);
+
+	kmem_free(zso, sizeof (struct zvol_state_os));
+
+	mutex_enter(&zv->zv_state_lock);
+}
+
 /*
  * Cleanup then free a zvol_state_t which was created by zvol_alloc().
  * At this time, the structure is not opened by anyone, is taken off
@@ -1435,35 +1473,19 @@ zvol_os_free(zvol_state_t *zv)
 	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
 	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
 	ASSERT0(zv->zv_open_count);
-	ASSERT0P(zv->zv_zso->zvo_disk->private_data);
+	ASSERT0P(zv->zv_zso);
+
+	ASSERT0P(zv->zv_objset);
+	ASSERT0P(zv->zv_zilog);
+	ASSERT0P(zv->zv_dn);
 
 	rw_destroy(&zv->zv_suspend_lock);
 	zfs_rangelock_fini(&zv->zv_rangelock);
 
-	del_gendisk(zv->zv_zso->zvo_disk);
-#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
-	(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
-#if defined(HAVE_BLK_CLEANUP_DISK)
-	blk_cleanup_disk(zv->zv_zso->zvo_disk);
-#else
-	put_disk(zv->zv_zso->zvo_disk);
-#endif
-#else
-	blk_cleanup_queue(zv->zv_zso->zvo_queue);
-	put_disk(zv->zv_zso->zvo_disk);
-#endif
-
-	if (zv->zv_zso->use_blk_mq)
-		blk_mq_free_tag_set(&zv->zv_zso->tag_set);
-
-	ida_simple_remove(&zvol_ida,
-	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
-
 	cv_destroy(&zv->zv_removing_cv);
 	mutex_destroy(&zv->zv_state_lock);
 	dataset_kstats_destroy(&zv->zv_kstat);
-	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
 	kmem_free(zv, sizeof (zvol_state_t));
 }
 
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 3556608dec3d..cd3e1894c0ef 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -38,7 +38,7 @@
  * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
  * Copyright (c) 2016 Actifio, Inc. All rights reserved.
  * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
  */
 
 /*
@@ -1599,8 +1599,8 @@ zvol_remove_minor_task(void *arg)
 	rw_enter(&zvol_state_lock, RW_WRITER);
 	mutex_enter(&zv->zv_state_lock);
 
+	zvol_os_remove_minor(zv);
 	zvol_remove(zv);
-	zvol_os_clear_private(zv);
 
 	mutex_exit(&zv->zv_state_lock);
 	rw_exit(&zvol_state_lock);
@@ -1669,9 +1669,9 @@ zvol_remove_minors_impl(zvol_task_t *task)
 			 * If in use, try to throw everyone off and try again
 			 * later.
 			 */
+			zv->zv_flags |= ZVOL_REMOVING;
 			if (zv->zv_open_count > 0 ||
 			    atomic_read(&zv->zv_suspend_ref)) {
-				zv->zv_flags |= ZVOL_REMOVING;
 				t = taskq_dispatch(
 				    zv->zv_objset->os_spa->spa_zvol_taskq,
 				    zvol_remove_minor_task, zv, TQ_SLEEP);
@@ -1687,14 +1687,9 @@ zvol_remove_minors_impl(zvol_task_t *task)
 				continue;
 			}
 
+			zvol_os_remove_minor(zv);
 			zvol_remove(zv);
 
-			/*
-			 * Cleared while holding zvol_state_lock as a writer
-			 * which will prevent zvol_open() from opening it.
-			 */
-			zvol_os_clear_private(zv);
-
 			/* Drop zv_state_lock before zvol_free() */
 			mutex_exit(&zv->zv_state_lock);
 

From b504cf014163c5556bb84fbf97ea168d66a63379 Mon Sep 17 00:00:00 2001
From: Rob Norris
Date: Tue, 5 Aug 2025 14:19:24 +1000
Subject: [PATCH 4/5] zvol: stop using zvol_state_lock to protect OS-side
 private data

zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access
to OS-side private data, accessed through whatever kernel object is
used to represent the volume (gendisk, geom, etc).

This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shut down and destroyed at
the same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in
partial destruction. Taking the global lock seems like a convenient way
to ensure this.

The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while
holding zvol_state_lock. This then blocks out all other OS-side
operations which want to get the private data, and any ZFS-side control
operations that would take the write half of the lock. It's even worse
if ZFS-side operations induce OS-side calls back into the zvol (eg
creating a zvol triggers a partition probe inside the kernel, and also
a userspace access from udev to set up device links). And it gets even
worse again if anything decides to defer those ops to a task and wait
on them, which zvol_remove_minors_impl() will do under high load.

However, since the previous commit, we have a guarantee that the
private data pointer will always be NULL'd out in
zvol_os_remove_minor() _before_ the zvol_state_t is made invalid, but
it won't happen until all users are ejected. So, if we make access to
the private object pointer atomic, we remove the need to take the
global lock to access it, and so we can remove all acquisitions of
zvol_state_lock from the OS side.

While here, I've rewritten much of the locking theory comment at the
top of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so
I've tried to describe the purpose of each lock in a little more
detail, and in particular describe where it should and shouldn't be
used.

Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Signed-off-by: Rob Norris
---
 module/os/freebsd/zfs/zvol_os.c | 122 ++++++++++++++++++--------------
 module/os/linux/zfs/zvol_os.c   |  85 +++++++++++-----------
 module/zfs/zvol.c               |  40 +++++++----
 3 files changed, 134 insertions(+), 113 deletions(-)

diff --git a/module/os/freebsd/zfs/zvol_os.c b/module/os/freebsd/zfs/zvol_os.c
index 493da08e3a2c..0dd2ecd7fd8d 100644
--- a/module/os/freebsd/zfs/zvol_os.c
+++ b/module/os/freebsd/zfs/zvol_os.c
@@ -225,25 +225,14 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
 	}
 
 retry:
-	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
-	/*
-	 * Obtain a copy of private under zvol_state_lock to make sure either
-	 * the result of zvol free code setting private to NULL is observed,
-	 * or the zv is protected from being freed because of the positive
-	 * zv_open_count.
-	 */
-	zv = pp->private;
-	if (zv == NULL) {
-		rw_exit(&zvol_state_lock);
-		err = SET_ERROR(ENXIO);
-		goto out_locked;
-	}
+	zv = atomic_load_ptr(&pp->private);
+	if (zv == NULL)
+		return (SET_ERROR(ENXIO));
 
 	mutex_enter(&zv->zv_state_lock);
 	if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
-		rw_exit(&zvol_state_lock);
 		err = SET_ERROR(ENXIO);
-		goto out_zv_locked;
+		goto out_locked;
 	}
 	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
 
@@ -256,8 +245,24 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
 		drop_suspend = B_TRUE;
 		if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
 			mutex_exit(&zv->zv_state_lock);
+
+			/*
+			 * Removal may happen while the locks are down, so
+			 * we can't trust zv any longer; we have to start over.
+			 */
+			zv = atomic_load_ptr(&pp->private);
+			if (zv == NULL)
+				return (SET_ERROR(ENXIO));
+
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
+
+			if (zv->zv_zso->zso_dying ||
+			    zv->zv_flags & ZVOL_REMOVING) {
+				err = SET_ERROR(ENXIO);
+				goto out_locked;
+			}
+
 			/* Check to see if zv_suspend_lock is needed. */
 			if (zv->zv_open_count != 0) {
 				rw_exit(&zv->zv_suspend_lock);
@@ -265,7 +270,6 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
 		}
 	}
-	rw_exit(&zvol_state_lock);
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
@@ -293,7 +297,7 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
 	if (drop_namespace)
 		mutex_exit(&spa_namespace_lock);
 	if (err)
-		goto out_zv_locked;
+		goto out_locked;
 	pp->mediasize = zv->zv_volsize;
 	pp->stripeoffset = 0;
 	pp->stripesize = zv->zv_volblocksize;
@@ -328,9 +332,8 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
 		zvol_last_close(zv);
 		wakeup(zv);
 	}
-out_zv_locked:
-	mutex_exit(&zv->zv_state_lock);
 out_locked:
+	mutex_exit(&zv->zv_state_lock);
 	if (drop_suspend)
 		rw_exit(&zv->zv_suspend_lock);
 	return (err);
@@ -344,12 +347,9 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
 	boolean_t drop_suspend = B_TRUE;
 	int new_open_count;
 
-	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
-	zv = pp->private;
-	if (zv == NULL) {
-		rw_exit(&zvol_state_lock);
+	zv = atomic_load_ptr(&pp->private);
+	if (zv == NULL)
 		return (SET_ERROR(ENXIO));
-	}
 
 	mutex_enter(&zv->zv_state_lock);
 	if (zv->zv_flags & ZVOL_EXCL) {
@@ -376,6 +376,15 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
+
+			/*
+			 * Unlike in zvol_geom_open(), we don't check if
+			 * removal started here, because we might be one of the
+			 * openers that needs to be thrown out! If we're the
+			 * last, we need to call zvol_last_close() below to
+			 * finish cleanup. So, no special treatment for us.
+			 */
+
 			/* Check to see if zv_suspend_lock is needed. */
 			new_open_count = zv->zv_open_count - count;
 			if (new_open_count != 0) {
@@ -386,7 +395,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
 	} else {
 		drop_suspend = B_FALSE;
 	}
-	rw_exit(&zvol_state_lock);
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
@@ -439,7 +447,7 @@ zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
 	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
 	    pp->name, acr, acw, ace));
 
-	if (pp->private == NULL) {
+	if (atomic_load_ptr(&pp->private) == NULL) {
 		if (acr <= 0 && acw <= 0 && ace <= 0)
 			return (0);
 		return (pp->error);
@@ -906,25 +914,14 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 	boolean_t drop_suspend = B_FALSE;
 
 retry:
-	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
-	/*
-	 * Obtain a copy of si_drv2 under zvol_state_lock to make sure either
-	 * the result of zvol free code setting si_drv2 to NULL is observed,
-	 * or the zv is protected from being freed because of the positive
-	 * zv_open_count.
-	 */
-	zv = dev->si_drv2;
-	if (zv == NULL) {
-		rw_exit(&zvol_state_lock);
-		err = SET_ERROR(ENXIO);
-		goto out_locked;
-	}
+	zv = atomic_load_ptr(&dev->si_drv2);
+	if (zv == NULL)
+		return (SET_ERROR(ENXIO));
 
 	mutex_enter(&zv->zv_state_lock);
-	if (zv->zv_zso->zso_dying) {
-		rw_exit(&zvol_state_lock);
+	if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
 		err = SET_ERROR(ENXIO);
-		goto out_zv_locked;
+		goto out_locked;
 	}
 	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
 
@@ -939,6 +936,13 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
+
+			if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+				/* Removal started while locks were down. */
+				err = SET_ERROR(ENXIO);
+				goto out_locked;
+			}
+
 			/* Check to see if zv_suspend_lock is needed. */
 			if (zv->zv_open_count != 0) {
 				rw_exit(&zv->zv_suspend_lock);
@@ -946,7 +950,6 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 		}
 	}
-	rw_exit(&zvol_state_lock);
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
@@ -974,7 +977,7 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 		if (drop_namespace)
 			mutex_exit(&spa_namespace_lock);
 		if (err)
-			goto out_zv_locked;
+			goto out_locked;
 	}
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1001,9 +1004,8 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 		zvol_last_close(zv);
 		wakeup(zv);
 	}
-out_zv_locked:
-	mutex_exit(&zv->zv_state_lock);
 out_locked:
+	mutex_exit(&zv->zv_state_lock);
 	if (drop_suspend)
 		rw_exit(&zv->zv_suspend_lock);
 	return (err);
@@ -1015,12 +1017,9 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 	zvol_state_t *zv;
 	boolean_t drop_suspend = B_TRUE;
 
-	rw_enter(&zvol_state_lock, ZVOL_RW_READER);
-	zv = dev->si_drv2;
-	if (zv == NULL) {
-		rw_exit(&zvol_state_lock);
+	zv = atomic_load_ptr(&dev->si_drv2);
+	if (zv == NULL)
 		return (SET_ERROR(ENXIO));
-	}
 
 	mutex_enter(&zv->zv_state_lock);
 	if (zv->zv_flags & ZVOL_EXCL) {
@@ -1045,6 +1044,15 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
+
+			/*
+			 * Unlike in zvol_cdev_open(), we don't check if
+			 * removal started here, because we might be one of the
+			 * openers that needs to be thrown out! If we're the
+			 * last, we need to call zvol_last_close() below to
+			 * finish cleanup. So, no special treatment for us.
+			 */
+
 			/* Check to see if zv_suspend_lock is needed. */
 			if (zv->zv_open_count != 1) {
 				rw_exit(&zv->zv_suspend_lock);
@@ -1054,7 +1062,6 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 	} else {
 		drop_suspend = B_FALSE;
 	}
-	rw_exit(&zvol_state_lock);
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
@@ -1086,7 +1093,8 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 	int error;
 	boolean_t sync;
 
-	zv = dev->si_drv2;
+	zv = atomic_load_ptr(&dev->si_drv2);
+	ASSERT3P(zv, !=, NULL);
 	error = 0;
 	KASSERT(zv->zv_open_count > 0,
@@ -1147,6 +1155,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 		*(off_t *)data = 0;
 		break;
 	case DIOCGATTR: {
+		rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 		spa_t *spa = dmu_objset_spa(zv->zv_objset);
 		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
 		uint64_t refd, avail, usedobjs, availobjs;
@@ -1171,6 +1180,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 			arg->value.off = refd / DEV_BSIZE;
 		} else
 			error = SET_ERROR(ENOIOCTL);
+		rw_exit(&zv->zv_suspend_lock);
 		break;
 	}
 	case FIOSEEKHOLE:
@@ -1181,10 +1191,12 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 		hole = (cmd == FIOSEEKHOLE);
 		noff = *off;
+		rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 		lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
 		    RL_READER);
 		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
 		zfs_rangelock_exit(lr);
+		rw_exit(&zv->zv_suspend_lock);
 		*off = noff;
 		break;
 	}
@@ -1398,7 +1410,7 @@ zvol_os_remove_minor(zvol_state_t *zv)
 	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
 		struct zvol_state_geom *zsg = &zso->zso_geom;
 		struct g_provider *pp = zsg->zsg_provider;
-		pp->private = NULL;
+		atomic_store_ptr(&pp->private, NULL);
 		mutex_exit(&zv->zv_state_lock);
 
 		g_topology_lock();
@@ -1409,7 +1421,7 @@ zvol_os_remove_minor(zvol_state_t *zv)
 		struct cdev *dev = zsd->zsd_cdev;
 
 		if (dev != NULL)
-			dev->si_drv2 = NULL;
+			atomic_store_ptr(&dev->si_drv2, NULL);
 		mutex_exit(&zv->zv_state_lock);
 
 		if (dev != NULL) {
diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c
index 57f5bd67d7a8..bac166fcd89e 100644
--- a/module/os/linux/zfs/zvol_os.c
+++ b/module/os/linux/zfs/zvol_os.c
@@ -679,28 +679,19 @@ zvol_open(struct block_device *bdev, fmode_t flag)
 
 retry:
 #endif
-	rw_enter(&zvol_state_lock, RW_READER);
-	/*
-	 * Obtain a copy of private_data under the zvol_state_lock to make
-	 * sure that either the result of zvol free code path setting
-	 * disk->private_data to NULL is observed, or zvol_os_free()
-	 * is not called on this zv because of the positive zv_open_count.
-	 */
+
 #ifdef HAVE_BLK_MODE_T
-	zv = disk->private_data;
+	zv = atomic_load_ptr(&disk->private_data);
 #else
-	zv = bdev->bd_disk->private_data;
+	zv = atomic_load_ptr(&bdev->bd_disk->private_data);
 #endif
 	if (zv == NULL) {
-		rw_exit(&zvol_state_lock);
 		return (-SET_ERROR(ENXIO));
 	}
 
 	mutex_enter(&zv->zv_state_lock);
-
 	if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
 		mutex_exit(&zv->zv_state_lock);
-		rw_exit(&zvol_state_lock);
 		return (-SET_ERROR(ENXIO));
 	}
 
@@ -712,8 +703,28 @@ zvol_open(struct block_device *bdev, fmode_t flag)
 	if (zv->zv_open_count == 0) {
 		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
 			mutex_exit(&zv->zv_state_lock);
+
+			/*
+			 * Removal may happen while the locks are down, so
+			 * we can't trust zv any longer; we have to start over.
+			 */
+#ifdef HAVE_BLK_MODE_T
+			zv = atomic_load_ptr(&disk->private_data);
+#else
+			zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+#endif
+			if (zv == NULL)
+				return (-SET_ERROR(ENXIO));
+
 			rw_enter(&zv->zv_suspend_lock, RW_READER);
 			mutex_enter(&zv->zv_state_lock);
+
+			if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+				mutex_exit(&zv->zv_state_lock);
+				rw_exit(&zv->zv_suspend_lock);
+				return (-SET_ERROR(ENXIO));
+			}
+
 			/* check to see if zv_suspend_lock is needed */
 			if (zv->zv_open_count != 0) {
 				rw_exit(&zv->zv_suspend_lock);
@@ -724,7 +735,6 @@ zvol_open(struct block_device *bdev, fmode_t flag)
 			drop_suspend = B_TRUE;
 		}
 	}
-	rw_exit(&zvol_state_lock);
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
@@ -821,11 +831,11 @@ zvol_release(struct gendisk *disk, fmode_t unused)
 	(void) unused;
 #endif
-	zvol_state_t *zv;
 	boolean_t drop_suspend = B_TRUE;
 
-	rw_enter(&zvol_state_lock, RW_READER);
-	zv = disk->private_data;
+	zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
+	if (zv == NULL)
+		return;
 
 	mutex_enter(&zv->zv_state_lock);
 	ASSERT3U(zv->zv_open_count, >, 0);
@@ -839,6 +849,15 @@ zvol_release(struct gendisk *disk, fmode_t unused)
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, RW_READER);
 			mutex_enter(&zv->zv_state_lock);
+
+			/*
+			 * Unlike in zvol_open(), we don't check if removal
+			 * started here, because we might be one of the openers
+			 * that needs to be thrown out! If we're the last, we
+			 * need to call zvol_last_close() below to finish
+			 * cleanup. So, no special treatment for us.
+			 */
+
 			/* check to see if zv_suspend_lock is needed */
 			if (zv->zv_open_count != 1) {
 				rw_exit(&zv->zv_suspend_lock);
@@ -848,7 +867,6 @@ zvol_release(struct gendisk *disk, fmode_t unused)
 	} else {
 		drop_suspend = B_FALSE;
 	}
-	rw_exit(&zvol_state_lock);
 
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
@@ -868,9 +886,10 @@ zvol_ioctl(struct block_device *bdev, fmode_t mode,
     unsigned int cmd, unsigned long arg)
 {
-	zvol_state_t *zv = bdev->bd_disk->private_data;
 	int error = 0;
 
+	zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+	ASSERT3P(zv, !=, NULL);
 	ASSERT3U(zv->zv_open_count, >, 0);
 
 	switch (cmd) {
@@ -923,9 +942,8 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
 {
 	unsigned int mask = 0;
 
-	rw_enter(&zvol_state_lock, RW_READER);
+	zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
 
-	zvol_state_t *zv = disk->private_data;
 	if (zv != NULL) {
 		mutex_enter(&zv->zv_state_lock);
 		mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
@@ -933,17 +951,14 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
 		mutex_exit(&zv->zv_state_lock);
 	}
 
-	rw_exit(&zvol_state_lock);
-
 	return (mask);
 }
 
 static int
 zvol_revalidate_disk(struct gendisk *disk)
 {
-	rw_enter(&zvol_state_lock, RW_READER);
+	zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
 
-	zvol_state_t *zv = disk->private_data;
 	if (zv != NULL) {
 		mutex_enter(&zv->zv_state_lock);
 		set_capacity(zv->zv_zso->zvo_disk,
@@ -951,8 +966,6 @@ zvol_revalidate_disk(struct gendisk *disk)
 		mutex_exit(&zv->zv_state_lock);
 	}
 
-	rw_exit(&zvol_state_lock);
-
 	return (0);
 }
 
@@ -980,9 +993,10 @@ zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
 static int
 zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
-	zvol_state_t *zv = bdev->bd_disk->private_data;
 	sector_t sectors;
 
+	zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+	ASSERT3P(zv, !=, NULL);
 	ASSERT3U(zv->zv_open_count, >, 0);
 
 	sectors = get_capacity(zv->zv_zso->zvo_disk);
@@ -1415,15 +1429,11 @@ zvol_os_remove_minor(zvol_state_t *zv)
 	ASSERT0(atomic_read(&zv->zv_suspend_ref));
 	ASSERT(zv->zv_flags & ZVOL_REMOVING);
 
-	/*
-	 * Cleared while holding zvol_state_lock as a writer
-	 * which will prevent zvol_open() from opening it.
-	 */
 	struct zvol_state_os *zso = zv->zv_zso;
 	zv->zv_zso = NULL;
 
 	/* Clearing private_data will make new callers return immediately. */
-	zso->zvo_disk->private_data = NULL;
+	atomic_store_ptr(&zso->zvo_disk->private_data, NULL);
 
 	/*
 	 * Drop the state lock before calling del_gendisk(). There may be
 	 * callers waiting to acquire it, but del_gendisk() will block until
 	 * they exit, which would deadlock.
@@ -1455,17 +1465,6 @@ zvol_os_remove_minor(zvol_state_t *zv)
 	mutex_enter(&zv->zv_state_lock);
 }
 
-/*
- * Cleanup then free a zvol_state_t which was created by zvol_alloc().
- * At this time, the structure is not opened by anyone, is taken off
- * the zvol_state_list, and has its private data set to NULL.
- * The zvol_state_lock is dropped.
- *
- * This function may take many milliseconds to complete (e.g. we've seen
- * it take over 256ms), due to the calls to "blk_cleanup_queue" and
- * "del_gendisk". Thus, consumers need to be careful to account for this
- * latency when calling this function.
- */
 void
 zvol_os_free(zvol_state_t *zv)
 {
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index cd3e1894c0ef..3a432d6d29ee 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -44,19 +44,30 @@
 /*
  * Note on locking of zvol state structures.
  *
- * These structures are used to maintain internal state used to emulate block
- * devices on top of zvols. In particular, management of device minor number
- * operations - create, remove, rename, and set_snapdev - involves access to
- * these structures. The zvol_state_lock is primarily used to protect the
- * zvol_state_list. The zv->zv_state_lock is used to protect the contents
- * of the zvol_state_t structures, as well as to make sure that when the
- * time comes to remove the structure from the list, it is not in use, and
- * therefore, it can be taken off zvol_state_list and freed.
+ * zvol_state_t represents the connection between a single dataset
+ * (DMU_OST_ZVOL) and the device "minor" (some OS-specific representation of a
+ * "disk" or "device" or "volume", eg, a /dev/zdXX node, a GEOM object, etc).
  *
- * The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
- * e.g. for the duration of receive and rollback operations. This lock can be
- * held for significant periods of time. Given that it is undesirable to hold
- * mutexes for long periods of time, the following lock ordering applies:
+ * The global zvol_state_lock is used to protect access to zvol_state_list and
+ * zvol_htable, which are the primary way to obtain a zvol_state_t from a name.
+ * It should not be used for anything not name-related, and you should avoid
+ * sleeping or waiting while it's held. See zvol_find_by_name(), zvol_insert(),
+ * zvol_remove().
+ *
+ * The zv_state_lock is used to protect the contents of the associated
+ * zvol_state_t. Most of the zvol_state_t is dedicated to control and
+ * configuration; almost none of it is needed for data operations (that is,
+ * read, write, flush) so this lock is rarely taken during general IO. It
+ * should be released quickly; you should avoid sleeping or waiting while it's
+ * held.
+ *
+ * zv_suspend_lock is used to suspend IO/data operations to a zvol. The read
+ * half should be held for the duration of an IO operation. The write half
+ * should be taken when something needs to wait for IO to complete and then
+ * block further IO, eg for the duration of receive and rollback operations.
+ * This lock can be held for long periods of time.
+ *
+ * Thus, the following lock ordering applies:
  * - take zvol_state_lock if necessary, to protect zvol_state_list
  * - take zv_suspend_lock if necessary, by the code path in question
  * - take zv_state_lock to protect zvol_state_t
@@ -67,9 +78,8 @@
  * these operations are serialized per pool. Consequently, we can be certain
  * that for a given zvol, there is only one operation at a time in progress.
  * That is why one can be sure that first, zvol_state_t for a given zvol is
- * allocated and placed on zvol_state_list, and then other minor operations
- * for this zvol are going to proceed in the order of issue.
- *
+ * allocated and placed on zvol_state_list, and then other minor operations for
+ * this zvol are going to proceed in the order of issue.
  */
 
 #include

From 3a41b4d736052735eee3650a6d5e8db6abf1e30f Mon Sep 17 00:00:00 2001
From: Rob Norris
Date: Tue, 5 Aug 2025 13:53:40 +1000
Subject: [PATCH 5/5] zvol_remove_minors_impl: remove all async fallbacks

Since both ZFS- and OS-sides of a zvol now take care of their own
locking and don't get in each other's way, there's no need for the very
complicated removal code to fall back to async tasks if the locks
needed at each stage can't be obtained right now.

Here we change it to be a linear three-step process: select zvols of
interest and flag them for removal, then wait for them to shed activity
and remove them, and finally, free them.

Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Signed-off-by: Rob Norris
---
 include/sys/zvol_impl.h |   1 +
 module/zfs/zvol.c       | 185 ++++++++++++++++++----------------------
 2 files changed, 84 insertions(+), 102 deletions(-)

diff --git a/include/sys/zvol_impl.h b/include/sys/zvol_impl.h
index 64ab2a8118f5..5422e66832c0 100644
--- a/include/sys/zvol_impl.h
+++ b/include/sys/zvol_impl.h
@@ -56,6 +56,7 @@ typedef struct zvol_state {
 	atomic_t zv_suspend_ref;	/* refcount for suspend */
 	krwlock_t zv_suspend_lock;	/* suspend lock */
 	kcondvar_t zv_removing_cv;	/* ready to remove minor */
+	list_node_t zv_remove_node;	/* node on removal list */
 	struct zvol_state_os *zv_zso;	/* private platform state */
 	boolean_t zv_threading;	/* volthreading property */
 } zvol_state_t;
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 3a432d6d29ee..2fd3e1c37045 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1579,51 +1579,6 @@ zvol_create_minors_impl(zvol_task_t *task)
 	zvol_task_update_status(task, total, done, last_error);
 }
 
-/*
- * Remove minors for specified dataset including children and snapshots.
- */
-
-/*
- * Remove the minor for a given zvol. This will do it all:
- * - flag the zvol for removal, so new requests are rejected
- * - wait until outstanding requests are completed
- * - remove it from lists
- * - free it
- * It's also usable as a taskq task, and smells nice too.
- */
-static void
-zvol_remove_minor_task(void *arg)
-{
-	zvol_state_t *zv = (zvol_state_t *)arg;
-
-	ASSERT(!RW_LOCK_HELD(&zvol_state_lock));
-	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
-
-	mutex_enter(&zv->zv_state_lock);
-	while (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
-		zv->zv_flags |= ZVOL_REMOVING;
-		cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
-	}
-	mutex_exit(&zv->zv_state_lock);
-
-	rw_enter(&zvol_state_lock, RW_WRITER);
-	mutex_enter(&zv->zv_state_lock);
-
-	zvol_os_remove_minor(zv);
-	zvol_remove(zv);
-
-	mutex_exit(&zv->zv_state_lock);
-	rw_exit(&zvol_state_lock);
-
-	zvol_os_free(zv);
-}
-
-static void
-zvol_free_task(void *arg)
-{
-	zvol_os_free(arg);
-}
-
 /*
  * Remove minors for specified dataset and, optionally, its children and
  * snapshots.
@@ -1636,25 +1591,33 @@ zvol_remove_minors_impl(zvol_task_t *task)
 	int namelen = ((name) ? strlen(name) : 0);
 	boolean_t children = task ? !!task->zt_value : B_TRUE;
 
-	taskqid_t t;
-	list_t delay_list, free_list;
-
 	if (zvol_inhibit_dev)
 		return;
 
-	list_create(&delay_list, sizeof (zvol_state_t),
-	    offsetof(zvol_state_t, zv_next));
-	list_create(&free_list, sizeof (zvol_state_t),
-	    offsetof(zvol_state_t, zv_next));
-
-	int error = ENOENT;
+	/*
+	 * We collect up zvols that we want to remove on a separate list, so
+	 * that we don't have to hold zvol_state_lock for the whole time.
+	 *
+	 * We can't remove them from the global lists until we're completely
+	 * done with them, because that would make it appear to ZFS-side ops
+	 * that they don't exist, and the name might be reused, which can't be
+	 * good.
+	 */
+	list_t remove_list;
+	list_create(&remove_list, sizeof (zvol_state_t),
+	    offsetof(zvol_state_t, zv_remove_node));
 
-	rw_enter(&zvol_state_lock, RW_WRITER);
+	rw_enter(&zvol_state_lock, RW_READER);
 
 	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
 		zv_next = list_next(&zvol_state_list, zv);
 
 		mutex_enter(&zv->zv_state_lock);
+		if (zv->zv_flags & ZVOL_REMOVING) {
+			/* Another thread is handling shutdown, skip it. */
+			mutex_exit(&zv->zv_state_lock);
+			continue;
+		}
 
 		/*
 		 * This zvol should be removed if:
@@ -1669,61 +1632,87 @@ zvol_remove_minors_impl(zvol_task_t *task)
 		    (children && strncmp(zv->zv_name, name, namelen) == 0 &&
 		    (zv->zv_name[namelen] == '/' ||
 		    zv->zv_name[namelen] == '@'))) {
-			/*
-			 * By holding zv_state_lock here, we guarantee that no
-			 * one is currently using this zv
-			 */
-			error = 0;
 
 			/*
-			 * If in use, try to throw everyone off and try again
-			 * later.
+			 * Matched, so mark it for removal. We want to take
+			 * the write half of the suspend lock to make sure
+			 * that the zvol is not suspended, and give any data
+			 * ops a chance to finish.
 			 */
-			zv->zv_flags |= ZVOL_REMOVING;
-			if (zv->zv_open_count > 0 ||
-			    atomic_read(&zv->zv_suspend_ref)) {
-				t = taskq_dispatch(
-				    zv->zv_objset->os_spa->spa_zvol_taskq,
-				    zvol_remove_minor_task, zv, TQ_SLEEP);
-				if (t == TASKQID_INVALID) {
-					/*
-					 * Couldn't create the task, so we'll
-					 * do it in place once the loop is
-					 * finished.
-					 */
-					list_insert_head(&delay_list, zv);
-				}
+			mutex_exit(&zv->zv_state_lock);
+			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
+			mutex_enter(&zv->zv_state_lock);
+
+			if (zv->zv_flags & ZVOL_REMOVING) {
+				/* Another thread has taken it, let them. */
 				mutex_exit(&zv->zv_state_lock);
+				rw_exit(&zv->zv_suspend_lock);
 				continue;
 			}
 
-			zvol_os_remove_minor(zv);
-			zvol_remove(zv);
-
-			/* Drop zv_state_lock before zvol_free() */
+			/*
+			 * Mark it and unlock. New entries will see the flag
+			 * and return ENXIO.
+			 */
+			zv->zv_flags |= ZVOL_REMOVING;
 			mutex_exit(&zv->zv_state_lock);
+			rw_exit(&zv->zv_suspend_lock);
 
-			/* Try parallel zv_free, if failed do it in place */
-			t = taskq_dispatch(system_taskq, zvol_free_task, zv,
-			    TQ_SLEEP);
-			if (t == TASKQID_INVALID)
-				list_insert_head(&free_list, zv);
-		} else {
+			/* Put it on the list for the next stage. */
+			list_insert_head(&remove_list, zv);
+		} else
 			mutex_exit(&zv->zv_state_lock);
-		}
 	}
+
 	rw_exit(&zvol_state_lock);
 
-	/* Wait for zvols that we couldn't create a remove task for */
-	while ((zv = list_remove_head(&delay_list)) != NULL)
-		zvol_remove_minor_task(zv);
+	/* Didn't match any, nothing to do! */
+	if (list_is_empty(&remove_list)) {
+		if (task)
+			task->zt_error = SET_ERROR(ENOENT);
+		return;
+	}
+
+	/* Actually shut them all down. */
+	for (zv = list_head(&remove_list); zv != NULL; zv = zv_next) {
+		zv_next = list_next(&remove_list, zv);
 
-	/* Free any that we couldn't free in parallel earlier */
-	while ((zv = list_remove_head(&free_list)) != NULL)
+		mutex_enter(&zv->zv_state_lock);
+
+		/*
+		 * Still open or suspended, just wait. This can happen if, for
+		 * example, we managed to acquire zv_state_lock in the moments
+		 * where zvol_open() or zvol_release() are trading locks to
+		 * call zvol_first_open() or zvol_last_close().
+		 */
+		while (zv->zv_open_count > 0 ||
+		    atomic_read(&zv->zv_suspend_ref))
+			cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
+
+		/*
+		 * No users, shut down the OS side. This may not remove the
+		 * minor from view immediately, depending on the kernel
+		 * specifics, but it will ensure that it is unusable and that
+		 * this zvol_state_t can never again be reached from an OS-side
+		 * operation.
+		 */
+		zvol_os_remove_minor(zv);
+		mutex_exit(&zv->zv_state_lock);
+
+		/* Remove it from the name lookup lists */
+		rw_enter(&zvol_state_lock, RW_WRITER);
+		zvol_remove(zv);
+		rw_exit(&zvol_state_lock);
+	}
+
+	/*
+	 * Our own references on remove_list are the last ones; free them and
+	 * we're done.
+	 */
+	while ((zv = list_remove_head(&remove_list)) != NULL)
 		zvol_os_free(zv);
 
-	if (error && task)
-		task->zt_error = SET_ERROR(error);
+	list_destroy(&remove_list);
 }
 
 /* Remove minor for this specific volume only */
@@ -2182,14 +2171,6 @@ zvol_fini_impl(void)
 
 	zvol_remove_minors_impl(NULL);
 
-	/*
-	 * The call to "zvol_remove_minors_impl" may dispatch entries to
-	 * the system_taskq, but it doesn't wait for those entries to
-	 * complete before it returns. Thus, we must wait for all of the
-	 * removals to finish, before we can continue.
-	 */
-	taskq_wait_outstanding(system_taskq, 0);
-
 	kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
 	list_destroy(&zvol_state_list);
 	rw_destroy(&zvol_state_lock);