From 7a74be1f97d9ad0a7989366bbfda6adeb419a9c2 Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Thu, 17 Aug 2023 21:30:37 -0400
Subject: [PATCH] Drop nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch

Signed-off-by: Sasha Levin
---
 ...hold-rcu-read-lock-in-nvme_ns_chr_ur.patch | 55 -------------------
 queue-6.1/series                              |  1 -
 ...hold-rcu-read-lock-in-nvme_ns_chr_ur.patch | 55 -------------------
 queue-6.4/series                              |  1 -
 4 files changed, 112 deletions(-)
 delete mode 100644 queue-6.1/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
 delete mode 100644 queue-6.4/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch

diff --git a/queue-6.1/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch b/queue-6.1/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
deleted file mode 100644
index 04e49afc4eb..00000000000
--- a/queue-6.1/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From d01f4cee237b3a0cba25055f9e083a53ec71afcd Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Wed, 9 Aug 2023 10:04:40 +0800
-Subject: nvme: core: don't hold rcu read lock in nvme_ns_chr_uring_cmd_iopoll
-
-From: Ming Lei
-
-[ Upstream commit a7a7dabb5dd72d2875bc3ce56f94ea5ceb259d5b ]
-
-Now nvme_ns_chr_uring_cmd_iopoll() has switched to request based io
-polling, and the associated NS is guaranteed to be live in case of
-io polling, so request is guaranteed to be valid because blk-mq uses
-pre-allocated request pool.
-
-Remove the rcu read lock in nvme_ns_chr_uring_cmd_iopoll(), which
-isn't needed any more after switching to request based io polling.
-
-Fix "BUG: sleeping function called from invalid context" because
-set_page_dirty_lock() from blk_rq_unmap_user() may sleep.
-
-Fixes: 585079b6e425 ("nvme: wire up async polling for io passthrough commands")
-Reported-by: Guangwu Zhang
-Cc: Kanchan Joshi
-Cc: Anuj Gupta
-Signed-off-by: Ming Lei
-Tested-by: Guangwu Zhang
-Link: https://lore.kernel.org/r/20230809020440.174682-1-ming.lei@redhat.com
-Signed-off-by: Jens Axboe
-Signed-off-by: Sasha Levin
----
- drivers/nvme/host/ioctl.c | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
-index 8224675f8de25..ac7d61ce30108 100644
---- a/drivers/nvme/host/ioctl.c
-+++ b/drivers/nvme/host/ioctl.c
-@@ -706,14 +706,12 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
- 	struct nvme_ns *ns;
- 	struct request_queue *q;
- 
--	rcu_read_lock();
- 	bio = READ_ONCE(ioucmd->cookie);
- 	ns = container_of(file_inode(ioucmd->file)->i_cdev,
- 			struct nvme_ns, cdev);
- 	q = ns->queue;
- 	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
- 		ret = bio_poll(bio, iob, poll_flags);
--	rcu_read_unlock();
- 	return ret;
- }
- #ifdef CONFIG_NVME_MULTIPATH
---
-2.40.1
-
diff --git a/queue-6.1/series b/queue-6.1/series
index c774ad73c3b..da4bbf5c22b 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -7,7 +7,6 @@ selftests-forwarding-tc_actions-cleanup-temporary-fi.patch
 selftests-forwarding-tc_actions-use-ncat-instead-of-.patch
 net-smc-replace-mutex-rmbs_lock-and-sndbufs_lock-wit.patch
 net-smc-fix-setsockopt-and-sysctl-to-specify-same-bu.patch
-nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
 net-phy-at803x-use-devm_regulator_get_enable_optiona.patch
 net-phy-at803x-fix-the-wol-setting-functions.patch
 drm-amdgpu-fix-calltrace-warning-in-amddrm_buddy_fin.patch
diff --git a/queue-6.4/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch b/queue-6.4/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
deleted file mode 100644
index e5498864f25..00000000000
--- a/queue-6.4/nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From f577575976f4800ac8750de557748de91d328e78 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Wed, 9 Aug 2023 10:04:40 +0800
-Subject: nvme: core: don't hold rcu read lock in nvme_ns_chr_uring_cmd_iopoll
-
-From: Ming Lei
-
-[ Upstream commit a7a7dabb5dd72d2875bc3ce56f94ea5ceb259d5b ]
-
-Now nvme_ns_chr_uring_cmd_iopoll() has switched to request based io
-polling, and the associated NS is guaranteed to be live in case of
-io polling, so request is guaranteed to be valid because blk-mq uses
-pre-allocated request pool.
-
-Remove the rcu read lock in nvme_ns_chr_uring_cmd_iopoll(), which
-isn't needed any more after switching to request based io polling.
-
-Fix "BUG: sleeping function called from invalid context" because
-set_page_dirty_lock() from blk_rq_unmap_user() may sleep.
-
-Fixes: 585079b6e425 ("nvme: wire up async polling for io passthrough commands")
-Reported-by: Guangwu Zhang
-Cc: Kanchan Joshi
-Cc: Anuj Gupta
-Signed-off-by: Ming Lei
-Tested-by: Guangwu Zhang
-Link: https://lore.kernel.org/r/20230809020440.174682-1-ming.lei@redhat.com
-Signed-off-by: Jens Axboe
-Signed-off-by: Sasha Levin
----
- drivers/nvme/host/ioctl.c | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
-index f15e7330b75ac..642f0310da278 100644
---- a/drivers/nvme/host/ioctl.c
-+++ b/drivers/nvme/host/ioctl.c
-@@ -787,14 +787,12 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
- 	struct nvme_ns *ns;
- 	struct request_queue *q;
- 
--	rcu_read_lock();
- 	bio = READ_ONCE(ioucmd->cookie);
- 	ns = container_of(file_inode(ioucmd->file)->i_cdev,
- 			struct nvme_ns, cdev);
- 	q = ns->queue;
- 	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
- 		ret = bio_poll(bio, iob, poll_flags);
--	rcu_read_unlock();
- 	return ret;
- }
- #ifdef CONFIG_NVME_MULTIPATH
---
-2.40.1
-
diff --git a/queue-6.4/series b/queue-6.4/series
index 5a381d184bf..01dd7381ea7 100644
--- a/queue-6.4/series
+++ b/queue-6.4/series
@@ -3,7 +3,6 @@ fix-a-couple-of-spelling-mistakes.patch
 wrap-lines-at-80.patch
 move-netfs_extract_iter_to_sg-to-lib-scatterlist.c.patch
 crypto-cifs-fix-error-handling-in-extract_iter_to_sg.patch
-nvme-core-don-t-hold-rcu-read-lock-in-nvme_ns_chr_ur.patch
 net-phy-at803x-use-devm_regulator_get_enable_optiona.patch
 net-phy-at803x-fix-the-wol-setting-functions.patch
 drm-amd-display-update-dtbclk-for-dcn32.patch
-- 
2.47.3