git.ipfire.org Git - thirdparty/linux.git/commitdiff
nvmet: move async event work off nvmet-wq
authorChaitanya Kulkarni <kch@nvidia.com>
Thu, 26 Feb 2026 04:30:03 +0000 (20:30 -0800)
committerKeith Busch <kbusch@kernel.org>
Tue, 10 Mar 2026 15:20:28 +0000 (08:20 -0700)
In the NVMe target, nvmet_ctrl_free() flushes ctrl->async_event_work.
If nvmet_ctrl_free() runs on nvmet-wq, the flush re-enters workqueue
completion for the same worker:

A. Async event work queued on nvmet-wq (prior to disconnect):
  nvmet_execute_async_event()
     queue_work(nvmet_wq, &ctrl->async_event_work)

  nvmet_add_async_event()
     queue_work(nvmet_wq, &ctrl->async_event_work)

B. Full pre-work chain (RDMA CM path):
  nvmet_rdma_cm_handler()
     nvmet_rdma_queue_disconnect()
       __nvmet_rdma_queue_disconnect()
         queue_work(nvmet_wq, &queue->release_work)
           process_one_work()
             lock((wq_completion)nvmet-wq)  <--------- 1st
             nvmet_rdma_release_queue_work()

C. Recursive path (same worker):
  nvmet_rdma_release_queue_work()
     nvmet_rdma_free_queue()
       nvmet_sq_destroy()
         nvmet_ctrl_put()
           nvmet_ctrl_free()
             flush_work(&ctrl->async_event_work)
               __flush_work()
                 touch_wq_lockdep_map()
                 lock((wq_completion)nvmet-wq) <--------- 2nd

Lockdep splat:

  ============================================
  WARNING: possible recursive locking detected
  6.19.0-rc3nvme+ #14 Tainted: G                 N
  --------------------------------------------
  kworker/u192:42/44933 is trying to acquire lock:
  ffff888118a00948 ((wq_completion)nvmet-wq){+.+.}-{0:0}, at: touch_wq_lockdep_map+0x26/0x90

  but task is already holding lock:
  ffff888118a00948 ((wq_completion)nvmet-wq){+.+.}-{0:0}, at: process_one_work+0x53e/0x660

  3 locks held by kworker/u192:42/44933:
   #0: ffff888118a00948 ((wq_completion)nvmet-wq){+.+.}-{0:0}, at: process_one_work+0x53e/0x660
   #1: ffffc9000e6cbe28 ((work_completion)(&queue->release_work)){+.+.}-{0:0}, at: process_one_work+0x1c5/0x660
   #2: ffffffff82d4db60 (rcu_read_lock){....}-{1:3}, at: __flush_work+0x62/0x530

  Workqueue: nvmet-wq nvmet_rdma_release_queue_work [nvmet_rdma]
  Call Trace:
   __flush_work+0x268/0x530
   nvmet_ctrl_free+0x140/0x310 [nvmet]
   nvmet_cq_put+0x74/0x90 [nvmet]
   nvmet_rdma_free_queue+0x23/0xe0 [nvmet_rdma]
   nvmet_rdma_release_queue_work+0x19/0x50 [nvmet_rdma]
   process_one_work+0x206/0x660
   worker_thread+0x184/0x320
   kthread+0x10c/0x240
   ret_from_fork+0x319/0x390

Move async event work to a dedicated nvmet-aen-wq to avoid reentrant
flush on nvmet-wq.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c

index 5e366502fb757ec89bf1d70f7019ff48d978ac3d..66fc8d2a7fe3a1a596be926808754ecc71b3bc67 100644 (file)
@@ -1586,7 +1586,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);
 
-       queue_work(nvmet_wq, &ctrl->async_event_work);
+       queue_work(nvmet_aen_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)
index eab3e4fc0f74a59789e3aada01f40e83018da407..a25ac4bf9972e520cdcabf540f693deba35a462a 100644 (file)
@@ -27,6 +27,8 @@ static DEFINE_IDA(cntlid_ida);
 
 struct workqueue_struct *nvmet_wq;
 EXPORT_SYMBOL_GPL(nvmet_wq);
+struct workqueue_struct *nvmet_aen_wq;
+EXPORT_SYMBOL_GPL(nvmet_aen_wq);
 
 /*
  * This read/write semaphore is used to synchronize access to configuration
@@ -206,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
        list_add_tail(&aen->entry, &ctrl->async_events);
        mutex_unlock(&ctrl->lock);
 
-       queue_work(nvmet_wq, &ctrl->async_event_work);
+       queue_work(nvmet_aen_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -1959,9 +1961,14 @@ static int __init nvmet_init(void)
        if (!nvmet_wq)
                goto out_free_buffered_work_queue;
 
+       nvmet_aen_wq = alloc_workqueue("nvmet-aen-wq",
+                       WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+       if (!nvmet_aen_wq)
+               goto out_free_nvmet_work_queue;
+
        error = nvmet_init_debugfs();
        if (error)
-               goto out_free_nvmet_work_queue;
+               goto out_free_nvmet_aen_work_queue;
 
        error = nvmet_init_discovery();
        if (error)
@@ -1977,6 +1984,8 @@ out_exit_discovery:
        nvmet_exit_discovery();
 out_exit_debugfs:
        nvmet_exit_debugfs();
+out_free_nvmet_aen_work_queue:
+       destroy_workqueue(nvmet_aen_wq);
 out_free_nvmet_work_queue:
        destroy_workqueue(nvmet_wq);
 out_free_buffered_work_queue:
@@ -1994,6 +2003,7 @@ static void __exit nvmet_exit(void)
        nvmet_exit_discovery();
        nvmet_exit_debugfs();
        ida_destroy(&cntlid_ida);
+       destroy_workqueue(nvmet_aen_wq);
        destroy_workqueue(nvmet_wq);
        destroy_workqueue(buffered_io_wq);
        destroy_workqueue(zbd_wq);
index b664b584fdc8e63abbd76eebc55bd97b245a7367..319d6a5e9cf053f3523d7ddf1d7f77fd23cea6f4 100644 (file)
@@ -501,6 +501,7 @@ extern struct kmem_cache *nvmet_bvec_cache;
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
 extern struct workqueue_struct *nvmet_wq;
+extern struct workqueue_struct *nvmet_aen_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
index 9c12b2361a6d7a14609a6c68bcf114ca882fdc78..038432364967112e80b30e1810c4df8551de7780 100644 (file)
@@ -2088,6 +2088,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
        flush_workqueue(nvmet_wq);
+       flush_workqueue(nvmet_aen_wq);
 }
 
 static struct ib_client nvmet_rdma_ib_client = {