git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
scsi: lpfc: Release hbalock before calling lpfc_worker_wake_up()
author: Justin Tee <justin.tee@broadcom.com>
Tue, 5 Mar 2024 20:04:57 +0000 (12:04 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 May 2024 10:14:37 +0000 (12:14 +0200)
[ Upstream commit ded20192dff31c91cef2a04f7e20e60e9bb887d3 ]

lpfc_worker_wake_up() calls the lpfc_work_done() routine, which takes the
hbalock.  Thus, lpfc_worker_wake_up() should not be called while holding the
hbalock to avoid potential deadlock.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240305200503.57317-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_sli.c

index 4d723200690a4bd7d7329cefacaefd0e0a6ab7d0..26736122bda17abd94836455cefd05db04cb5adb 100644 (file)
@@ -4462,23 +4462,23 @@ lpfc_els_retry_delay(struct timer_list *t)
        unsigned long flags;
        struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
 
+       /* Hold a node reference for outstanding queued work */
+       if (!lpfc_nlp_get(ndlp))
+               return;
+
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!list_empty(&evtp->evt_listp)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
+               lpfc_nlp_put(ndlp);
                return;
        }
 
-       /* We need to hold the node by incrementing the reference
-        * count until the queued work is done
-        */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       if (evtp->evt_arg1) {
-               evtp->evt = LPFC_EVT_ELS_RETRY;
-               list_add_tail(&evtp->evt_listp, &phba->work_list);
-               lpfc_worker_wake_up(phba);
-       }
+       evtp->evt_arg1 = ndlp;
+       evtp->evt = LPFC_EVT_ELS_RETRY;
+       list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);
-       return;
+
+       lpfc_worker_wake_up(phba);
 }
 
 /**
index f80bbc315f4caa57ea7f36ca3358c6cb68c01cda..da3aee0f63237cadd2e0054ea1eee7b8a488c1ba 100644 (file)
@@ -257,7 +257,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                if (evtp->evt_arg1) {
                        evtp->evt = LPFC_EVT_DEV_LOSS;
                        list_add_tail(&evtp->evt_listp, &phba->work_list);
+                       spin_unlock_irqrestore(&phba->hbalock, iflags);
                        lpfc_worker_wake_up(phba);
+                       return;
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        } else {
@@ -275,10 +277,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                        lpfc_disc_state_machine(vport, ndlp, NULL,
                                                NLP_EVT_DEVICE_RM);
                }
-
        }
-
-       return;
 }
 
 /**
index 706985358c6a02f37d91698c0163f506695e22cc..c00b945947b1d2e77968dbef4c86ffd3be91e05d 100644 (file)
@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (empty)
                lpfc_worker_wake_up(phba);
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
 out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -11373,18 +11373,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
        unsigned long iflags;
        struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
 
+       /* Hold a node reference for outstanding queued work */
+       if (!lpfc_nlp_get(ndlp))
+               return;
+
        spin_lock_irqsave(&phba->hbalock, iflags);
        if (!list_empty(&evtp->evt_listp)) {
                spin_unlock_irqrestore(&phba->hbalock, iflags);
+               lpfc_nlp_put(ndlp);
                return;
        }
 
-       /* Incrementing the reference count until the queued work is done. */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       if (!evtp->evt_arg1) {
-               spin_unlock_irqrestore(&phba->hbalock, iflags);
-               return;
-       }
+       evtp->evt_arg1 = ndlp;
        evtp->evt = LPFC_EVT_RECOVER_PORT;
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);