scsi: lpfc: Release hbalock before calling lpfc_worker_wake_up()
Author:     Justin Tee <justin.tee@broadcom.com>
AuthorDate: Tue, 5 Mar 2024 20:04:57 +0000 (12:04 -0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 17 May 2024 10:02:09 +0000 (12:02 +0200)
[ Upstream commit ded20192dff31c91cef2a04f7e20e60e9bb887d3 ]

lpfc_worker_wake_up() calls the lpfc_work_done() routine, which takes the
hbalock.  Thus, lpfc_worker_wake_up() should not be called while holding the
hbalock to avoid potential deadlock.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240305200503.57317-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
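
A minimal user-space sketch of the rule this patch enforces, with a pthread
mutex standing in for the driver's hbalock: queue the work item while holding
the lock, drop the lock, and only then call the wake-up routine, since the
wake-up path re-acquires the same lock. The helper names (queue_event,
worker_wake_up, work_done) are hypothetical stand-ins for
lpfc_els_retry_delay(), lpfc_worker_wake_up() and lpfc_work_done(); this is an
illustration of the pattern, not driver code.

/*
 * Model of the hbalock rule: worker_wake_up() ends up taking the lock,
 * so callers must release it before waking the worker.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static int work_pending;

/* Stand-in for lpfc_work_done(): needs the lock to scan the work list. */
static void work_done(void)
{
	pthread_mutex_lock(&hbalock);
	if (work_pending) {
		work_pending = 0;
		printf("processed queued work\n");
	}
	pthread_mutex_unlock(&hbalock);
}

/* Stand-in for lpfc_worker_wake_up(): takes the lock via work_done(). */
static void worker_wake_up(void)
{
	work_done();
}

/* The corrected pattern: queue under the lock, wake after dropping it. */
static void queue_event(void)
{
	pthread_mutex_lock(&hbalock);
	work_pending = 1;		/* list_add_tail() equivalent */
	pthread_mutex_unlock(&hbalock);	/* release hbalock first ...  */

	worker_wake_up();		/* ... then wake the worker   */
}

int main(void)
{
	queue_event();
	return 0;
}
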
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_sli.c

diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 18b8325fd419eb1e5a1e3d85f247c72094802f29..44d3ada9fbbcb115e1a68933a1cd59a0c87a7392 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4432,23 +4432,23 @@ lpfc_els_retry_delay(struct timer_list *t)
        unsigned long flags;
        struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
 
+       /* Hold a node reference for outstanding queued work */
+       if (!lpfc_nlp_get(ndlp))
+               return;
+
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!list_empty(&evtp->evt_listp)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
+               lpfc_nlp_put(ndlp);
                return;
        }
 
-       /* We need to hold the node by incrementing the reference
-        * count until the queued work is done
-        */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       if (evtp->evt_arg1) {
-               evtp->evt = LPFC_EVT_ELS_RETRY;
-               list_add_tail(&evtp->evt_listp, &phba->work_list);
-               lpfc_worker_wake_up(phba);
-       }
+       evtp->evt_arg1 = ndlp;
+       evtp->evt = LPFC_EVT_ELS_RETRY;
+       list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);
-       return;
+
+       lpfc_worker_wake_up(phba);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5154eeaee0ec325c8f293ee52e291ea5d9b7aa2f..93703ab6ce037905e0b9dbc4697ab39e90fb6b4b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -257,7 +257,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                if (evtp->evt_arg1) {
                        evtp->evt = LPFC_EVT_DEV_LOSS;
                        list_add_tail(&evtp->evt_listp, &phba->work_list);
+                       spin_unlock_irqrestore(&phba->hbalock, iflags);
                        lpfc_worker_wake_up(phba);
+                       return;
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        } else {
@@ -275,10 +277,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                        lpfc_disc_state_machine(vport, ndlp, NULL,
                                                NLP_EVT_DEVICE_RM);
                }
-
        }
-
-       return;
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4dfadf254a72710564136371032ff460e77a1051..9dab33686a9317df61a8d7de9ce26bd75366e69a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (empty)
                lpfc_worker_wake_up(phba);
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
 out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -11369,18 +11369,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
        unsigned long iflags;
        struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
 
+       /* Hold a node reference for outstanding queued work */
+       if (!lpfc_nlp_get(ndlp))
+               return;
+
        spin_lock_irqsave(&phba->hbalock, iflags);
        if (!list_empty(&evtp->evt_listp)) {
                spin_unlock_irqrestore(&phba->hbalock, iflags);
+               lpfc_nlp_put(ndlp);
                return;
        }
 
-       /* Incrementing the reference count until the queued work is done. */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       if (!evtp->evt_arg1) {
-               spin_unlock_irqrestore(&phba->hbalock, iflags);
-               return;
-       }
+       evtp->evt_arg1 = ndlp;
        evtp->evt = LPFC_EVT_RECOVER_PORT;
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);