git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 29 May 2025 11:42:00 +0000 (13:42 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 29 May 2025 11:42:00 +0000 (13:42 +0200)
added patches:
can-kvaser_pciefd-force-irq-edge-in-case-of-nested-irq.patch

queue-6.12/can-kvaser_pciefd-force-irq-edge-in-case-of-nested-irq.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/can-kvaser_pciefd-force-irq-edge-in-case-of-nested-irq.patch b/queue-6.12/can-kvaser_pciefd-force-irq-edge-in-case-of-nested-irq.patch
new file mode 100644 (file)
index 0000000..c9c3168
--- /dev/null
@@ -0,0 +1,190 @@
+From 9176bd205ee0b2cd35073a9973c2a0936bcb579e Mon Sep 17 00:00:00 2001
+From: Axel Forsman <axfo@kvaser.com>
+Date: Tue, 20 May 2025 13:43:30 +0200
+Subject: can: kvaser_pciefd: Force IRQ edge in case of nested IRQ
+
+From: Axel Forsman <axfo@kvaser.com>
+
+commit 9176bd205ee0b2cd35073a9973c2a0936bcb579e upstream.
+
+Avoid the driver missing IRQs by temporarily masking IRQs in the ISR
+to enforce an edge even if a different IRQ is signalled before handled
+IRQs are cleared.
+
+Fixes: 48f827d4f48f ("can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the ISR")
+Cc: stable@vger.kernel.org
+Signed-off-by: Axel Forsman <axfo@kvaser.com>
+Tested-by: Jimmy Assarsson <extja@kvaser.com>
+Reviewed-by: Jimmy Assarsson <extja@kvaser.com>
+Link: https://patch.msgid.link/20250520114332.8961-2-axfo@kvaser.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+
+---
+ drivers/net/can/kvaser_pciefd.c |   81 ++++++++++++++++++----------------------
+ 1 file changed, 38 insertions(+), 43 deletions(-)
+
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1670,24 +1670,28 @@ static int kvaser_pciefd_read_buffer(str
+       return res;
+ }
+-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
++static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ {
++      void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
+       u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+-      if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
++      iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
++
++      if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+               kvaser_pciefd_read_buffer(pcie, 0);
++              iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
++      }
+-      if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
++      if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+               kvaser_pciefd_read_buffer(pcie, 1);
++              iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
++      }
+       if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+                    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+                    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+                    irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
+               dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+-
+-      iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+-      return irq;
+ }
+ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+@@ -1715,29 +1719,22 @@ static irqreturn_t kvaser_pciefd_irq_han
+       struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+       const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+       u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+-      u32 srb_irq = 0;
+-      u32 srb_release = 0;
+       int i;
+       if (!(pci_irq & irq_mask->all))
+               return IRQ_NONE;
++      iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
++
+       if (pci_irq & irq_mask->kcan_rx0)
+-              srb_irq = kvaser_pciefd_receive_irq(pcie);
++              kvaser_pciefd_receive_irq(pcie);
+       for (i = 0; i < pcie->nr_channels; i++) {
+               if (pci_irq & irq_mask->kcan_tx[i])
+                       kvaser_pciefd_transmit_irq(pcie->can[i]);
+       }
+-      if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+-              srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
+-
+-      if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+-              srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
+-
+-      if (srb_release)
+-              iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
++      iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+       return IRQ_HANDLED;
+ }
+@@ -1757,13 +1754,22 @@ static void kvaser_pciefd_teardown_can_c
+       }
+ }
++static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
++{
++      unsigned int i;
++
++      /* Masking PCI_IRQ is insufficient as running ISR will unmask it */
++      iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
++      for (i = 0; i < pcie->nr_channels; ++i)
++              iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
++}
++
+ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+                              const struct pci_device_id *id)
+ {
+       int ret;
+       struct kvaser_pciefd *pcie;
+       const struct kvaser_pciefd_irq_mask *irq_mask;
+-      void __iomem *irq_en_base;
+       pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+       if (!pcie)
+@@ -1829,8 +1835,7 @@ static int kvaser_pciefd_probe(struct pc
+                 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+       /* Enable PCI interrupts */
+-      irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
+-      iowrite32(irq_mask->all, irq_en_base);
++      iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+       /* Ready the DMA buffers */
+       iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+                 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+@@ -1844,8 +1849,7 @@ static int kvaser_pciefd_probe(struct pc
+       return 0;
+ err_free_irq:
+-      /* Disable PCI interrupts */
+-      iowrite32(0, irq_en_base);
++      kvaser_pciefd_disable_irq_srcs(pcie);
+       free_irq(pcie->pci->irq, pcie);
+ err_pci_free_irq_vectors:
+@@ -1868,35 +1872,26 @@ err_disable_pci:
+       return ret;
+ }
+-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
+-{
+-      int i;
+-
+-      for (i = 0; i < pcie->nr_channels; i++) {
+-              struct kvaser_pciefd_can *can = pcie->can[i];
+-
+-              if (can) {
+-                      iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+-                      unregister_candev(can->can.dev);
+-                      del_timer(&can->bec_poll_timer);
+-                      kvaser_pciefd_pwm_stop(can);
+-                      free_candev(can->can.dev);
+-              }
+-      }
+-}
+-
+ static void kvaser_pciefd_remove(struct pci_dev *pdev)
+ {
+       struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
++      unsigned int i;
+-      kvaser_pciefd_remove_all_ctrls(pcie);
++      for (i = 0; i < pcie->nr_channels; ++i) {
++              struct kvaser_pciefd_can *can = pcie->can[i];
+-      /* Disable interrupts */
+-      iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+-      iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
++              unregister_candev(can->can.dev);
++              del_timer(&can->bec_poll_timer);
++              kvaser_pciefd_pwm_stop(can);
++      }
++      kvaser_pciefd_disable_irq_srcs(pcie);
+       free_irq(pcie->pci->irq, pcie);
+       pci_free_irq_vectors(pcie->pci);
++
++      for (i = 0; i < pcie->nr_channels; ++i)
++              free_candev(pcie->can[i]->can.dev);
++
+       pci_iounmap(pdev, pcie->reg_base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f04fc8b24fe472875411a2da9b1194a86f0f9b27 100644 (file)
@@ -0,0 +1 @@
+can-kvaser_pciefd-force-irq-edge-in-case-of-nested-irq.patch