git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Drop irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
author     Sasha Levin <sashal@kernel.org>
           Mon, 9 Sep 2024 12:43:36 +0000 (08:43 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Mon, 9 Sep 2024 12:43:36 +0000 (08:43 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch [deleted file]
queue-5.10/series
queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch [deleted file]
queue-5.15/series
queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch [deleted file]
queue-6.1/series
queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch [deleted file]
queue-6.10/series
queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch [deleted file]
queue-6.6/series

diff --git a/queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index 4858683..0000000
--- a/queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 8074f0b4bfda80c2cbf87ef1106a1e5fe4d10109 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end-up modifying the affinity of VPE that is
-currently running, making the results unpredictible.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a9469751720c..b30319c65e7f 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1789,13 +1789,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-               for (i = 0; i < vm->nr_vpes; i++) {
-                       struct its_vpe *vpe = vm->vpes[i];
--                      struct irq_data *d = irq_get_irq_data(vpe->irq);
--                      /* Map the VPE to the first possible CPU */
--                      vpe->col_idx = cpumask_first(cpu_online_mask);
-                       its_send_vmapp(its, vpe, true);
-                       its_send_vinvall(its, vpe);
--                      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-               }
-       }
-@@ -4518,6 +4514,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-       struct its_node *its;
-+      /* Map the VPE to the first possible CPU */
-+      vpe->col_idx = cpumask_first(cpu_online_mask);
-+      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
-       /*
-        * If we use the list map, we issue VMAPP on demand... Unless
-        * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4526,9 +4526,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       if (!gic_requires_eager_mapping())
-               return 0;
--      /* Map the VPE to the first possible CPU */
--      vpe->col_idx = cpumask_first(cpu_online_mask);
--
-       list_for_each_entry(its, &its_nodes, entry) {
-               if (!is_v4(its))
-                       continue;
-@@ -4537,8 +4534,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-               its_send_vinvall(its, vpe);
-       }
--      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
-       return 0;
- }
--- 
-2.43.0
-
diff --git a/queue-5.10/series b/queue-5.10/series
index 352e078d7d7cf0710cd408c2959630a62353a08a..318e869270d7694e6a65dc62aede5255bf7f28f5 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -144,7 +144,6 @@ btrfs-replace-bug_on-with-assert-in-walk_down_proc.patch
 btrfs-clean-up-our-handling-of-refs-0-in-snapshot-de.patch
 pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
 btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
 s390-vmlinux.lds.s-move-ro_after_init-section-behind.patch
diff --git a/queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index d71821f..0000000
--- a/queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 4ab6b642d1181207b6203dac042c7ac818534f3e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end-up modifying the affinity of VPE that is
-currently running, making the results unpredictible.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 3fa6c7184326..b346753135e0 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1794,13 +1794,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-               for (i = 0; i < vm->nr_vpes; i++) {
-                       struct its_vpe *vpe = vm->vpes[i];
--                      struct irq_data *d = irq_get_irq_data(vpe->irq);
--                      /* Map the VPE to the first possible CPU */
--                      vpe->col_idx = cpumask_first(cpu_online_mask);
-                       its_send_vmapp(its, vpe, true);
-                       its_send_vinvall(its, vpe);
--                      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-               }
-       }
-@@ -4540,6 +4536,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-       struct its_node *its;
-+      /* Map the VPE to the first possible CPU */
-+      vpe->col_idx = cpumask_first(cpu_online_mask);
-+      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
-       /*
-        * If we use the list map, we issue VMAPP on demand... Unless
-        * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4548,9 +4548,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       if (!gic_requires_eager_mapping())
-               return 0;
--      /* Map the VPE to the first possible CPU */
--      vpe->col_idx = cpumask_first(cpu_online_mask);
--
-       list_for_each_entry(its, &its_nodes, entry) {
-               if (!is_v4(its))
-                       continue;
-@@ -4559,8 +4556,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-               its_send_vinvall(its, vpe);
-       }
--      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
-       return 0;
- }
--- 
-2.43.0
-
diff --git a/queue-5.15/series b/queue-5.15/series
index 75dec7eee90ef4f0a713b41c4c1ce2bd2b86a7ab..286c3936b03041e204720e3278abcd9e6baf2065 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -157,7 +157,6 @@ btrfs-replace-bug_on-with-error-handling-at-update_r.patch
 riscv-set-trap-vector-earlier.patch
 pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
 kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
 btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
diff --git a/queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index b7c7381..0000000
--- a/queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 27fabf1b7f8e8f557257513651d8ea38634a1830 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end-up modifying the affinity of VPE that is
-currently running, making the results unpredictible.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a7a952bbfdc2..009e0fb43738 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1800,13 +1800,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-               for (i = 0; i < vm->nr_vpes; i++) {
-                       struct its_vpe *vpe = vm->vpes[i];
--                      struct irq_data *d = irq_get_irq_data(vpe->irq);
--                      /* Map the VPE to the first possible CPU */
--                      vpe->col_idx = cpumask_first(cpu_online_mask);
-                       its_send_vmapp(its, vpe, true);
-                       its_send_vinvall(its, vpe);
--                      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-               }
-       }
-@@ -4525,6 +4521,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-       struct its_node *its;
-+      /* Map the VPE to the first possible CPU */
-+      vpe->col_idx = cpumask_first(cpu_online_mask);
-+      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
-       /*
-        * If we use the list map, we issue VMAPP on demand... Unless
-        * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4533,9 +4533,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       if (!gic_requires_eager_mapping())
-               return 0;
--      /* Map the VPE to the first possible CPU */
--      vpe->col_idx = cpumask_first(cpu_online_mask);
--
-       list_for_each_entry(its, &its_nodes, entry) {
-               if (!is_v4(its))
-                       continue;
-@@ -4544,8 +4541,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-               its_send_vinvall(its, vpe);
-       }
--      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
-       return 0;
- }
--- 
-2.43.0
-
diff --git a/queue-6.1/series b/queue-6.1/series
index a9c85d822f5c21d449716b23cda4d8b662ef35a0..bafb71751b714ff5ae13340df69dfead35b07527 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -113,7 +113,6 @@ riscv-set-trap-vector-earlier.patch
 pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
 i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
 kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
diff --git a/queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index c61db33..0000000
--- a/queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From c0ddf48dd19c4b48eacd886bde59c426bc9fcc0e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end-up modifying the affinity of VPE that is
-currently running, making the results unpredictible.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 3c755d5dad6e..a00c5e8c4ea6 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1809,13 +1809,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-               for (i = 0; i < vm->nr_vpes; i++) {
-                       struct its_vpe *vpe = vm->vpes[i];
--                      struct irq_data *d = irq_get_irq_data(vpe->irq);
--                      /* Map the VPE to the first possible CPU */
--                      vpe->col_idx = cpumask_first(cpu_online_mask);
-                       its_send_vmapp(its, vpe, true);
-                       its_send_vinvall(its, vpe);
--                      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-               }
-       }
-@@ -4562,6 +4558,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-       struct its_node *its;
-+      /* Map the VPE to the first possible CPU */
-+      vpe->col_idx = cpumask_first(cpu_online_mask);
-+      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
-       /*
-        * If we use the list map, we issue VMAPP on demand... Unless
-        * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4570,9 +4570,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       if (!gic_requires_eager_mapping())
-               return 0;
--      /* Map the VPE to the first possible CPU */
--      vpe->col_idx = cpumask_first(cpu_online_mask);
--
-       list_for_each_entry(its, &its_nodes, entry) {
-               if (!is_v4(its))
-                       continue;
-@@ -4581,8 +4578,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-               its_send_vinvall(its, vpe);
-       }
--      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
-       return 0;
- }
--- 
-2.43.0
-
diff --git a/queue-6.10/series b/queue-6.10/series
index bba08c3b5e5cd280714883aaed2bc8d759aaf592..e3848c4ed2b7f412d20f79fffede7b70f0edb60e 100644
--- a/queue-6.10/series
+++ b/queue-6.10/series
@@ -250,7 +250,6 @@ pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
 bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
 drm-amdgpu-add-mutex-to-protect-ras-shared-memory.patch
 loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
diff --git a/queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index 7689431..0000000
--- a/queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From b7b4c01a30352c370769a1966022a48d74d5d226 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end-up modifying the affinity of VPE that is
-currently running, making the results unpredictible.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 350abbb36e04..e25dea0e50c7 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1803,13 +1803,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-               for (i = 0; i < vm->nr_vpes; i++) {
-                       struct its_vpe *vpe = vm->vpes[i];
--                      struct irq_data *d = irq_get_irq_data(vpe->irq);
--                      /* Map the VPE to the first possible CPU */
--                      vpe->col_idx = cpumask_first(cpu_online_mask);
-                       its_send_vmapp(its, vpe, true);
-                       its_send_vinvall(its, vpe);
--                      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-               }
-       }
-@@ -4551,6 +4547,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-       struct its_node *its;
-+      /* Map the VPE to the first possible CPU */
-+      vpe->col_idx = cpumask_first(cpu_online_mask);
-+      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
-       /*
-        * If we use the list map, we issue VMAPP on demand... Unless
-        * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4559,9 +4559,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-       if (!gic_requires_eager_mapping())
-               return 0;
--      /* Map the VPE to the first possible CPU */
--      vpe->col_idx = cpumask_first(cpu_online_mask);
--
-       list_for_each_entry(its, &its_nodes, entry) {
-               if (!is_v4(its))
-                       continue;
-@@ -4570,8 +4567,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-               its_send_vinvall(its, vpe);
-       }
--      irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
-       return 0;
- }
--- 
-2.43.0
-
diff --git a/queue-6.6/series b/queue-6.6/series
index b060356498e3b12055b2bc0d5d92260be6308d6d..fa1d8fa40d176674ac93ef80c926c57dd7e5f668 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -172,7 +172,6 @@ pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
 bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
 loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
 regmap-maple-work-around-gcc-14.1-false-positive-war.patch