From 92476f295622a22a49657b4005fd74eef9e1e663 Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Mon, 9 Sep 2024 08:43:36 -0400
Subject: [PATCH] Drop irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch

Signed-off-by: Sasha Levin
---
 ...lways-configure-affinity-on-vpe-acti.patch | 83 -------------------
 queue-5.10/series                             |  1 -
 ...lways-configure-affinity-on-vpe-acti.patch | 83 -------------------
 queue-5.15/series                             |  1 -
 ...lways-configure-affinity-on-vpe-acti.patch | 83 -------------------
 queue-6.1/series                              |  1 -
 ...lways-configure-affinity-on-vpe-acti.patch | 83 -------------------
 queue-6.10/series                             |  1 -
 ...lways-configure-affinity-on-vpe-acti.patch | 83 -------------------
 queue-6.6/series                              |  1 -
 10 files changed, 420 deletions(-)
 delete mode 100644 queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 delete mode 100644 queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 delete mode 100644 queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 delete mode 100644 queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 delete mode 100644 queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch

diff --git a/queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index 4858683aaf7..00000000000
--- a/queue-5.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 8074f0b4bfda80c2cbf87ef1106a1e5fe4d10109 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang
-Signed-off-by: Marc Zyngier
-Signed-off-by: Thomas Gleixner
-Tested-by: Nianyao Tang
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a9469751720c..b30319c65e7f 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1789,13 +1789,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
- 
- 	for (i = 0; i < vm->nr_vpes; i++) {
- 		struct its_vpe *vpe = vm->vpes[i];
--		struct irq_data *d = irq_get_irq_data(vpe->irq);
- 
--		/* Map the VPE to the first possible CPU */
--		vpe->col_idx = cpumask_first(cpu_online_mask);
- 		its_send_vmapp(its, vpe, true);
- 		its_send_vinvall(its, vpe);
--		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- 	}
- }
- 
-@@ -4518,6 +4514,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- 	struct its_node *its;
- 
-+	/* Map the VPE to the first possible CPU */
-+	vpe->col_idx = cpumask_first(cpu_online_mask);
-+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- 	/*
- 	 * If we use the list map, we issue VMAPP on demand... Unless
- 	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4526,9 +4526,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	if (!gic_requires_eager_mapping())
- 		return 0;
- 
--	/* Map the VPE to the first possible CPU */
--	vpe->col_idx = cpumask_first(cpu_online_mask);
--
- 	list_for_each_entry(its, &its_nodes, entry) {
- 		if (!is_v4(its))
- 			continue;
-@@ -4537,8 +4534,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 		its_send_vinvall(its, vpe);
- 	}
- 
--	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- 	return 0;
- }
- 
--- 
-2.43.0
-
diff --git a/queue-5.10/series b/queue-5.10/series
index 352e078d7d7..318e869270d 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -144,7 +144,6 @@ btrfs-replace-bug_on-with-assert-in-walk_down_proc.patch
 btrfs-clean-up-our-handling-of-refs-0-in-snapshot-de.patch
 pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
 btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
 s390-vmlinux.lds.s-move-ro_after_init-section-behind.patch
diff --git a/queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index d71821fc82f..00000000000
--- a/queue-5.15/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 4ab6b642d1181207b6203dac042c7ac818534f3e Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang
-Signed-off-by: Marc Zyngier
-Signed-off-by: Thomas Gleixner
-Tested-by: Nianyao Tang
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 3fa6c7184326..b346753135e0 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1794,13 +1794,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
- 
- 	for (i = 0; i < vm->nr_vpes; i++) {
- 		struct its_vpe *vpe = vm->vpes[i];
--		struct irq_data *d = irq_get_irq_data(vpe->irq);
- 
--		/* Map the VPE to the first possible CPU */
--		vpe->col_idx = cpumask_first(cpu_online_mask);
- 		its_send_vmapp(its, vpe, true);
- 		its_send_vinvall(its, vpe);
--		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- 	}
- }
- 
-@@ -4540,6 +4536,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- 	struct its_node *its;
- 
-+	/* Map the VPE to the first possible CPU */
-+	vpe->col_idx = cpumask_first(cpu_online_mask);
-+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- 	/*
- 	 * If we use the list map, we issue VMAPP on demand... Unless
- 	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4548,9 +4548,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	if (!gic_requires_eager_mapping())
- 		return 0;
- 
--	/* Map the VPE to the first possible CPU */
--	vpe->col_idx = cpumask_first(cpu_online_mask);
--
- 	list_for_each_entry(its, &its_nodes, entry) {
- 		if (!is_v4(its))
- 			continue;
-@@ -4559,8 +4556,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 		its_send_vinvall(its, vpe);
- 	}
- 
--	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- 	return 0;
- }
- 
--- 
-2.43.0
-
diff --git a/queue-5.15/series b/queue-5.15/series
index 75dec7eee90..286c3936b03 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -157,7 +157,6 @@ btrfs-replace-bug_on-with-error-handling-at-update_r.patch
 riscv-set-trap-vector-earlier.patch
 pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
 kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
 btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
diff --git a/queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index b7c7381a005..00000000000
--- a/queue-6.1/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 27fabf1b7f8e8f557257513651d8ea38634a1830 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang
-Signed-off-by: Marc Zyngier
-Signed-off-by: Thomas Gleixner
-Tested-by: Nianyao Tang
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a7a952bbfdc2..009e0fb43738 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1800,13 +1800,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
- 
- 	for (i = 0; i < vm->nr_vpes; i++) {
- 		struct its_vpe *vpe = vm->vpes[i];
--		struct irq_data *d = irq_get_irq_data(vpe->irq);
- 
--		/* Map the VPE to the first possible CPU */
--		vpe->col_idx = cpumask_first(cpu_online_mask);
- 		its_send_vmapp(its, vpe, true);
- 		its_send_vinvall(its, vpe);
--		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- 	}
- }
- 
-@@ -4525,6 +4521,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- 	struct its_node *its;
- 
-+	/* Map the VPE to the first possible CPU */
-+	vpe->col_idx = cpumask_first(cpu_online_mask);
-+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- 	/*
- 	 * If we use the list map, we issue VMAPP on demand... Unless
- 	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4533,9 +4533,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	if (!gic_requires_eager_mapping())
- 		return 0;
- 
--	/* Map the VPE to the first possible CPU */
--	vpe->col_idx = cpumask_first(cpu_online_mask);
--
- 	list_for_each_entry(its, &its_nodes, entry) {
- 		if (!is_v4(its))
- 			continue;
-@@ -4544,8 +4541,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 		its_send_vinvall(its, vpe);
- 	}
- 
--	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- 	return 0;
- }
- 
--- 
-2.43.0
-
diff --git a/queue-6.1/series b/queue-6.1/series
index a9c85d822f5..bafb71751b7 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -113,7 +113,6 @@ riscv-set-trap-vector-earlier.patch
 pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
 i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
 kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
diff --git a/queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index c61db338fa9..00000000000
--- a/queue-6.10/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From c0ddf48dd19c4b48eacd886bde59c426bc9fcc0e Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang
-Signed-off-by: Marc Zyngier
-Signed-off-by: Thomas Gleixner
-Tested-by: Nianyao Tang
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 3c755d5dad6e..a00c5e8c4ea6 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1809,13 +1809,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
- 
- 	for (i = 0; i < vm->nr_vpes; i++) {
- 		struct its_vpe *vpe = vm->vpes[i];
--		struct irq_data *d = irq_get_irq_data(vpe->irq);
- 
--		/* Map the VPE to the first possible CPU */
--		vpe->col_idx = cpumask_first(cpu_online_mask);
- 		its_send_vmapp(its, vpe, true);
- 		its_send_vinvall(its, vpe);
--		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- 	}
- }
- 
-@@ -4562,6 +4558,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- 	struct its_node *its;
- 
-+	/* Map the VPE to the first possible CPU */
-+	vpe->col_idx = cpumask_first(cpu_online_mask);
-+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- 	/*
- 	 * If we use the list map, we issue VMAPP on demand... Unless
- 	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4570,9 +4570,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	if (!gic_requires_eager_mapping())
- 		return 0;
- 
--	/* Map the VPE to the first possible CPU */
--	vpe->col_idx = cpumask_first(cpu_online_mask);
--
- 	list_for_each_entry(its, &its_nodes, entry) {
- 		if (!is_v4(its))
- 			continue;
-@@ -4581,8 +4578,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 		its_send_vinvall(its, vpe);
- 	}
- 
--	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- 	return 0;
- }
- 
--- 
-2.43.0
-
diff --git a/queue-6.10/series b/queue-6.10/series
index bba08c3b5e5..e3848c4ed2b 100644
--- a/queue-6.10/series
+++ b/queue-6.10/series
@@ -250,7 +250,6 @@ pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
 bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
 drm-amdgpu-add-mutex-to-protect-ras-shared-memory.patch
 loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
diff --git a/queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch b/queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
deleted file mode 100644
index 7689431dadd..00000000000
--- a/queue-6.6/irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From b7b4c01a30352c370769a1966022a48d74d5d226 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
-   on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang
-Signed-off-by: Marc Zyngier
-Signed-off-by: Thomas Gleixner
-Tested-by: Nianyao Tang
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 350abbb36e04..e25dea0e50c7 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1803,13 +1803,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
- 
- 	for (i = 0; i < vm->nr_vpes; i++) {
- 		struct its_vpe *vpe = vm->vpes[i];
--		struct irq_data *d = irq_get_irq_data(vpe->irq);
- 
--		/* Map the VPE to the first possible CPU */
--		vpe->col_idx = cpumask_first(cpu_online_mask);
- 		its_send_vmapp(its, vpe, true);
- 		its_send_vinvall(its, vpe);
--		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- 	}
- }
- 
-@@ -4551,6 +4547,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- 	struct its_node *its;
- 
-+	/* Map the VPE to the first possible CPU */
-+	vpe->col_idx = cpumask_first(cpu_online_mask);
-+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- 	/*
- 	 * If we use the list map, we issue VMAPP on demand... Unless
- 	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4559,9 +4559,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 	if (!gic_requires_eager_mapping())
- 		return 0;
- 
--	/* Map the VPE to the first possible CPU */
--	vpe->col_idx = cpumask_first(cpu_online_mask);
--
- 	list_for_each_entry(its, &its_nodes, entry) {
- 		if (!is_v4(its))
- 			continue;
-@@ -4570,8 +4567,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- 		its_send_vinvall(its, vpe);
- 	}
- 
--	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- 	return 0;
- }
- 
--- 
-2.43.0
-
diff --git a/queue-6.6/series b/queue-6.6/series
index b060356498e..fa1d8fa40d1 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -172,7 +172,6 @@ pci-add-missing-bridge-lock-to-pci_bus_lock.patch
 tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
 bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
 net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
 irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
 loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
 regmap-maple-work-around-gcc-14.1-false-positive-war.patch
-- 
2.47.3