+++ /dev/null
-From 8074f0b4bfda80c2cbf87ef1106a1e5fe4d10109 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
- on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a9469751720c..b30319c65e7f 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1789,13 +1789,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-
- for (i = 0; i < vm->nr_vpes; i++) {
- struct its_vpe *vpe = vm->vpes[i];
-- struct irq_data *d = irq_get_irq_data(vpe->irq);
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
- its_send_vinvall(its, vpe);
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- }
- }
-
-@@ -4518,6 +4514,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct its_node *its;
-
-+ /* Map the VPE to the first possible CPU */
-+ vpe->col_idx = cpumask_first(cpu_online_mask);
-+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- /*
- * If we use the list map, we issue VMAPP on demand... Unless
- * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4526,9 +4526,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- if (!gic_requires_eager_mapping())
- return 0;
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
--
- list_for_each_entry(its, &its_nodes, entry) {
- if (!is_v4(its))
- continue;
-@@ -4537,8 +4534,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- its_send_vinvall(its, vpe);
- }
-
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- return 0;
- }
-
---
-2.43.0
-
btrfs-clean-up-our-handling-of-refs-0-in-snapshot-de.patch
pci-add-missing-bridge-lock-to-pci_bus_lock.patch
net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
s390-vmlinux.lds.s-move-ro_after_init-section-behind.patch
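For reference, the change being dropped from these queues unifies the two affinity-setup paths so that its_vpe_irq_domain_activate() alone picks the initial column. Below is a condensed sketch of that function as it would look with the patch applied, reconstructed from the hunks above; the continuation of the prototype (struct irq_data *d, bool reserve) and the reworded comments are filled in from general knowledge of the driver rather than taken verbatim from the tree, so treat it as an illustration, not kernel source.

	static int its_vpe_irq_domain_activate(struct irq_domain *domain,
						struct irq_data *d, bool reserve)
	{
		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
		struct its_node *its;

		/* Map the VPE to the first possible CPU, now unconditionally */
		vpe->col_idx = cpumask_first(cpu_online_mask);
		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

		/*
		 * With a VMOVP list, VMAPP is issued on demand at map time;
		 * only GICv4.1 (or a GICv4 without the list) maps eagerly here.
		 */
		if (!gic_requires_eager_mapping())
			return 0;

		list_for_each_entry(its, &its_nodes, entry) {
			if (!is_v4(its))
				continue;

			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
		}

		return 0;
	}

With this shape, its_map_vm() no longer touches vpe->col_idx or the effective affinity at all, which is exactly what the hunks removing those lines accomplish.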
+++ /dev/null
-From 4ab6b642d1181207b6203dac042c7ac818534f3e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
- on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 3fa6c7184326..b346753135e0 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1794,13 +1794,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-
- for (i = 0; i < vm->nr_vpes; i++) {
- struct its_vpe *vpe = vm->vpes[i];
-- struct irq_data *d = irq_get_irq_data(vpe->irq);
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
- its_send_vinvall(its, vpe);
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- }
- }
-
-@@ -4540,6 +4536,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct its_node *its;
-
-+ /* Map the VPE to the first possible CPU */
-+ vpe->col_idx = cpumask_first(cpu_online_mask);
-+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- /*
- * If we use the list map, we issue VMAPP on demand... Unless
- * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4548,9 +4548,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- if (!gic_requires_eager_mapping())
- return 0;
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
--
- list_for_each_entry(its, &its_nodes, entry) {
- if (!is_v4(its))
- continue;
-@@ -4559,8 +4556,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- its_send_vinvall(its, vpe);
- }
-
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- return 0;
- }
-
---
-2.43.0
-
riscv-set-trap-vector-earlier.patch
pci-add-missing-bridge-lock-to-pci_bus_lock.patch
net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
+++ /dev/null
-From 27fabf1b7f8e8f557257513651d8ea38634a1830 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
- on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a7a952bbfdc2..009e0fb43738 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1800,13 +1800,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-
- for (i = 0; i < vm->nr_vpes; i++) {
- struct its_vpe *vpe = vm->vpes[i];
-- struct irq_data *d = irq_get_irq_data(vpe->irq);
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
- its_send_vinvall(its, vpe);
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- }
- }
-
-@@ -4525,6 +4521,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct its_node *its;
-
-+ /* Map the VPE to the first possible CPU */
-+ vpe->col_idx = cpumask_first(cpu_online_mask);
-+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- /*
- * If we use the list map, we issue VMAPP on demand... Unless
- * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4533,9 +4533,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- if (!gic_requires_eager_mapping())
- return 0;
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
--
- list_for_each_entry(its, &its_nodes, entry) {
- if (!is_v4(its))
- continue;
-@@ -4544,8 +4541,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- its_send_vinvall(its, vpe);
- }
-
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- return 0;
- }
-
---
-2.43.0
-
pci-add-missing-bridge-lock-to-pci_bus_lock.patch
tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
+++ /dev/null
-From c0ddf48dd19c4b48eacd886bde59c426bc9fcc0e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
- on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 3c755d5dad6e..a00c5e8c4ea6 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1809,13 +1809,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-
- for (i = 0; i < vm->nr_vpes; i++) {
- struct its_vpe *vpe = vm->vpes[i];
-- struct irq_data *d = irq_get_irq_data(vpe->irq);
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
- its_send_vinvall(its, vpe);
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- }
- }
-
-@@ -4562,6 +4558,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct its_node *its;
-
-+ /* Map the VPE to the first possible CPU */
-+ vpe->col_idx = cpumask_first(cpu_online_mask);
-+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- /*
- * If we use the list map, we issue VMAPP on demand... Unless
- * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4570,9 +4570,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- if (!gic_requires_eager_mapping())
- return 0;
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
--
- list_for_each_entry(its, &its_nodes, entry) {
- if (!is_v4(its))
- continue;
-@@ -4581,8 +4578,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- its_send_vinvall(its, vpe);
- }
-
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- return 0;
- }
-
---
-2.43.0
-
tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
drm-amdgpu-add-mutex-to-protect-ras-shared-memory.patch
loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
+++ /dev/null
-From b7b4c01a30352c370769a1966022a48d74d5d226 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 5 Jul 2024 10:31:53 +0100
-Subject: irqchip/gic-v4: Always configure affinity on VPE activation
-
-From: Marc Zyngier <maz@kernel.org>
-
-[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
-
-There are currently two paths to set the initial affinity of a VPE:
-
- - at activation time on GICv4 without the stupid VMOVP list, and
- on GICv4.1
-
- - at map time for GICv4 with VMOVP list
-
-The latter location may end up modifying the affinity of a VPE that is
-currently running, making the results unpredictable.
-
-Instead, unify the two paths, making sure to set the initial affinity only
-at activation time.
-
-Reported-by: Nianyao Tang <tangnianyao@huawei.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Nianyao Tang <tangnianyao@huawei.com>
-Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
- 1 file changed, 4 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index 350abbb36e04..e25dea0e50c7 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1803,13 +1803,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
-
- for (i = 0; i < vm->nr_vpes; i++) {
- struct its_vpe *vpe = vm->vpes[i];
-- struct irq_data *d = irq_get_irq_data(vpe->irq);
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
- its_send_vinvall(its, vpe);
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
- }
- }
-
-@@ -4551,6 +4547,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct its_node *its;
-
-+ /* Map the VPE to the first possible CPU */
-+ vpe->col_idx = cpumask_first(cpu_online_mask);
-+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-+
- /*
- * If we use the list map, we issue VMAPP on demand... Unless
- * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
-@@ -4559,9 +4559,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- if (!gic_requires_eager_mapping())
- return 0;
-
-- /* Map the VPE to the first possible CPU */
-- vpe->col_idx = cpumask_first(cpu_online_mask);
--
- list_for_each_entry(its, &its_nodes, entry) {
- if (!is_v4(its))
- continue;
-@@ -4570,8 +4567,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- its_send_vinvall(its, vpe);
- }
-
-- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
--
- return 0;
- }
-
---
-2.43.0
-
tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
-irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
regmap-maple-work-around-gcc-14.1-false-positive-war.patch