--- /dev/null
+From 233323f9b9f828cd7cd5145ad811c1990b692542 Mon Sep 17 00:00:00 2001
+From: Kuan-Wei Chiu <visitorckw@gmail.com>
+Date: Tue, 2 Jul 2024 04:56:39 +0800
+Subject: ACPI: processor_idle: Fix invalid comparison with insertion sort for latency
+
+From: Kuan-Wei Chiu <visitorckw@gmail.com>
+
+commit 233323f9b9f828cd7cd5145ad811c1990b692542 upstream.
+
+The acpi_cst_latency_cmp() comparison function currently used for
+sorting C-state latencies does not satisfy transitivity, causing
+incorrect sorting results.
+
+Specifically, if there are two valid acpi_processor_cx elements A and B
+and one invalid element C, it may occur that A < B, A = C, and B = C.
+Sorting algorithms assume that if A < B and A = C, then C < B, leading
+to incorrect ordering.
+
+Given the small size of the array (<=8), we replace the library sort
+function with a simple insertion sort that properly ignores invalid
+elements and sorts valid ones based on latency. This change ensures
+correct ordering of the C-state latencies.
+
+Fixes: 65ea8f2c6e23 ("ACPI: processor idle: Fix up C-state latency if not ordered")
+Reported-by: Julian Sikorski <belegdol@gmail.com>
+Closes: https://lore.kernel.org/lkml/70674dc7-5586-4183-8953-8095567e73df@gmail.com
+Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com>
+Tested-by: Julian Sikorski <belegdol@gmail.com>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20240701205639.117194-1-visitorckw@gmail.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/processor_idle.c | 37 ++++++++++++++++---------------------
+ 1 file changed, 16 insertions(+), 21 deletions(-)
+
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,7 +16,6 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h> /* need_resched() */
+-#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ }
+
+-static int acpi_cst_latency_cmp(const void *a, const void *b)
++static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
+ {
+- const struct acpi_processor_cx *x = a, *y = b;
++ int i, j, k;
+
+- if (!(x->valid && y->valid))
+- return 0;
+- if (x->latency > y->latency)
+- return 1;
+- if (x->latency < y->latency)
+- return -1;
+- return 0;
+-}
+-static void acpi_cst_latency_swap(void *a, void *b, int n)
+-{
+- struct acpi_processor_cx *x = a, *y = b;
++ for (i = 1; i < length; i++) {
++ if (!states[i].valid)
++ continue;
+
+- if (!(x->valid && y->valid))
+- return;
+- swap(x->latency, y->latency);
++ for (j = i - 1, k = i; j >= 0; j--) {
++ if (!states[j].valid)
++ continue;
++
++ if (states[j].latency > states[k].latency)
++ swap(states[j].latency, states[k].latency);
++
++ k = j;
++ }
++ }
+ }
+
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(s
+
+ if (buggy_latency) {
+ pr_notice("FW issue: working around C-state latencies out of order\n");
+- sort(&pr->power.states[1], max_cstate,
+- sizeof(struct acpi_processor_cx),
+- acpi_cst_latency_cmp,
+- acpi_cst_latency_swap);
++ acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
+ }
+
+ lapic_timer_propagate_broadcast(pr);
--- /dev/null
+From d92467ad9d9ee63a700934b9228a989ef671d511 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed, 26 Jun 2024 15:47:23 -0500
+Subject: cpufreq: ACPI: Mark boost policy as enabled when setting boost
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit d92467ad9d9ee63a700934b9228a989ef671d511 upstream.
+
+When boost is set for CPUs using acpi-cpufreq, the policy is not
+updated which can cause boost to be incorrectly not reported.
+
+Fixes: 218a06a79d9a ("cpufreq: Support per-policy performance boost")
+Link: https://patch.msgid.link/20240626204723.6237-2-mario.limonciello@amd.com
+Suggested-by: Viresh Kumar <viresh.kumar@linaro.org>
+Suggested-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/acpi-cpufreq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 37f1cdf46d29..4ac3a35dcd98 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -890,8 +890,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+
+- if (acpi_cpufreq_driver.set_boost)
++ if (acpi_cpufreq_driver.set_boost) {
+ set_boost(policy, acpi_cpufreq_driver.boost_enabled);
++ policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
++ }
+
+ return result;
+
+--
+2.45.2
+
--- /dev/null
+From 102fa9c4b439ca3bd93d13fb53f5b7592d96a109 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed, 26 Jun 2024 15:47:22 -0500
+Subject: cpufreq: Allow drivers to advertise boost enabled
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 102fa9c4b439ca3bd93d13fb53f5b7592d96a109 upstream.
+
+The behavior introduced in commit f37a4d6b4a2c ("cpufreq: Fix per-policy
+boost behavior on SoCs using cpufreq_boost_set_sw()") sets up the boost
+policy incorrectly when boost has been enabled by the platform firmware
+initially even if a driver sets the policy up.
+
+This is because policy_has_boost_freq() assumes that there is a frequency
+table set up by the driver and that the boost frequencies are advertised
+in that table. This assumption doesn't work for acpi-cpufreq or
+amd-pstate. Only use this check to enable boost if it's not already
+enabled instead of also disabling it if already enabled.
+
+Fixes: f37a4d6b4a2c ("cpufreq: Fix per-policy boost behavior on SoCs using cpufreq_boost_set_sw()")
+Link: https://patch.msgid.link/20240626204723.6237-1-mario.limonciello@amd.com
+Reviewed-by: Sibi Sankar <quic_sibis@quicinc.com>
+Reviewed-by: Dhruva Gole <d-gole@ti.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
+Suggested-by: Viresh Kumar <viresh.kumar@linaro.org>
+Suggested-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cpufreq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1422,7 +1422,8 @@ static int cpufreq_online(unsigned int c
+ }
+
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+- policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
++ if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
++ policy->boost_enabled = true;
+
+ /*
+ * The initialization has succeeded and the policy is online.
--- /dev/null
+From 1723f04caacb32cadc4e063725d836a0c4450694 Mon Sep 17 00:00:00 2001
+From: Audra Mitchell <audra@redhat.com>
+Date: Wed, 26 Jun 2024 09:05:11 -0400
+Subject: Fix userfaultfd_api to return EINVAL as expected
+
+From: Audra Mitchell <audra@redhat.com>
+
+commit 1723f04caacb32cadc4e063725d836a0c4450694 upstream.
+
+Currently if we request a feature that is not set in the Kernel config we
+fail silently and return all the available features. However, the man
+page indicates we should return an EINVAL.
+
+We need to fix this issue since we can end up with a Kernel warning should
+a program request the feature UFFD_FEATURE_WP_UNPOPULATED on a kernel with
+the config not set with this feature.
+
+ [ 200.812896] WARNING: CPU: 91 PID: 13634 at mm/memory.c:1660 zap_pte_range+0x43d/0x660
+ [ 200.820738] Modules linked in:
+ [ 200.869387] CPU: 91 PID: 13634 Comm: userfaultfd Kdump: loaded Not tainted 6.9.0-rc5+ #8
+ [ 200.877477] Hardware name: Dell Inc. PowerEdge R6525/0N7YGH, BIOS 2.7.3 03/30/2022
+ [ 200.885052] RIP: 0010:zap_pte_range+0x43d/0x660
+
+Link: https://lkml.kernel.org/r/20240626130513.120193-1-audra@redhat.com
+Fixes: e06f1e1dd499 ("userfaultfd: wp: enabled write protection in userfaultfd API")
+Signed-off-by: Audra Mitchell <audra@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rafael Aquini <raquini@redhat.com>
+Cc: Shaohua Li <shli@fb.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/userfaultfd.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -2050,7 +2050,7 @@ static int userfaultfd_api(struct userfa
+ goto out;
+ features = uffdio_api.features;
+ ret = -EINVAL;
+- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++ if (uffdio_api.api != UFFD_API)
+ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+@@ -2068,6 +2068,11 @@ static int userfaultfd_api(struct userfa
+ uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
+ uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
+ #endif
++
++ ret = -EINVAL;
++ if (features & ~uffdio_api.features)
++ goto err_out;
++
+ uffdio_api.ioctls = UFFD_API_IOCTLS;
+ ret = -EFAULT;
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
--- /dev/null
+From 69c7b2fe4c9cc1d3b1186d1c5606627ecf0de883 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 8 Jul 2024 22:37:29 +0200
+Subject: libceph: fix race between delayed_work() and ceph_monc_stop()
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 69c7b2fe4c9cc1d3b1186d1c5606627ecf0de883 upstream.
+
+The way the delayed work is handled in ceph_monc_stop() is prone to
+races with mon_fault() and possibly also finish_hunting(). Both of
+these can requeue the delayed work which wouldn't be canceled by any of
+the following code in case that happens after cancel_delayed_work_sync()
+runs -- __close_session() doesn't mess with the delayed work in order
+to avoid interfering with the hunting interval logic. This part was
+missed in commit b5d91704f53e ("libceph: behave in mon_fault() if
+cur_mon < 0") and use-after-free can still ensue on monc and objects
+that hang off of it, with monc->auth and monc->monmap being
+particularly susceptible to quickly being reused.
+
+To fix this:
+
+- clear monc->cur_mon and monc->hunting as part of closing the session
+ in ceph_monc_stop()
+- bail from delayed_work() if monc->cur_mon is cleared, similar to how
+ it's done in mon_fault() and finish_hunting() (based on monc->hunting)
+- call cancel_delayed_work_sync() after the session is closed
+
+Cc: stable@vger.kernel.org
+Link: https://tracker.ceph.com/issues/66857
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Xiubo Li <xiubli@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/mon_client.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_str
+ struct ceph_mon_client *monc =
+ container_of(work, struct ceph_mon_client, delayed_work.work);
+
+- dout("monc delayed_work\n");
+ mutex_lock(&monc->mutex);
++ dout("%s mon%d\n", __func__, monc->cur_mon);
++ if (monc->cur_mon < 0) {
++ goto out;
++ }
++
+ if (monc->hunting) {
+ dout("%s continuing hunt\n", __func__);
+ reopen_session(monc);
+ } else {
+ int is_auth = ceph_auth_is_authenticated(monc->auth);
++
++ dout("%s is_authed %d\n", __func__, is_auth);
+ if (ceph_con_keepalive_expired(&monc->con,
+ CEPH_MONC_PING_TIMEOUT)) {
+ dout("monc keepalive timeout\n");
+@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_str
+ }
+ }
+ __schedule_delayed(monc);
++
++out:
+ mutex_unlock(&monc->mutex);
+ }
+
+@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
+ void ceph_monc_stop(struct ceph_mon_client *monc)
+ {
+ dout("stop\n");
+- cancel_delayed_work_sync(&monc->delayed_work);
+
+ mutex_lock(&monc->mutex);
+ __close_session(monc);
++ monc->hunting = false;
+ monc->cur_mon = -1;
+ mutex_unlock(&monc->mutex);
+
++ cancel_delayed_work_sync(&monc->delayed_work);
++
+ /*
+ * flush msgr queue before we destroy ourselves to ensure that:
+ * - any work that references our embedded con is finished.
--- /dev/null
+From bfb6b07d2a30ffe98864d8cfc31fc00470063025 Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Fri, 28 Jun 2024 12:44:58 +0100
+Subject: misc: fastrpc: Avoid updating PD type for capability request
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit bfb6b07d2a30ffe98864d8cfc31fc00470063025 upstream.
+
+When user is requesting for DSP capability, the process pd type is
+getting updated to USER_PD which is incorrect as DSP will assume the
+process which is making the request is a user PD and this will never
+get updated back to the original value. The actual PD type should not
+be updated for capability request and it should be serviced by the
+respective PD on DSP side. Don't change process's PD type for DSP
+capability request.
+
+Fixes: 6c16fd8bdd40 ("misc: fastrpc: Add support to get DSP capabilities")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Reviewed-by: Caleb Connolly <caleb.connolly@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20240628114501.14310-4-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1707,7 +1707,6 @@ static int fastrpc_get_info_from_dsp(str
+ args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
+ args[1].length = dsp_attr_buf_len * sizeof(u32);
+ args[1].fd = -1;
+- fl->pd = USER_PD;
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
+ FASTRPC_SCALARS(0, 1, 1), args);
--- /dev/null
+From e7f0be3f09c6e955dc8009129862b562d8b64513 Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Fri, 28 Jun 2024 12:44:57 +0100
+Subject: misc: fastrpc: Copy the complete capability structure to user
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit e7f0be3f09c6e955dc8009129862b562d8b64513 upstream.
+
+User is passing capability ioctl structure(argp) to get DSP
+capabilities. This argp is copied to a local structure to get domain
+and attribute_id information. After getting the capability, only
+capability value is getting copied to user argp which will not be
+useful if the use is trying to get the capability by checking the
+capability member of fastrpc_ioctl_capability structure. Copy the
+complete capability structure so that user can get the capability
+value from the expected member of the structure.
+
+Fixes: 6c16fd8bdd40 ("misc: fastrpc: Add support to get DSP capabilities")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Caleb Connolly <caleb.connolly@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240628114501.14310-3-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1787,7 +1787,7 @@ static int fastrpc_get_dsp_info(struct f
+ if (err)
+ return err;
+
+- if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
++ if (copy_to_user(argp, &cap, sizeof(cap)))
+ return -EFAULT;
+
+ return 0;
--- /dev/null
+From 4cb7915f0a35e2fcc4be60b912c4be35cd830957 Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Fri, 28 Jun 2024 12:44:56 +0100
+Subject: misc: fastrpc: Fix DSP capabilities request
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit 4cb7915f0a35e2fcc4be60b912c4be35cd830957 upstream.
+
+The DSP capability request call expects 2 arguments. First is the
+information about the total number of attributes to be copied from
+DSP and second is the information about the buffer where the DSP
+needs to copy the information. The current design is passing the
+information about the size to be copied from DSP which would be
+considered as a bad argument to the call by DSP causing a failure
+suggesting the same. The second argument carries the information
+about the buffer where the DSP needs to copy the capability
+information and the size to be copied. As the first entry of
+capability attribute is getting skipped, same should also be
+considered while sending the information to DSP. Add changes to
+pass proper arguments to DSP.
+
+Fixes: 6c16fd8bdd40 ("misc: fastrpc: Add support to get DSP capabilities")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Caleb Connolly <caleb.connolly@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240628114501.14310-2-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1693,14 +1693,19 @@ static int fastrpc_get_info_from_dsp(str
+ {
+ struct fastrpc_invoke_args args[2] = { 0 };
+
+- /* Capability filled in userspace */
++ /*
++ * Capability filled in userspace. This carries the information
++ * about the remoteproc support which is fetched from the remoteproc
++ * sysfs node by userspace.
++ */
+ dsp_attr_buf[0] = 0;
++ dsp_attr_buf_len -= 1;
+
+ args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
+ args[0].length = sizeof(dsp_attr_buf_len);
+ args[0].fd = -1;
+ args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
+- args[1].length = dsp_attr_buf_len;
++ args[1].length = dsp_attr_buf_len * sizeof(u32);
+ args[1].fd = -1;
+ fl->pd = USER_PD;
+
+@@ -1730,7 +1735,7 @@ static int fastrpc_get_info_from_kernel(
+ if (!dsp_attributes)
+ return -ENOMEM;
+
+- err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
++ err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
+ if (err == DSP_UNSUPPORTED_API) {
+ dev_info(&cctx->rpdev->dev,
+ "Warning: DSP capabilities not supported on domain: %d\n", domain);
--- /dev/null
+From ad0bd973a033003ca578c42a760d1dc77aeea15e Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Fri, 28 Jun 2024 12:44:59 +0100
+Subject: misc: fastrpc: Fix memory leak in audio daemon attach operation
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit ad0bd973a033003ca578c42a760d1dc77aeea15e upstream.
+
+Audio PD daemon sends the name as part of the init IOCTL call. This
+name needs to be copied to kernel for which memory is allocated.
+This memory is never freed which might result in memory leak. Free
+the memory when it is not needed.
+
+Fixes: 0871561055e6 ("misc: fastrpc: Add support for audiopd")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240628114501.14310-5-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1320,6 +1320,7 @@ static int fastrpc_init_create_static_pr
+ goto err_invoke;
+
+ kfree(args);
++ kfree(name);
+
+ return 0;
+ err_invoke:
--- /dev/null
+From a6f2f158f1ac4893a4967993105712bf3dad32d9 Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Fri, 28 Jun 2024 12:45:00 +0100
+Subject: misc: fastrpc: Fix ownership reassignment of remote heap
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit a6f2f158f1ac4893a4967993105712bf3dad32d9 upstream.
+
+Audio PD daemon will allocate memory for audio PD dynamic loading
+usage when it is attaching for the first time to audio PD. As
+part of this, the memory ownership is moved to the VM where
+audio PD can use it. In case daemon process is killed without any
+impact to DSP audio PD, the daemon process will retry to attach to
+audio PD and in this case memory won't be reallocated. If the invoke
+fails due to any reason, as part of err_invoke, the memory ownership
+is getting reassigned to HLOS even when the memory was not allocated.
+At this time the audio PD might still be using the memory and an
+attempt of ownership reassignment would result in memory issues.
+
+Fixes: 0871561055e6 ("misc: fastrpc: Add support for audiopd")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240628114501.14310-6-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1238,6 +1238,7 @@ static int fastrpc_init_create_static_pr
+ struct fastrpc_phy_page pages[1];
+ char *name;
+ int err;
++ bool scm_done = false;
+ struct {
+ int pgid;
+ u32 namelen;
+@@ -1289,6 +1290,7 @@ static int fastrpc_init_create_static_pr
+ fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
+ goto err_map;
+ }
++ scm_done = true;
+ }
+ }
+
+@@ -1324,7 +1326,7 @@ static int fastrpc_init_create_static_pr
+
+ return 0;
+ err_invoke:
+- if (fl->cctx->vmcount) {
++ if (fl->cctx->vmcount && scm_done) {
+ u64 src_perms = 0;
+ struct qcom_scm_vmperm dst_perms;
+ u32 i;
--- /dev/null
+From bab2f5e8fd5d2f759db26b78d9db57412888f187 Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Fri, 28 Jun 2024 12:45:01 +0100
+Subject: misc: fastrpc: Restrict untrusted app to attach to privileged PD
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit bab2f5e8fd5d2f759db26b78d9db57412888f187 upstream.
+
+Untrusted application with access to only non-secure fastrpc device
+node can attach to root_pd or static PDs if it can make the respective
+init request. This can cause problems as the untrusted application
+can send bad requests to root_pd or static PDs. Add changes to reject
+attach to privileged PDs if the request is being made using non-secure
+fastrpc device node.
+
+Fixes: 0871561055e6 ("misc: fastrpc: Add support for audiopd")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240628114501.14310-7-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 22 +++++++++++++++++++---
+ include/uapi/misc/fastrpc.h | 3 +++
+ 2 files changed, 22 insertions(+), 3 deletions(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -2087,6 +2087,16 @@ err_invoke:
+ return err;
+ }
+
++static int is_attach_rejected(struct fastrpc_user *fl)
++{
++ /* Check if the device node is non-secure */
++ if (!fl->is_secure_dev) {
++ dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n");
++ return -EACCES;
++ }
++ return 0;
++}
++
+ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+ {
+@@ -2099,13 +2109,19 @@ static long fastrpc_device_ioctl(struct
+ err = fastrpc_invoke(fl, argp);
+ break;
+ case FASTRPC_IOCTL_INIT_ATTACH:
+- err = fastrpc_init_attach(fl, ROOT_PD);
++ err = is_attach_rejected(fl);
++ if (!err)
++ err = fastrpc_init_attach(fl, ROOT_PD);
+ break;
+ case FASTRPC_IOCTL_INIT_ATTACH_SNS:
+- err = fastrpc_init_attach(fl, SENSORS_PD);
++ err = is_attach_rejected(fl);
++ if (!err)
++ err = fastrpc_init_attach(fl, SENSORS_PD);
+ break;
+ case FASTRPC_IOCTL_INIT_CREATE_STATIC:
+- err = fastrpc_init_create_static_process(fl, argp);
++ err = is_attach_rejected(fl);
++ if (!err)
++ err = fastrpc_init_create_static_process(fl, argp);
+ break;
+ case FASTRPC_IOCTL_INIT_CREATE:
+ err = fastrpc_init_create_process(fl, argp);
+--- a/include/uapi/misc/fastrpc.h
++++ b/include/uapi/misc/fastrpc.h
+@@ -8,11 +8,14 @@
+ #define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
+ #define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
+ #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
++/* This ioctl is only supported with secure device nodes */
+ #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
+ #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
+ #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
+ #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
++/* This ioctl is only supported with secure device nodes */
+ #define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
++/* This ioctl is only supported with secure device nodes */
+ #define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static)
+ #define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
+ #define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)
--- /dev/null
+From 310d6c15e9104c99d5d9d0ff8e5383a79da7d5e6 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Mon, 24 Jun 2024 10:58:14 -0700
+Subject: mm/damon/core: merge regions aggressively when max_nr_regions is unmet
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 310d6c15e9104c99d5d9d0ff8e5383a79da7d5e6 upstream.
+
+DAMON keeps the number of regions under max_nr_regions by skipping regions
+split operations when doing so can make the number higher than the limit.
+It works well for preventing violation of the limit. But, if somehow the
+violation happens, it cannot recovery well depending on the situation. In
+detail, if the real number of regions having different access pattern is
+higher than the limit, the mechanism cannot reduce the number below the
+limit. In such a case, the system could suffer from high monitoring
+overhead of DAMON.
+
+The violation can actually happen. For an example, the user could reduce
+max_nr_regions while DAMON is running, to be lower than the current number
+of regions. Fix the problem by repeating the merge operations with
+increasing aggressiveness in kdamond_merge_regions() for the case, until
+the limit is met.
+
+[sj@kernel.org: increase regions merge aggressiveness while respecting min_nr_regions]
+ Link: https://lkml.kernel.org/r/20240626164753.46270-1-sj@kernel.org
+[sj@kernel.org: ensure max threshold attempt for max_nr_regions violation]
+ Link: https://lkml.kernel.org/r/20240627163153.75969-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20240624175814.89611-1-sj@kernel.org
+Fixes: b9a6ac4e4ede ("mm/damon: adaptively adjust regions")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [5.15+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1121,14 +1121,31 @@ static void damon_merge_regions_of(struc
+ * access frequencies are similar. This is for minimizing the monitoring
+ * overhead under the dynamically changeable access pattern. If a merge was
+ * unnecessarily made, later 'kdamond_split_regions()' will revert it.
++ *
++ * The total number of regions could be higher than the user-defined limit,
++ * max_nr_regions for some cases. For example, the user can update
+max_nr_regions to a number that is lower than the current number of regions
++ * while DAMON is running. For such a case, repeat merging until the limit is
++ * met while increasing @threshold up to possible maximum level.
+ */
+ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
+ unsigned long sz_limit)
+ {
+ struct damon_target *t;
++ unsigned int nr_regions;
++ unsigned int max_thres;
+
+- damon_for_each_target(t, c)
+- damon_merge_regions_of(t, threshold, sz_limit);
++ max_thres = c->attrs.aggr_interval /
++ (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
++ do {
++ nr_regions = 0;
++ damon_for_each_target(t, c) {
++ damon_merge_regions_of(t, threshold, sz_limit);
++ nr_regions += damon_nr_regions(t);
++ }
++ threshold = max(1, threshold * 2);
++ } while (nr_regions > c->attrs.max_nr_regions &&
++ threshold / 2 < max_thres);
+ }
+
+ /*
--- /dev/null
+From 9fd154ba926b34c833b7bfc4c14ee2e931b3d743 Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gshan@redhat.com>
+Date: Thu, 27 Jun 2024 10:39:52 +1000
+Subject: mm/shmem: disable PMD-sized page cache if needed
+
+From: Gavin Shan <gshan@redhat.com>
+
+commit 9fd154ba926b34c833b7bfc4c14ee2e931b3d743 upstream.
+
+For shmem files, it's possible that PMD-sized page cache can't be
+supported by xarray. For example, 512MB page cache on ARM64 when the base
+page size is 64KB can't be supported by xarray. It leads to errors as the
+following messages indicate when this sort of xarray entry is split.
+
+WARNING: CPU: 34 PID: 7578 at lib/xarray.c:1025 xas_split_alloc+0xf8/0x128
+Modules linked in: binfmt_misc nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 \
+nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject \
+nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 \
+ip_set rfkill nf_tables nfnetlink vfat fat virtio_balloon drm fuse xfs \
+libcrc32c crct10dif_ce ghash_ce sha2_ce sha256_arm64 sha1_ce virtio_net \
+net_failover virtio_console virtio_blk failover dimlib virtio_mmio
+CPU: 34 PID: 7578 Comm: test Kdump: loaded Tainted: G W 6.10.0-rc5-gavin+ #9
+Hardware name: QEMU KVM Virtual Machine, BIOS edk2-20240524-1.el9 05/24/2024
+pstate: 83400005 (Nzcv daif +PAN -UAO +TCO +DIT -SSBS BTYPE=--)
+pc : xas_split_alloc+0xf8/0x128
+lr : split_huge_page_to_list_to_order+0x1c4/0x720
+sp : ffff8000882af5f0
+x29: ffff8000882af5f0 x28: ffff8000882af650 x27: ffff8000882af768
+x26: 0000000000000cc0 x25: 000000000000000d x24: ffff00010625b858
+x23: ffff8000882af650 x22: ffffffdfc0900000 x21: 0000000000000000
+x20: 0000000000000000 x19: ffffffdfc0900000 x18: 0000000000000000
+x17: 0000000000000000 x16: 0000018000000000 x15: 52f8004000000000
+x14: 0000e00000000000 x13: 0000000000002000 x12: 0000000000000020
+x11: 52f8000000000000 x10: 52f8e1c0ffff6000 x9 : ffffbeb9619a681c
+x8 : 0000000000000003 x7 : 0000000000000000 x6 : ffff00010b02ddb0
+x5 : ffffbeb96395e378 x4 : 0000000000000000 x3 : 0000000000000cc0
+x2 : 000000000000000d x1 : 000000000000000c x0 : 0000000000000000
+Call trace:
+ xas_split_alloc+0xf8/0x128
+ split_huge_page_to_list_to_order+0x1c4/0x720
+ truncate_inode_partial_folio+0xdc/0x160
+ shmem_undo_range+0x2bc/0x6a8
+ shmem_fallocate+0x134/0x430
+ vfs_fallocate+0x124/0x2e8
+ ksys_fallocate+0x4c/0xa0
+ __arm64_sys_fallocate+0x24/0x38
+ invoke_syscall.constprop.0+0x7c/0xd8
+ do_el0_svc+0xb4/0xd0
+ el0_svc+0x44/0x1d8
+ el0t_64_sync_handler+0x134/0x150
+ el0t_64_sync+0x17c/0x180
+
+Fix it by disabling PMD-sized page cache when HPAGE_PMD_ORDER is larger
+than MAX_PAGECACHE_ORDER. As Matthew Wilcox pointed out, the page cache in
+a shmem file isn't represented by a multi-index entry and doesn't have this
+limitation when the xarray entry is split until commit 6b24ca4a1a8d ("mm:
+Use multi-index entries in the page cache").
+
+Link: https://lkml.kernel.org/r/20240627003953.1262512-5-gshan@redhat.com
+Fixes: 6b24ca4a1a8d ("mm: Use multi-index entries in the page cache")
+Signed-off-by: Gavin Shan <gshan@redhat.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Darrick J. Wong <djwong@kernel.org>
+Cc: Don Dutile <ddutile@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: William Kucharski <william.kucharski@oracle.com>
+Cc: Zhenyu Zhang <zhenyzha@redhat.com>
+Cc: <stable@vger.kernel.org> [5.17+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/shmem.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -535,8 +535,9 @@ static bool shmem_confirm_swap(struct ad
+
+ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+
+-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags)
++static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
+ {
+ loff_t i_size;
+
+@@ -567,6 +568,16 @@ bool shmem_is_huge(struct inode *inode,
+ }
+ }
+
++bool shmem_is_huge(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
++{
++ if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
++ return false;
++
++ return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
++}
++
+ #if defined(CONFIG_SYSFS)
+ static int shmem_parse_huge(const char *str)
+ {
--- /dev/null
+From ddab91f4b2de5c5b46e312a90107d9353087d8ea Mon Sep 17 00:00:00 2001
+From: Taniya Das <quic_tdas@quicinc.com>
+Date: Tue, 25 Jun 2024 10:03:11 +0530
+Subject: pmdomain: qcom: rpmhpd: Skip retention level for Power Domains
+
+From: Taniya Das <quic_tdas@quicinc.com>
+
+commit ddab91f4b2de5c5b46e312a90107d9353087d8ea upstream.
+
+In the cases where the power domain connected to logics is allowed to
+transition from a level(L)-->power collapse(0)-->retention(1) or
+vice versa retention(1)-->power collapse(0)-->level(L) will cause the
+logic to lose the configurations. The ARC does not support retention
+to collapse transition on MxC rails.
+
+The targets from SM8450 onwards the PLL logics of clock controllers are
+connected to MxC rails and the recommended configurations are carried
+out during the clock controller probes. The MxC transition as mentioned
+above should be skipped to ensure the PLL settings are intact across
+clock controller power on & off.
+
+On older targets that do not split MX into MxA and MxC, the logic does
+not collapse and is always parked at RETENTION, thus this issue is never
+observed on those targets.
+
+Cc: stable@vger.kernel.org # v5.17
+Reviewed-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Taniya Das <quic_tdas@quicinc.com>
+Link: https://lore.kernel.org/r/20240625-avoid_mxc_retention-v2-1-af9c2f549a5f@quicinc.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pmdomain/qcom/rpmhpd.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/pmdomain/qcom/rpmhpd.c
++++ b/drivers/pmdomain/qcom/rpmhpd.c
+@@ -40,6 +40,7 @@
+ * @addr: Resource address as looped up using resource name from
+ * cmd-db
+ * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource
++ * @skip_retention_level: Indicate that retention level should not be used for the power domain
+ */
+ struct rpmhpd {
+ struct device *dev;
+@@ -56,6 +57,7 @@ struct rpmhpd {
+ const char *res_name;
+ u32 addr;
+ bool state_synced;
++ bool skip_retention_level;
+ };
+
+ struct rpmhpd_desc {
+@@ -173,6 +175,7 @@ static struct rpmhpd mxc = {
+ .pd = { .name = "mxc", },
+ .peer = &mxc_ao,
+ .res_name = "mxc.lvl",
++ .skip_retention_level = true,
+ };
+
+ static struct rpmhpd mxc_ao = {
+@@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = {
+ .active_only = true,
+ .peer = &mxc,
+ .res_name = "mxc.lvl",
++ .skip_retention_level = true,
+ };
+
+ static struct rpmhpd nsp = {
+@@ -749,6 +753,9 @@ static int rpmhpd_update_level_mapping(s
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
++ if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION)
++ continue;
++
+ rpmhpd->level[i] = buf[i];
+
+ /* Remember the first corner with non-zero level */
alsa-hda-realtek-add-quirk-for-clevo-v50tu.patch
alsa-hda-realtek-enable-mute-led-on-hp-250-g7.patch
alsa-hda-realtek-limit-mic-boost-on-vaio-pro-px.patch
+fix-userfaultfd_api-to-return-einval-as-expected.patch
+pmdomain-qcom-rpmhpd-skip-retention-level-for-power-domains.patch
+libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch
+acpi-processor_idle-fix-invalid-comparison-with-insertion-sort-for-latency.patch
+cpufreq-acpi-mark-boost-policy-as-enabled-when-setting-boost.patch
+cpufreq-allow-drivers-to-advertise-boost-enabled.patch
+wireguard-selftests-use-acpi-off-instead-of-no-acpi-for-recent-qemu.patch
+wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch
+wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch
+wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch
+misc-fastrpc-fix-dsp-capabilities-request.patch
+misc-fastrpc-avoid-updating-pd-type-for-capability-request.patch
+misc-fastrpc-copy-the-complete-capability-structure-to-user.patch
+misc-fastrpc-fix-memory-leak-in-audio-daemon-attach-operation.patch
+misc-fastrpc-fix-ownership-reassignment-of-remote-heap.patch
+misc-fastrpc-restrict-untrusted-app-to-attach-to-privileged-pd.patch
+mm-shmem-disable-pmd-sized-page-cache-if-needed.patch
+mm-damon-core-merge-regions-aggressively-when-max_nr_regions-is-unmet.patch
--- /dev/null
+From 948f991c62a4018fb81d85804eeab3029c6209f8 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@kernel.org>
+Date: Thu, 4 Jul 2024 17:45:15 +0200
+Subject: wireguard: allowedips: avoid unaligned 64-bit memory accesses
+
+From: Helge Deller <deller@kernel.org>
+
+commit 948f991c62a4018fb81d85804eeab3029c6209f8 upstream.
+
+On the parisc platform, the kernel issues kernel warnings because
+swap_endian() tries to load a 128-bit IPv6 address from an unaligned
+memory location:
+
+ Kernel: unaligned access to 0x55f4688c in wg_allowedips_insert_v6+0x2c/0x80 [wireguard] (iir 0xf3010df)
+ Kernel: unaligned access to 0x55f46884 in wg_allowedips_insert_v6+0x38/0x80 [wireguard] (iir 0xf2010dc)
+
+Avoid such unaligned memory accesses by instead using the
+get_unaligned_be64() helper macro.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+[Jason: replace src[8] in original patch with src+8]
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-3-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/allowedips.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u
+ if (bits == 32) {
+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ } else if (bits == 128) {
+- ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+- ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
++ ((u64 *)dst)[0] = get_unaligned_be64(src);
++ ((u64 *)dst)[1] = get_unaligned_be64(src + 8);
+ }
+ }
+
--- /dev/null
+From 2fe3d6d2053c57f2eae5e85ca1656d185ebbe4e8 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 4 Jul 2024 17:45:16 +0200
+Subject: wireguard: queueing: annotate intentional data race in cpu round robin
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 2fe3d6d2053c57f2eae5e85ca1656d185ebbe4e8 upstream.
+
+KCSAN reports a race in the CPU round robin function, which, as the
+comment points out, is intentional:
+
+ BUG: KCSAN: data-race in wg_packet_send_staged_packets / wg_packet_send_staged_packets
+
+ read to 0xffff88811254eb28 of 4 bytes by task 3160 on cpu 1:
+ wg_cpumask_next_online drivers/net/wireguard/queueing.h:127 [inline]
+ wg_queue_enqueue_per_device_and_peer drivers/net/wireguard/queueing.h:173 [inline]
+ wg_packet_create_data drivers/net/wireguard/send.c:320 [inline]
+ wg_packet_send_staged_packets+0x60e/0xac0 drivers/net/wireguard/send.c:388
+ wg_packet_send_keepalive+0xe2/0x100 drivers/net/wireguard/send.c:239
+ wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline]
+ wg_packet_handshake_receive_worker+0x449/0x5f0 drivers/net/wireguard/receive.c:213
+ process_one_work kernel/workqueue.c:3248 [inline]
+ process_scheduled_works+0x483/0x9a0 kernel/workqueue.c:3329
+ worker_thread+0x526/0x720 kernel/workqueue.c:3409
+ kthread+0x1d1/0x210 kernel/kthread.c:389
+ ret_from_fork+0x4b/0x60 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+
+ write to 0xffff88811254eb28 of 4 bytes by task 3158 on cpu 0:
+ wg_cpumask_next_online drivers/net/wireguard/queueing.h:130 [inline]
+ wg_queue_enqueue_per_device_and_peer drivers/net/wireguard/queueing.h:173 [inline]
+ wg_packet_create_data drivers/net/wireguard/send.c:320 [inline]
+ wg_packet_send_staged_packets+0x6e5/0xac0 drivers/net/wireguard/send.c:388
+ wg_packet_send_keepalive+0xe2/0x100 drivers/net/wireguard/send.c:239
+ wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline]
+ wg_packet_handshake_receive_worker+0x449/0x5f0 drivers/net/wireguard/receive.c:213
+ process_one_work kernel/workqueue.c:3248 [inline]
+ process_scheduled_works+0x483/0x9a0 kernel/workqueue.c:3329
+ worker_thread+0x526/0x720 kernel/workqueue.c:3409
+ kthread+0x1d1/0x210 kernel/kthread.c:389
+ ret_from_fork+0x4b/0x60 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+
+ value changed: 0xffffffff -> 0x00000000
+
+Mark this race as intentional by using READ/WRITE_ONCE().
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-4-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/queueing.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_onli
+ */
+ static inline int wg_cpumask_next_online(int *last_cpu)
+ {
+- int cpu = cpumask_next(*last_cpu, cpu_online_mask);
++ int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_online_mask);
+- *last_cpu = cpu;
++ WRITE_ONCE(*last_cpu, cpu);
+ return cpu;
+ }
+
--- /dev/null
+From 2cb489eb8dfc291060516df313ff31f4f9f3d794 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 4 Jul 2024 17:45:14 +0200
+Subject: wireguard: selftests: use acpi=off instead of -no-acpi for recent QEMU
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 2cb489eb8dfc291060516df313ff31f4f9f3d794 upstream.
+
+QEMU 9.0 removed -no-acpi, in favor of machine properties, so update the
+Makefile to use the correct QEMU invocation.
+
+Cc: stable@vger.kernel.org
+Fixes: b83fdcd9fb8a ("wireguard: selftests: use microvm on x86")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-2-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/wireguard/qemu/Makefile | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/wireguard/qemu/Makefile
++++ b/tools/testing/selftests/wireguard/qemu/Makefile
+@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(HOST_ARCH),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
++QEMU_MACHINE := -cpu max -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),i686)
+ CHOST := i686-linux-musl
+@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
++QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),mips64)
+ CHOST := mips64-linux-musl
--- /dev/null
+From 381a7d453fa2ac5f854a154d3c9b1bbb90c4f94f Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 4 Jul 2024 17:45:17 +0200
+Subject: wireguard: send: annotate intentional data race in checking empty queue
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 381a7d453fa2ac5f854a154d3c9b1bbb90c4f94f upstream.
+
+KCSAN reports a race in wg_packet_send_keepalive, which is intentional:
+
+ BUG: KCSAN: data-race in wg_packet_send_keepalive / wg_packet_send_staged_packets
+
+ write to 0xffff88814cd91280 of 8 bytes by task 3194 on cpu 0:
+ __skb_queue_head_init include/linux/skbuff.h:2162 [inline]
+ skb_queue_splice_init include/linux/skbuff.h:2248 [inline]
+ wg_packet_send_staged_packets+0xe5/0xad0 drivers/net/wireguard/send.c:351
+ wg_xmit+0x5b8/0x660 drivers/net/wireguard/device.c:218
+ __netdev_start_xmit include/linux/netdevice.h:4940 [inline]
+ netdev_start_xmit include/linux/netdevice.h:4954 [inline]
+ xmit_one net/core/dev.c:3548 [inline]
+ dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3564
+ __dev_queue_xmit+0xeff/0x1d80 net/core/dev.c:4349
+ dev_queue_xmit include/linux/netdevice.h:3134 [inline]
+ neigh_connected_output+0x231/0x2a0 net/core/neighbour.c:1592
+ neigh_output include/net/neighbour.h:542 [inline]
+ ip6_finish_output2+0xa66/0xce0 net/ipv6/ip6_output.c:137
+ ip6_finish_output+0x1a5/0x490 net/ipv6/ip6_output.c:222
+ NF_HOOK_COND include/linux/netfilter.h:303 [inline]
+ ip6_output+0xeb/0x220 net/ipv6/ip6_output.c:243
+ dst_output include/net/dst.h:451 [inline]
+ NF_HOOK include/linux/netfilter.h:314 [inline]
+ ndisc_send_skb+0x4a2/0x670 net/ipv6/ndisc.c:509
+ ndisc_send_rs+0x3ab/0x3e0 net/ipv6/ndisc.c:719
+ addrconf_dad_completed+0x640/0x8e0 net/ipv6/addrconf.c:4295
+ addrconf_dad_work+0x891/0xbc0
+ process_one_work kernel/workqueue.c:2633 [inline]
+ process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2706
+ worker_thread+0x525/0x730 kernel/workqueue.c:2787
+ kthread+0x1d7/0x210 kernel/kthread.c:388
+ ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242
+
+ read to 0xffff88814cd91280 of 8 bytes by task 3202 on cpu 1:
+ skb_queue_empty include/linux/skbuff.h:1798 [inline]
+ wg_packet_send_keepalive+0x20/0x100 drivers/net/wireguard/send.c:225
+ wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline]
+ wg_packet_handshake_receive_worker+0x445/0x5e0 drivers/net/wireguard/receive.c:213
+ process_one_work kernel/workqueue.c:2633 [inline]
+ process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2706
+ worker_thread+0x525/0x730 kernel/workqueue.c:2787
+ kthread+0x1d7/0x210 kernel/kthread.c:388
+ ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242
+
+ value changed: 0xffff888148fef200 -> 0xffff88814cd91280
+
+Mark this race as intentional by using the skb_queue_empty_lockless()
+function rather than skb_queue_empty(), which uses READ_ONCE()
+internally to annotate the race.
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-5-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/send.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_
+ {
+ struct sk_buff *skb;
+
+- if (skb_queue_empty(&peer->staged_packet_queue)) {
++ if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ GFP_ATOMIC);
+ if (unlikely(!skb))