6.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 31 Oct 2022 06:17:20 +0000 (07:17 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 31 Oct 2022 06:17:20 +0000 (07:17 +0100)
added patches:
cpufreq-intel_pstate-hybrid-use-known-scaling-factor-for-p-cores.patch
cpufreq-intel_pstate-read-all-msrs-on-the-target-cpu.patch
exec-copy-oldsighand-action-under-spin-lock.patch
fs-binfmt_elf-fix-memory-leak-in-load_elf_binary.patch
mac802154-fix-lqi-recording.patch
scsi-qla2xxx-use-transport-defined-speed-mask-for-supported_speeds.patch

queue-6.0/cpufreq-intel_pstate-hybrid-use-known-scaling-factor-for-p-cores.patch [new file with mode: 0644]
queue-6.0/cpufreq-intel_pstate-read-all-msrs-on-the-target-cpu.patch [new file with mode: 0644]
queue-6.0/exec-copy-oldsighand-action-under-spin-lock.patch [new file with mode: 0644]
queue-6.0/fs-binfmt_elf-fix-memory-leak-in-load_elf_binary.patch [new file with mode: 0644]
queue-6.0/mac802154-fix-lqi-recording.patch [new file with mode: 0644]
queue-6.0/scsi-qla2xxx-use-transport-defined-speed-mask-for-supported_speeds.patch [new file with mode: 0644]
queue-6.0/series

diff --git a/queue-6.0/cpufreq-intel_pstate-hybrid-use-known-scaling-factor-for-p-cores.patch b/queue-6.0/cpufreq-intel_pstate-hybrid-use-known-scaling-factor-for-p-cores.patch
new file mode 100644 (file)
index 0000000..a0d8077
--- /dev/null
@@ -0,0 +1,163 @@
+From f5c8cf2a4992dd929fa0c2f25c09ee69b8dcbce1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 24 Oct 2022 21:22:48 +0200
+Subject: cpufreq: intel_pstate: hybrid: Use known scaling factor for P-cores
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit f5c8cf2a4992dd929fa0c2f25c09ee69b8dcbce1 upstream.
+
+Commit 46573fd6369f ("cpufreq: intel_pstate: hybrid: Rework HWP
+calibration") attempted to use the information from CPPC (the nominal
+performance in particular) to obtain the scaling factor allowing the
+frequency to be computed if the HWP performance level of the given CPU
+is known or vice versa.
+
+However, it turns out that on some platforms this doesn't work, because
+the CPPC information on them does not align with the contents of the
+MSR_HWP_CAPABILITIES registers.
+
+This basically means that the only way to make intel_pstate work on all
+of the hybrid platforms to date is to use the observation that on all
+of them the scaling factor between the HWP performance levels and
+frequency for P-cores is 78741 (approximately 100000/1.27).  For
+E-cores it is 100000, which is the same as for all of the non-hybrid
+"core" platforms and does not require any changes.
+
+Accordingly, make intel_pstate use 78741 as the scaling factor between
+HWP performance levels and frequency for P-cores on all hybrid platforms
+and drop the dependency of the HWP calibration code on CPPC.
+
+Fixes: 46573fd6369f ("cpufreq: intel_pstate: hybrid: Rework HWP calibration")
+Reported-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Tested-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: 5.15+ <stable@vger.kernel.org> # 5.15+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |   69 ++++++++---------------------------------
+ 1 file changed, 15 insertions(+), 54 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -27,6 +27,7 @@
+ #include <linux/pm_qos.h>
+ #include <trace/events/power.h>
++#include <asm/cpu.h>
+ #include <asm/div64.h>
+ #include <asm/msr.h>
+ #include <asm/cpu_device_id.h>
+@@ -398,16 +399,6 @@ static int intel_pstate_get_cppc_guarant
+       return cppc_perf.nominal_perf;
+ }
+-
+-static u32 intel_pstate_cppc_nominal(int cpu)
+-{
+-      u64 nominal_perf;
+-
+-      if (cppc_get_nominal_perf(cpu, &nominal_perf))
+-              return 0;
+-
+-      return nominal_perf;
+-}
+ #else /* CONFIG_ACPI_CPPC_LIB */
+ static inline void intel_pstate_set_itmt_prio(int cpu)
+ {
+@@ -532,34 +523,17 @@ static void intel_pstate_hybrid_hwp_adju
+       int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
+       int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+       int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
+-      int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
+       int scaling = cpu->pstate.scaling;
+       pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+-      pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max(cpu->cpu));
+       pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+       pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+       pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
+       pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
+       pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+-      /*
+-       * If the product of the HWP performance scaling factor and the HWP_CAP
+-       * highest performance is greater than the maximum turbo frequency
+-       * corresponding to the pstate_funcs.get_turbo() return value, the
+-       * scaling factor is too high, so recompute it to make the HWP_CAP
+-       * highest performance correspond to the maximum turbo frequency.
+-       */
+-      cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+-      if (turbo_freq < cpu->pstate.turbo_freq) {
+-              cpu->pstate.turbo_freq = turbo_freq;
+-              scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
+-              cpu->pstate.scaling = scaling;
+-
+-              pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
+-                       cpu->cpu, scaling);
+-      }
+-
++      cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
++                                         perf_ctl_scaling);
+       cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+                                        perf_ctl_scaling);
+@@ -1965,37 +1939,24 @@ static int knl_get_turbo_pstate(int cpu)
+       return ret;
+ }
+-#ifdef CONFIG_ACPI_CPPC_LIB
+-static u32 hybrid_ref_perf;
+-
+-static int hybrid_get_cpu_scaling(int cpu)
++static void hybrid_get_type(void *data)
+ {
+-      return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
+-                          intel_pstate_cppc_nominal(cpu));
++      u8 *cpu_type = data;
++
++      *cpu_type = get_this_hybrid_cpu_type();
+ }
+-static void intel_pstate_cppc_set_cpu_scaling(void)
++static int hybrid_get_cpu_scaling(int cpu)
+ {
+-      u32 min_nominal_perf = U32_MAX;
+-      int cpu;
++      u8 cpu_type = 0;
+-      for_each_present_cpu(cpu) {
+-              u32 nominal_perf = intel_pstate_cppc_nominal(cpu);
++      smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
++      /* P-cores have a smaller perf level-to-frequency scaling factor. */
++      if (cpu_type == 0x40)
++              return 78741;
+-              if (nominal_perf && nominal_perf < min_nominal_perf)
+-                      min_nominal_perf = nominal_perf;
+-      }
+-
+-      if (min_nominal_perf < U32_MAX) {
+-              hybrid_ref_perf = min_nominal_perf;
+-              pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+-      }
++      return core_get_scaling();
+ }
+-#else
+-static inline void intel_pstate_cppc_set_cpu_scaling(void)
+-{
+-}
+-#endif /* CONFIG_ACPI_CPPC_LIB */
+ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+ {
+@@ -3450,7 +3411,7 @@ static int __init intel_pstate_init(void
+                               default_driver = &intel_pstate;
+                       if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
+-                              intel_pstate_cppc_set_cpu_scaling();
++                              pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+                       goto hwp_cpu_matched;
+               }
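
The whole fix reduces to one multiplication with a per-core-type constant. As a reference, here is a minimal userspace sketch of that arithmetic; only the 0x40 P-core type check and the 78741/100000 scaling factors come from the patch above, the rest (the E-core type value, the HWP level) is illustrative.

/*
 * Sketch of the HWP-level-to-frequency arithmetic hard-coded by the patch.
 * Build: gcc -o scaling scaling.c
 */
#include <stdio.h>

#define HYBRID_P_CORE_TYPE 0x40   /* value checked in hybrid_get_cpu_scaling() */
#define P_CORE_SCALING     78741  /* ~100000 / 1.27, from the patch */
#define CORE_SCALING       100000 /* core_get_scaling(): kHz per HWP perf unit */

static int scaling_for(unsigned char cpu_type)
{
        /* P-cores have a smaller perf-level-to-frequency scaling factor. */
        if (cpu_type == HYBRID_P_CORE_TYPE)
                return P_CORE_SCALING;
        return CORE_SCALING;
}

int main(void)
{
        int hwp_level = 40; /* hypothetical HWP performance level */

        printf("P-core: level %d -> %d kHz\n",
               hwp_level, hwp_level * scaling_for(0x40));
        printf("E-core: level %d -> %d kHz\n",
               hwp_level, hwp_level * scaling_for(0x20));
        return 0;
}
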
diff --git a/queue-6.0/cpufreq-intel_pstate-read-all-msrs-on-the-target-cpu.patch b/queue-6.0/cpufreq-intel_pstate-read-all-msrs-on-the-target-cpu.patch
new file mode 100644 (file)
index 0000000..f65e35f
--- /dev/null
@@ -0,0 +1,242 @@
+From 8dbab94d45fb1094cefac7956b7fb987a36e2b12 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 24 Oct 2022 21:21:00 +0200
+Subject: cpufreq: intel_pstate: Read all MSRs on the target CPU
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 8dbab94d45fb1094cefac7956b7fb987a36e2b12 upstream.
+
+Some of the MSR accesses in intel_pstate are carried out on the CPU
+that is running the code, but the values coming from them are used
+for the performance scaling of the other CPUs.
+
+This is problematic, for example, on hybrid platforms where
+MSR_TURBO_RATIO_LIMIT for P-cores and E-cores is different, so the
+values read from it on a P-core are generally not applicable to E-cores
+and the other way around.
+
+For this reason, make the driver access all MSRs on the target CPU on
+platforms using the "core" pstate_funcs callbacks, which is the case for
+all of the hybrid platforms released to date.  For this purpose, pass
+a CPU argument to the ->get_max(), ->get_max_physical(), ->get_min()
+and ->get_turbo() pstate_funcs callbacks and from there pass it to
+rdmsrl_on_cpu() or rdmsrl_safe_on_cpu() to access the MSR on the target
+CPU.
+
+Fixes: 46573fd6369f ("cpufreq: intel_pstate: hybrid: Rework HWP calibration")
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Tested-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: 5.15+ <stable@vger.kernel.org> # 5.15+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |   66 ++++++++++++++++++++---------------------
+ 1 file changed, 33 insertions(+), 33 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -280,10 +280,10 @@ static struct cpudata **all_cpu_data;
+  * structure is used to store those callbacks.
+  */
+ struct pstate_funcs {
+-      int (*get_max)(void);
+-      int (*get_max_physical)(void);
+-      int (*get_min)(void);
+-      int (*get_turbo)(void);
++      int (*get_max)(int cpu);
++      int (*get_max_physical)(int cpu);
++      int (*get_min)(int cpu);
++      int (*get_turbo)(int cpu);
+       int (*get_scaling)(void);
+       int (*get_cpu_scaling)(int cpu);
+       int (*get_aperf_mperf_shift)(void);
+@@ -531,12 +531,12 @@ static void intel_pstate_hybrid_hwp_adju
+ {
+       int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
+       int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+-      int perf_ctl_turbo = pstate_funcs.get_turbo();
++      int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
+       int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
+       int scaling = cpu->pstate.scaling;
+       pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+-      pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max());
++      pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max(cpu->cpu));
+       pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+       pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+       pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
+@@ -1740,7 +1740,7 @@ static void intel_pstate_hwp_enable(stru
+       intel_pstate_update_epp_defaults(cpudata);
+ }
+-static int atom_get_min_pstate(void)
++static int atom_get_min_pstate(int not_used)
+ {
+       u64 value;
+@@ -1748,7 +1748,7 @@ static int atom_get_min_pstate(void)
+       return (value >> 8) & 0x7F;
+ }
+-static int atom_get_max_pstate(void)
++static int atom_get_max_pstate(int not_used)
+ {
+       u64 value;
+@@ -1756,7 +1756,7 @@ static int atom_get_max_pstate(void)
+       return (value >> 16) & 0x7F;
+ }
+-static int atom_get_turbo_pstate(void)
++static int atom_get_turbo_pstate(int not_used)
+ {
+       u64 value;
+@@ -1834,23 +1834,23 @@ static void atom_get_vid(struct cpudata
+       cpudata->vid.turbo = value & 0x7f;
+ }
+-static int core_get_min_pstate(void)
++static int core_get_min_pstate(int cpu)
+ {
+       u64 value;
+-      rdmsrl(MSR_PLATFORM_INFO, value);
++      rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+       return (value >> 40) & 0xFF;
+ }
+-static int core_get_max_pstate_physical(void)
++static int core_get_max_pstate_physical(int cpu)
+ {
+       u64 value;
+-      rdmsrl(MSR_PLATFORM_INFO, value);
++      rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+       return (value >> 8) & 0xFF;
+ }
+-static int core_get_tdp_ratio(u64 plat_info)
++static int core_get_tdp_ratio(int cpu, u64 plat_info)
+ {
+       /* Check how many TDP levels present */
+       if (plat_info & 0x600000000) {
+@@ -1860,13 +1860,13 @@ static int core_get_tdp_ratio(u64 plat_i
+               int err;
+               /* Get the TDP level (0, 1, 2) to get ratios */
+-              err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
++              err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+               if (err)
+                       return err;
+               /* TDP MSR are continuous starting at 0x648 */
+               tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
+-              err = rdmsrl_safe(tdp_msr, &tdp_ratio);
++              err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+               if (err)
+                       return err;
+@@ -1883,7 +1883,7 @@ static int core_get_tdp_ratio(u64 plat_i
+       return -ENXIO;
+ }
+-static int core_get_max_pstate(void)
++static int core_get_max_pstate(int cpu)
+ {
+       u64 tar;
+       u64 plat_info;
+@@ -1891,10 +1891,10 @@ static int core_get_max_pstate(void)
+       int tdp_ratio;
+       int err;
+-      rdmsrl(MSR_PLATFORM_INFO, plat_info);
++      rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+       max_pstate = (plat_info >> 8) & 0xFF;
+-      tdp_ratio = core_get_tdp_ratio(plat_info);
++      tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
+       if (tdp_ratio <= 0)
+               return max_pstate;
+@@ -1903,7 +1903,7 @@ static int core_get_max_pstate(void)
+               return tdp_ratio;
+       }
+-      err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
++      err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+       if (!err) {
+               int tar_levels;
+@@ -1918,13 +1918,13 @@ static int core_get_max_pstate(void)
+       return max_pstate;
+ }
+-static int core_get_turbo_pstate(void)
++static int core_get_turbo_pstate(int cpu)
+ {
+       u64 value;
+       int nont, ret;
+-      rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
+-      nont = core_get_max_pstate();
++      rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
++      nont = core_get_max_pstate(cpu);
+       ret = (value) & 255;
+       if (ret <= nont)
+               ret = nont;
+@@ -1952,13 +1952,13 @@ static int knl_get_aperf_mperf_shift(voi
+       return 10;
+ }
+-static int knl_get_turbo_pstate(void)
++static int knl_get_turbo_pstate(int cpu)
+ {
+       u64 value;
+       int nont, ret;
+-      rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
+-      nont = core_get_max_pstate();
++      rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
++      nont = core_get_max_pstate(cpu);
+       ret = (((value) >> 8) & 0xFF);
+       if (ret <= nont)
+               ret = nont;
+@@ -2025,10 +2025,10 @@ static void intel_pstate_max_within_limi
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+-      int perf_ctl_max_phys = pstate_funcs.get_max_physical();
++      int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
+       int perf_ctl_scaling = pstate_funcs.get_scaling();
+-      cpu->pstate.min_pstate = pstate_funcs.get_min();
++      cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
+       cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
+       cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
+@@ -2044,8 +2044,8 @@ static void intel_pstate_get_cpu_pstates
+               }
+       } else {
+               cpu->pstate.scaling = perf_ctl_scaling;
+-              cpu->pstate.max_pstate = pstate_funcs.get_max();
+-              cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
++              cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
++              cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
+       }
+       if (cpu->pstate.scaling == perf_ctl_scaling) {
+@@ -3221,9 +3221,9 @@ static unsigned int force_load __initdat
+ static int __init intel_pstate_msrs_not_valid(void)
+ {
+-      if (!pstate_funcs.get_max() ||
+-          !pstate_funcs.get_min() ||
+-          !pstate_funcs.get_turbo())
++      if (!pstate_funcs.get_max(0) ||
++          !pstate_funcs.get_min(0) ||
++          !pstate_funcs.get_turbo(0))
+               return -ENODEV;
+       return 0;
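
The change pins every MSR read to the CPU whose value is wanted by going through rdmsrl_on_cpu()/rdmsrl_safe_on_cpu(). A rough userspace analogue of the same idea, using the standard msr character device (/dev/cpu/N/msr, root and the msr module required); the MSR address 0x1ad (MSR_TURBO_RATIO_LIMIT) is taken from the patch, the CPU numbers are arbitrary:

/* Read an MSR on a chosen CPU, the userspace way: the msr driver exposes
 * one device node per CPU and uses the file offset as the MSR address. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

static int read_msr_on_cpu(int cpu, uint32_t msr, uint64_t *val)
{
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        if (pread(fd, val, sizeof(*val), msr) != sizeof(*val)) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}

int main(void)
{
        uint64_t v;

        /* Compare the 1-core turbo ratio (bits 7:0) on CPU 0 vs CPU 8;
         * on a hybrid part the two may differ, which is exactly why the
         * kernel must not read this MSR on whatever CPU runs the code. */
        for (int cpu = 0; cpu <= 8; cpu += 8)
                if (!read_msr_on_cpu(cpu, 0x1ad, &v))
                        printf("cpu%d: max 1-core turbo ratio = %" PRIu64 "\n",
                               cpu, v & 0xff);
        return 0;
}
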
diff --git a/queue-6.0/exec-copy-oldsighand-action-under-spin-lock.patch b/queue-6.0/exec-copy-oldsighand-action-under-spin-lock.patch
new file mode 100644 (file)
index 0000000..73d484c
--- /dev/null
@@ -0,0 +1,39 @@
+From 5bf2fedca8f59379025b0d52f917b9ddb9bfe17e Mon Sep 17 00:00:00 2001
+From: Bernd Edlinger <bernd.edlinger@hotmail.de>
+Date: Mon, 7 Jun 2021 15:54:27 +0200
+Subject: exec: Copy oldsighand->action under spin-lock
+
+From: Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+commit 5bf2fedca8f59379025b0d52f917b9ddb9bfe17e upstream.
+
+unshare_sighand should only access oldsighand->action
+while holding oldsighand->siglock, to make sure that
+newsighand->action is in a consistent state.
+
+Signed-off-by: Bernd Edlinger <bernd.edlinger@hotmail.de>
+Cc: stable@vger.kernel.org
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/AM8PR10MB470871DEBD1DED081F9CC391E4389@AM8PR10MB4708.EURPRD10.PROD.OUTLOOK.COM
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/exec.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1196,11 +1196,11 @@ static int unshare_sighand(struct task_s
+                       return -ENOMEM;
+               refcount_set(&newsighand->count, 1);
+-              memcpy(newsighand->action, oldsighand->action,
+-                     sizeof(newsighand->action));
+               write_lock_irq(&tasklist_lock);
+               spin_lock(&oldsighand->siglock);
++              memcpy(newsighand->action, oldsighand->action,
++                     sizeof(newsighand->action));
+               rcu_assign_pointer(me->sighand, newsighand);
+               spin_unlock(&oldsighand->siglock);
+               write_unlock_irq(&tasklist_lock);
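
The rule the fix enforces is general: copy shared state only while holding the lock its writers take, so the snapshot cannot interleave with an update. A minimal pthreads sketch of the same pattern; the names are illustrative, not kernel API:

#include <pthread.h>
#include <string.h>

struct sighand_like {
        pthread_mutex_t lock;
        int action[64]; /* stand-in for struct k_sigaction action[_NSIG] */
};

static void snapshot(struct sighand_like *old, struct sighand_like *copy)
{
        pthread_mutex_lock(&old->lock);
        /* Copying inside the critical section yields a consistent view;
         * copying before taking the lock (as the old code did) could mix
         * pre- and post-update entries if a writer ran concurrently. */
        memcpy(copy->action, old->action, sizeof(copy->action));
        pthread_mutex_unlock(&old->lock);
}

int main(void)
{
        struct sighand_like old  = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct sighand_like copy = { .lock = PTHREAD_MUTEX_INITIALIZER };

        snapshot(&old, &copy);
        return 0;
}
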
diff --git a/queue-6.0/fs-binfmt_elf-fix-memory-leak-in-load_elf_binary.patch b/queue-6.0/fs-binfmt_elf-fix-memory-leak-in-load_elf_binary.patch
new file mode 100644 (file)
index 0000000..c579f46
--- /dev/null
@@ -0,0 +1,67 @@
+From 594d2a14f2168c09b13b114c3d457aa939403e52 Mon Sep 17 00:00:00 2001
+From: Li Zetao <lizetao1@huawei.com>
+Date: Mon, 24 Oct 2022 23:44:21 +0800
+Subject: fs/binfmt_elf: Fix memory leak in load_elf_binary()
+
+From: Li Zetao <lizetao1@huawei.com>
+
+commit 594d2a14f2168c09b13b114c3d457aa939403e52 upstream.
+
+There is a memory leak reported by kmemleak:
+
+  unreferenced object 0xffff88817104ef80 (size 224):
+    comm "xfs_admin", pid 47165, jiffies 4298708825 (age 1333.476s)
+    hex dump (first 32 bytes):
+      00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+      60 a8 b3 00 81 88 ff ff a8 10 5a 00 81 88 ff ff  `.........Z.....
+    backtrace:
+      [<ffffffff819171e1>] __alloc_file+0x21/0x250
+      [<ffffffff81918061>] alloc_empty_file+0x41/0xf0
+      [<ffffffff81948cda>] path_openat+0xea/0x3d30
+      [<ffffffff8194ec89>] do_filp_open+0x1b9/0x290
+      [<ffffffff8192660e>] do_open_execat+0xce/0x5b0
+      [<ffffffff81926b17>] open_exec+0x27/0x50
+      [<ffffffff81a69250>] load_elf_binary+0x510/0x3ed0
+      [<ffffffff81927759>] bprm_execve+0x599/0x1240
+      [<ffffffff8192a997>] do_execveat_common.isra.0+0x4c7/0x680
+      [<ffffffff8192b078>] __x64_sys_execve+0x88/0xb0
+      [<ffffffff83bbf0a5>] do_syscall_64+0x35/0x80
+
+If "interp_elf_ex" fails to allocate memory in load_elf_binary(),
+the program will take the "out_free_ph" error handling path,
+so the "interpreter" file resource is not released.
+
+Fix it by adding an error handling path "out_free_file", which will
+release the file resource when "interp_elf_ex" fails to allocate
+memory.
+
+Fixes: 0693ffebcfe5 ("fs/binfmt_elf.c: allocate less for static executable")
+Signed-off-by: Li Zetao <lizetao1@huawei.com>
+Reviewed-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221024154421.982230-1-lizetao1@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/binfmt_elf.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -911,7 +911,7 @@ static int load_elf_binary(struct linux_
+               interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
+               if (!interp_elf_ex) {
+                       retval = -ENOMEM;
+-                      goto out_free_ph;
++                      goto out_free_file;
+               }
+               /* Get the exec headers */
+@@ -1354,6 +1354,7 @@ out:
+ out_free_dentry:
+       kfree(interp_elf_ex);
+       kfree(interp_elf_phdata);
++out_free_file:
+       allow_write_access(interpreter);
+       if (interpreter)
+               fput(interpreter);
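
The fix is a standard application of C's goto-unwind idiom: every acquisition gets its own label, and the labels release resources in reverse order of acquisition, so a failure after step N jumps to the label that unwinds steps N-1 down to 1. A self-contained sketch with illustrative resource names:

#include <stdio.h>
#include <stdlib.h>

static int load(void)
{
        int ret = -1;

        void *phdata = malloc(128);             /* acquired first */
        if (!phdata)
                return -1;

        FILE *interp = fopen("/dev/null", "r"); /* acquired second */
        if (!interp)
                goto out_free_ph;

        void *elf_ex = malloc(64);              /* acquired third */
        if (!elf_ex)
                /* The bug: jumping to out_free_ph here would leak interp.
                 * The fix adds out_free_file so interp is released too. */
                goto out_free_file;

        ret = 0;
        free(elf_ex);
out_free_file:
        fclose(interp);
out_free_ph:
        free(phdata);
        return ret;
}

int main(void)
{
        return load();
}
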
diff --git a/queue-6.0/mac802154-fix-lqi-recording.patch b/queue-6.0/mac802154-fix-lqi-recording.patch
new file mode 100644 (file)
index 0000000..0d504a1
--- /dev/null
@@ -0,0 +1,60 @@
+From 5a5c4e06fd03b595542d5590f2bc05a6b7fc5c2b Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Thu, 20 Oct 2022 16:25:35 +0200
+Subject: mac802154: Fix LQI recording
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 5a5c4e06fd03b595542d5590f2bc05a6b7fc5c2b upstream.
+
+Back in 2014, the LQI was saved in the skb control buffer (skb->cb, or
+mac_cb(skb)) without any actual reset of this area prior to its use.
+
+As part of a useful rework of the use of this region, 32edc40ae65c
+("ieee802154: change _cb handling slightly") introduced mac_cb_init() to
+basically memset the cb field to 0. In particular, this new function got
+called at the beginning of mac802154_parse_frame_start(), right before
+the location where the buffer got actually filled.
+
+What went unnoticed, however, is that the very first
+helper called by device drivers in the receive path already used this
+area to save the LQI value for later extraction. Resetting the cb field
+"so late" led to systematically zeroing the LQI.
+
+If we consider the reset of the cb field necessary, we can do it as soon
+as we get an skb from a device driver, right before storing the LQI,
+as that is the very first time we need to write something there.
+
+Cc: stable@vger.kernel.org
+Fixes: 32edc40ae65c ("ieee802154: change _cb handling slightly")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Acked-by: Alexander Aring <aahringo@redhat.com>
+Link: https://lore.kernel.org/r/20221020142535.1038885-1-miquel.raynal@bootlin.com
+Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac802154/rx.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/mac802154/rx.c
++++ b/net/mac802154/rx.c
+@@ -132,7 +132,7 @@ static int
+ ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
+ {
+       int hlen;
+-      struct ieee802154_mac_cb *cb = mac_cb_init(skb);
++      struct ieee802154_mac_cb *cb = mac_cb(skb);
+       skb_reset_mac_header(skb);
+@@ -294,8 +294,9 @@ void
+ ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
+ {
+       struct ieee802154_local *local = hw_to_local(hw);
++      struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+-      mac_cb(skb)->lqi = lqi;
++      cb->lqi = lqi;
+       skb->pkt_type = IEEE802154_RX_MSG;
+       skb_queue_tail(&local->skb_queue, skb);
+       tasklet_schedule(&local->tasklet);
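
The underlying bug is purely one of ordering: a scratch area has to be zeroed before the first field is stored in it, not after. A small userspace sketch of both orderings; the struct is illustrative, not the kernel's ieee802154_mac_cb:

#include <assert.h>
#include <string.h>

struct cb { unsigned char lqi; int flags; };

static unsigned char buggy(struct cb *cb, unsigned char lqi)
{
        cb->lqi = lqi;              /* driver entry point stores the LQI... */
        memset(cb, 0, sizeof(*cb)); /* ...then a later parse step zeroes the
                                     * whole area, wiping the stored value. */
        return cb->lqi;             /* always 0 */
}

static unsigned char fixed(struct cb *cb, unsigned char lqi)
{
        memset(cb, 0, sizeof(*cb)); /* init once, at the earliest write */
        cb->lqi = lqi;
        return cb->lqi;             /* LQI preserved */
}

int main(void)
{
        struct cb cb;

        assert(buggy(&cb, 42) == 0);
        assert(fixed(&cb, 42) == 42);
        return 0;
}
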
diff --git a/queue-6.0/scsi-qla2xxx-use-transport-defined-speed-mask-for-supported_speeds.patch b/queue-6.0/scsi-qla2xxx-use-transport-defined-speed-mask-for-supported_speeds.patch
new file mode 100644 (file)
index 0000000..db9aff1
--- /dev/null
@@ -0,0 +1,73 @@
+From 0b863257c17c5f57a41e0a48de140ed026957a63 Mon Sep 17 00:00:00 2001
+From: Manish Rangankar <mrangankar@marvell.com>
+Date: Tue, 27 Sep 2022 04:59:46 -0700
+Subject: scsi: qla2xxx: Use transport-defined speed mask for supported_speeds
+
+From: Manish Rangankar <mrangankar@marvell.com>
+
+commit 0b863257c17c5f57a41e0a48de140ed026957a63 upstream.
+
+One of the sysfs values reported for supported_speeds was not valid (20Gb/s
+reported instead of 64Gb/s).  Instead of the driver-internal speed mask
+definition, use the speed mask defined in transport_fc for reporting
+host->supported_speeds.
+
+Link: https://lore.kernel.org/r/20220927115946.17559-1-njavali@marvell.com
+Cc: stable@vger.kernel.org
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Manish Rangankar <mrangankar@marvell.com>
+Signed-off-by: Nilesh Javali <njavali@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/qla2xxx/qla_attr.c |   28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -3330,11 +3330,34 @@ struct fc_function_template qla2xxx_tran
+       .bsg_timeout = qla24xx_bsg_timeout,
+ };
++static uint
++qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
++{
++      uint supported_speeds = FC_PORTSPEED_UNKNOWN;
++
++      if (speeds & FDMI_PORT_SPEED_64GB)
++              supported_speeds |= FC_PORTSPEED_64GBIT;
++      if (speeds & FDMI_PORT_SPEED_32GB)
++              supported_speeds |= FC_PORTSPEED_32GBIT;
++      if (speeds & FDMI_PORT_SPEED_16GB)
++              supported_speeds |= FC_PORTSPEED_16GBIT;
++      if (speeds & FDMI_PORT_SPEED_8GB)
++              supported_speeds |= FC_PORTSPEED_8GBIT;
++      if (speeds & FDMI_PORT_SPEED_4GB)
++              supported_speeds |= FC_PORTSPEED_4GBIT;
++      if (speeds & FDMI_PORT_SPEED_2GB)
++              supported_speeds |= FC_PORTSPEED_2GBIT;
++      if (speeds & FDMI_PORT_SPEED_1GB)
++              supported_speeds |= FC_PORTSPEED_1GBIT;
++
++      return supported_speeds;
++}
++
+ void
+ qla2x00_init_host_attr(scsi_qla_host_t *vha)
+ {
+       struct qla_hw_data *ha = vha->hw;
+-      u32 speeds = FC_PORTSPEED_UNKNOWN;
++      u32 speeds = 0, fdmi_speed = 0;
+       fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
+       fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+@@ -3344,7 +3367,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *
+       fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
+       fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
+-      speeds = qla25xx_fdmi_port_speed_capability(ha);
++      fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
++      speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);
+       fc_host_supported_speeds(vha->host) = speeds;
+ }
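
With the translation in place, the transport class owns the mask that reaches userspace, and the result can be checked directly in sysfs. A quick verification sketch; the fc_host attribute is standard, the host number and sample output are illustrative:

#include <stdio.h>

int main(void)
{
        char buf[256];
        FILE *f = fopen("/sys/class/fc_host/host0/supported_speeds", "r");

        if (!f)
                return 1;
        /* The transport class renders the bitmask as a readable list,
         * e.g. "16 Gbit, 32 Gbit, 64 Gbit". */
        if (fgets(buf, sizeof(buf), f))
                printf("supported_speeds: %s", buf);
        fclose(f);
        return 0;
}
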
diff --git a/queue-6.0/series b/queue-6.0/series
index 6985a6150f3283628ccf642a67039fc97d6959e5..383f4f76c9dcd21bb3ea994a18c0416b8533c381 100644 (file)
--- a/queue-6.0/series
@@ -49,3 +49,9 @@ iio-adxl372-fix-unsafe-buffer-attributes.patch
 iio-adxl367-fix-unsafe-buffer-attributes.patch
 fbdev-stifb-fall-back-to-cfb_fillrect-on-32-bit-hcrx-cards.patch
 fbdev-smscufx-fix-several-use-after-free-bugs.patch
+cpufreq-intel_pstate-read-all-msrs-on-the-target-cpu.patch
+cpufreq-intel_pstate-hybrid-use-known-scaling-factor-for-p-cores.patch
+fs-binfmt_elf-fix-memory-leak-in-load_elf_binary.patch
+exec-copy-oldsighand-action-under-spin-lock.patch
+mac802154-fix-lqi-recording.patch
+scsi-qla2xxx-use-transport-defined-speed-mask-for-supported_speeds.patch