git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 25 Feb 2022 12:35:09 +0000 (13:35 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 25 Feb 2022 12:35:09 +0000 (13:35 +0100)
added patches:
serial-8250-of-fix-mapped-region-size-when-using-reg-offset-property.patch
x86-fpu-correct-pkru-xstate-inconsistency.patch

queue-4.14/serial-8250-of-fix-mapped-region-size-when-using-reg-offset-property.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/x86-fpu-correct-pkru-xstate-inconsistency.patch [new file with mode: 0644]

diff --git a/queue-4.14/serial-8250-of-fix-mapped-region-size-when-using-reg-offset-property.patch b/queue-4.14/serial-8250-of-fix-mapped-region-size-when-using-reg-offset-property.patch
new file mode 100644 (file)
index 0000000..a2b10d6
--- /dev/null
@@ -0,0 +1,55 @@
+From foo@baz Fri Feb 25 01:30:53 PM CET 2022
+From: Robert Hancock <robert.hancock@calian.com>
+Date: Wed, 12 Jan 2022 13:42:14 -0600
+Subject: serial: 8250: of: Fix mapped region size when using reg-offset property
+
+From: Robert Hancock <robert.hancock@calian.com>
+
+commit d06b1cf28297e27127d3da54753a3a01a2fa2f28 upstream.
+
+8250_of supports a reg-offset property which is intended to handle
+cases where the device registers start at an offset inside the region
+of memory allocated to the device. The Xilinx 16550 UART, for which this
+support was initially added, requires this. However, the code did not
+adjust the overall size of the mapped region accordingly, causing the
+driver to request an area of memory past the end of the device's
+allocation. For example, if the UART was allocated an address of
+0xb0130000, size of 0x10000 and reg-offset of 0x1000 in the device
+tree, the region of memory reserved was b0131000-b0140fff, which caused
+the driver for the region starting at b0140000 to fail to probe.
+
+Fix this by subtracting reg-offset from the mapped region size.
+
+Fixes: b912b5e2cfb3 ("[POWERPC] Xilinx: of_serial support for Xilinx uart 16550.")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Robert Hancock <robert.hancock@calian.com>
+Link: https://lore.kernel.org/r/20220112194214.881844-1-robert.hancock@calian.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[sudip: adjust context]
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_of.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -102,8 +102,17 @@ static int of_platform_serial_setup(stru
+       port->mapsize = resource_size(&resource);
+
+       /* Check for shifted address mapping */
+-      if (of_property_read_u32(np, "reg-offset", &prop) == 0)
++      if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
++              if (prop >= port->mapsize) {
++                      dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
++                               prop, &port->mapsize);
++                      ret = -EINVAL;
++                      goto err_unprepare;
++              }
++
+               port->mapbase += prop;
++              port->mapsize -= prop;
++      }
+
+       /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
+       if (of_device_is_compatible(np, "mrvl,mmp-uart"))
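As a quick editorial illustration of the arithmetic being fixed (this sketch is not part of the patch; the addresses are the ones from the commit message's device-tree example), the buggy code requests b0131000-b0140fff while the fixed code requests b0131000-b013ffff:

	#include <stdio.h>

	int main(void)
	{
		/* Device-tree values from the commit message's example. */
		unsigned int base = 0xb0130000;   /* allocated base address */
		unsigned int size = 0x10000;      /* allocated region size  */
		unsigned int reg_offset = 0x1000; /* reg-offset property    */

		/* Before the fix: mapbase was shifted but mapsize was left
		 * unchanged, so the request ran past the allocation and into
		 * the next device's region at b0140000. */
		printf("buggy: %08x-%08x\n", base + reg_offset,
		       base + reg_offset + size - 1);

		/* After the fix: mapsize shrinks by reg-offset, keeping the
		 * request inside the allocation. */
		printf("fixed: %08x-%08x\n", base + reg_offset,
		       base + reg_offset + (size - reg_offset) - 1);
		return 0;
	}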
diff --git a/queue-4.14/series b/queue-4.14/series
index eac60153f28399680b2e0a51257018ea7598822a..f28a52f17e607b2335304edca05285d16e704b59 100644 (file)
@@ -4,3 +4,5 @@ parisc-unaligned-fix-fldd-and-fstd-unaligned-handlers-on-32-bit-kernel.patch
 parisc-unaligned-fix-ldw-and-stw-unalignment-handlers.patch
 sr9700-sanity-check-for-packet-length.patch
 usb-zaurus-support-another-broken-zaurus.patch
+serial-8250-of-fix-mapped-region-size-when-using-reg-offset-property.patch
+x86-fpu-correct-pkru-xstate-inconsistency.patch
diff --git a/queue-4.14/x86-fpu-correct-pkru-xstate-inconsistency.patch b/queue-4.14/x86-fpu-correct-pkru-xstate-inconsistency.patch
new file mode 100644 (file)
index 0000000..c89050b
--- /dev/null
@@ -0,0 +1,155 @@
+From bgeffon@google.com  Fri Feb 25 13:00:00 2022
+From: Brian Geffon <bgeffon@google.com>
+Date: Tue, 15 Feb 2022 11:22:33 -0800
+Subject: x86/fpu: Correct pkru/xstate inconsistency
+To: Dave Hansen <dave.hansen@intel.com>, Thomas Gleixner <tglx@linutronix.de>
+Cc: Willis Kung <williskung@google.com>, Guenter Roeck <groeck@google.com>, Borislav Petkov <bp@suse.de>, Andy Lutomirski <luto@kernel.org>, stable@vger.kernel.org, x86@kernel.org, linux-kernel@vger.kernel.org, Brian Geffon <bgeffon@google.com>
+Message-ID: <20220215192233.8717-1-bgeffon@google.com>
+
+From: Brian Geffon <bgeffon@google.com>
+
+When eagerly switching PKRU in switch_fpu_finish(), the code checks that
+current is not a kernel thread, as kernel threads never use PKRU.
+However, this_cpu_read_stable() on current_task (i.e. get_current()) can
+return a stale cached value. To resolve this, reference next_p directly
+rather than relying on current.
+
+As written, when switching from a kernel thread to a userspace thread it
+is possible to observe a cached PF_KTHREAD flag and never restore the
+PKRU. As a result, the issue only occurs when switching from a kernel
+thread to a userspace thread; switching from a non-kernel thread works
+fine, because in that situation the flags examined belong to some other
+non-kernel task and the next fpu is passed in to switch_fpu_finish().
+
+This behavior only exists between 5.2 and 5.13 when it was fixed by a
+rewrite decoupling PKRU from xstate, in:
+  commit 954436989cc5 ("x86/fpu: Remove PKRU handling from switch_fpu_finish()")
+
+Unfortunately backporting the fix from 5.13 is probably not realistic as
+it's part of a 60+ patch series which rewrites most of the PKRU handling.
+
+Fixes: 0cecca9d03c9 ("x86/fpu: Eager switch PKRU state")
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Signed-off-by: Willis Kung <williskung@google.com>
+Tested-by: Willis Kung <williskung@google.com>
+Cc: <stable@vger.kernel.org> # v5.4.x
+Cc: <stable@vger.kernel.org> # v5.10.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/fpu/internal.h | 13 ++++++++-----
+ arch/x86/kernel/process_32.c        |  6 ++----
+ arch/x86/kernel/process_64.c        |  6 ++----
+ 3 files changed, 12 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 03b3de491b5e..5ed702e2c55f 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -560,9 +560,11 @@ static inline void __fpregs_load_activate(void)
+  * The FPU context is only stored/restored for a user task and
+  * PF_KTHREAD is used to distinguish between kernel and user threads.
+  */
+-static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
++static inline void switch_fpu_prepare(struct task_struct *prev, int cpu)
+ {
+-      if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
++      struct fpu *old_fpu = &prev->thread.fpu;
++
++      if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) {
+               if (!copy_fpregs_to_fpstate(old_fpu))
+                       old_fpu->last_cpu = -1;
+               else
+@@ -581,10 +583,11 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
+  * Load PKRU from the FPU context if available. Delay loading of the
+  * complete FPU state until the return to userland.
+  */
+-static inline void switch_fpu_finish(struct fpu *new_fpu)
++static inline void switch_fpu_finish(struct task_struct *next)
+ {
+       u32 pkru_val = init_pkru_value;
+       struct pkru_state *pk;
++      struct fpu *next_fpu = &next->thread.fpu;
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return;
+@@ -598,7 +601,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+        * PKRU state is switched eagerly because it needs to be valid before we
+        * return to userland e.g. for a copy_to_user() operation.
+        */
+-      if (!(current->flags & PF_KTHREAD)) {
++      if (!(next->flags & PF_KTHREAD)) {
+               /*
+                * If the PKRU bit in xsave.header.xfeatures is not set,
+                * then the PKRU component was in init state, which means
+@@ -607,7 +610,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+                * in memory is not valid. This means pkru_val has to be
+                * set to 0 and not to init_pkru_value.
+                */
+-              pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
++              pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU);
+               pkru_val = pk ? pk->pkru : 0;
+       }
+       __write_pkru(pkru_val);
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index b8ceec4974fe..352f876950ab 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -229,14 +229,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+       struct thread_struct *prev = &prev_p->thread,
+                            *next = &next_p->thread;
+-      struct fpu *prev_fpu = &prev->fpu;
+-      struct fpu *next_fpu = &next->fpu;
+       int cpu = smp_processor_id();
+
+       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+       if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+-              switch_fpu_prepare(prev_fpu, cpu);
++              switch_fpu_prepare(prev_p, cpu);
+
+       /*
+        * Save away %gs. No need to save %fs, as it was saved on the
+@@ -292,7 +290,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+
+       this_cpu_write(current_task, next_p);
+
+-      switch_fpu_finish(next_fpu);
++      switch_fpu_finish(next_p);
+
+       /* Load the Intel cache allocation PQR MSR. */
+       resctrl_sched_in();
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index da3cc3a10d63..633788362906 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -505,15 +505,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+       struct thread_struct *prev = &prev_p->thread;
+       struct thread_struct *next = &next_p->thread;
+-      struct fpu *prev_fpu = &prev->fpu;
+-      struct fpu *next_fpu = &next->fpu;
+       int cpu = smp_processor_id();
+
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
+                    this_cpu_read(irq_count) != -1);
+
+       if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+-              switch_fpu_prepare(prev_fpu, cpu);
++              switch_fpu_prepare(prev_p, cpu);
+
+       /* We must save %fs and %gs before load_TLS() because
+        * %fs and %gs may be cleared by load_TLS().
+@@ -565,7 +563,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       this_cpu_write(current_task, next_p);
+       this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+
+-      switch_fpu_finish(next_fpu);
++      switch_fpu_finish(next_p);
+
+       /* Reload sp0. */
+       update_task_stack(next_p);
+-- 
+2.35.1.265.g69c8d7142f-goog
+
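Editorial note on the pattern above: the fix works because switch_fpu_prepare() and switch_fpu_finish() now take the task_struct explicitly instead of consulting current, whose cached per-cpu value can still point at the outgoing kernel thread. A minimal userspace sketch of that calling convention (simplified stand-in types, not kernel code; PF_KTHREAD and the init PKRU value are copied from the kernel sources):

	#include <stdio.h>

	#define PF_KTHREAD 0x00200000  /* task is a kernel thread */

	struct fpu { unsigned int pkru; };
	struct task_struct {
		unsigned int flags;
		struct fpu fpu;
	};

	/* Everything is derived from the task we are switching to, so the
	 * PF_KTHREAD check and the PKRU value always belong to the same
	 * task; a stale "current" can no longer make us skip the restore. */
	static void switch_fpu_finish(struct task_struct *next)
	{
		if (next->flags & PF_KTHREAD) {
			printf("kernel thread: PKRU not restored\n");
			return;
		}
		printf("user task: restore PKRU %#x from next's xstate\n",
		       next->fpu.pkru);
	}

	int main(void)
	{
		struct task_struct kthread = { .flags = PF_KTHREAD };
		struct task_struct user = { .flags = 0,
					    .fpu = { .pkru = 0x55555554 } };

		switch_fpu_finish(&kthread); /* skipped, as intended */
		switch_fpu_finish(&user);    /* always restores for users */
		return 0;
	}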