--- /dev/null
+From dbd4d7ca563fd0a8949718d35ce197e5642d5d9d Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 1 Mar 2016 14:18:50 +0000
+Subject: arm64: Rework valid_user_regs
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit dbd4d7ca563fd0a8949718d35ce197e5642d5d9d upstream.
+
+We validate pstate using PSR_MODE32_BIT, which is part of the
+user-provided pstate (and cannot be trusted). Also, we conflate
+validation of AArch32 and AArch64 pstate values, making the code
+difficult to reason about.
+
+Instead, validate the pstate value based on the associated task. The
+task may or may not be current (e.g. when using ptrace), so this must be
+passed explicitly by callers. To avoid circular header dependencies via
+sched.h, is_compat_task is pulled out of asm/ptrace.h.
+
+To make the code possible to reason about, the AArch64 and AArch32
+validation is split into separate functions. Software must respect the
+RES0 policy for SPSR bits, and thus the kernel mirrors the hardware
+policy (RAZ/WI) for bits as-yet unallocated. When these acquire an
+architected meaning writes may be permitted (potentially with additional
+validation).
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Dave Martin <dave.martin@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ rebased for v4.1+
+ This avoids a user-triggerable Oops() if a task is switched to a mode
+ not supported by the kernel (e.g. switching a 64-bit task to AArch32).
+]
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com> [backport]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/arm64/include/asm/ptrace.h | 33 +---------------
+ arch/arm64/kernel/ptrace.c | 81 +++++++++++++++++++++++++++++++++++++++-
+ arch/arm64/kernel/signal.c | 4 -
+ arch/arm64/kernel/signal32.c | 2
+ 4 files changed, 86 insertions(+), 34 deletions(-)
+
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -58,6 +58,7 @@
+ #define COMPAT_PSR_Z_BIT 0x40000000
+ #define COMPAT_PSR_N_BIT 0x80000000
+ #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
++#define COMPAT_PSR_GE_MASK 0x000f0000
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
+@@ -151,35 +152,9 @@ static inline unsigned long regs_return_
+ return regs->regs[0];
+ }
+
+-/*
+- * Are the current registers suitable for user mode? (used to maintain
+- * security in signal handlers)
+- */
+-static inline int valid_user_regs(struct user_pt_regs *regs)
+-{
+- if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) {
+- regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT);
+-
+- /* The T bit is reserved for AArch64 */
+- if (!(regs->pstate & PSR_MODE32_BIT))
+- regs->pstate &= ~COMPAT_PSR_T_BIT;
+-
+- return 1;
+- }
+-
+- /*
+- * Force PSR to something logical...
+- */
+- regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \
+- COMPAT_PSR_T_BIT | PSR_MODE32_BIT;
+-
+- if (!(regs->pstate & PSR_MODE32_BIT)) {
+- regs->pstate &= ~COMPAT_PSR_T_BIT;
+- regs->pstate |= PSR_MODE_EL0t;
+- }
+-
+- return 0;
+-}
++/* We must avoid circular header include via sched.h */
++struct task_struct;
++int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
+
+ #define instruction_pointer(regs) ((unsigned long)(regs)->pc)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -39,6 +39,7 @@
+ #include <linux/elf.h>
+
+ #include <asm/compat.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/pgtable.h>
+ #include <asm/syscall.h>
+@@ -500,7 +501,7 @@ static int gpr_set(struct task_struct *t
+ if (ret)
+ return ret;
+
+- if (!valid_user_regs(&newregs))
++ if (!valid_user_regs(&newregs, target))
+ return -EINVAL;
+
+ task_pt_regs(target)->user_regs = newregs;
+@@ -770,7 +771,7 @@ static int compat_gpr_set(struct task_st
+
+ }
+
+- if (valid_user_regs(&newregs.user_regs))
++ if (valid_user_regs(&newregs.user_regs, target))
+ *task_pt_regs(target) = newregs;
+ else
+ ret = -EINVAL;
+@@ -1272,3 +1273,79 @@ asmlinkage void syscall_trace_exit(struc
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+ }
++
++/*
++ * Bits which are always architecturally RES0 per ARM DDI 0487A.h
++ * Userspace cannot use these until they have an architectural meaning.
++ * We also reserve IL for the kernel; SS is handled dynamically.
++ */
++#define SPSR_EL1_AARCH64_RES0_BITS \
++ (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
++ GENMASK_ULL(5, 5))
++#define SPSR_EL1_AARCH32_RES0_BITS \
++ (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
++
++static int valid_compat_regs(struct user_pt_regs *regs)
++{
++ regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
++
++ if (!system_supports_mixed_endian_el0()) {
++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
++ regs->pstate |= COMPAT_PSR_E_BIT;
++ else
++ regs->pstate &= ~COMPAT_PSR_E_BIT;
++ }
++
++ if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
++ (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
++ (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
++ (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
++ return 1;
++ }
++
++ /*
++ * Force PSR to a valid 32-bit EL0t, preserving the same bits as
++ * arch/arm.
++ */
++ regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
++ COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
++ COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
++ COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
++ COMPAT_PSR_T_BIT;
++ regs->pstate |= PSR_MODE32_BIT;
++
++ return 0;
++}
++
++static int valid_native_regs(struct user_pt_regs *regs)
++{
++ regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
++
++ if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
++ (regs->pstate & PSR_D_BIT) == 0 &&
++ (regs->pstate & PSR_A_BIT) == 0 &&
++ (regs->pstate & PSR_I_BIT) == 0 &&
++ (regs->pstate & PSR_F_BIT) == 0) {
++ return 1;
++ }
++
++ /* Force PSR to a valid 64-bit EL0t */
++ regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
++
++ return 0;
++}
++
++/*
++ * Are the current registers suitable for user mode? (used to maintain
++ * security in signal handlers)
++ */
++int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
++{
++ if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
++ regs->pstate &= ~DBG_SPSR_SS;
++
++ if (is_compat_thread(task_thread_info(task)))
++ return valid_compat_regs(regs);
++ else
++ return valid_native_regs(regs);
++}
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -115,7 +115,7 @@ static int restore_sigframe(struct pt_re
+ */
+ regs->syscallno = ~0UL;
+
+- err |= !valid_user_regs(®s->user_regs);
++ err |= !valid_user_regs(®s->user_regs, current);
+
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+@@ -307,7 +307,7 @@ static void handle_signal(struct ksignal
+ /*
+ * Check that the resulting registers are actually sane.
+ */
+- ret |= !valid_user_regs(®s->user_regs);
++ ret |= !valid_user_regs(®s->user_regs, current);
+
+ /*
+ * Fast forward the stepping logic so we step into the signal
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -356,7 +356,7 @@ static int compat_restore_sigframe(struc
+ */
+ regs->syscallno = ~0UL;
+
+- err |= !valid_user_regs(®s->user_regs);
++ err |= !valid_user_regs(®s->user_regs, current);
+
+ aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
+ if (err == 0)
--- /dev/null
+From 7e1b1fc4dabd6ec8e28baa0708866e13fa93c9b3 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Fri, 10 Jun 2016 10:54:32 +0200
+Subject: base: make module_create_drivers_dir race-free
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+commit 7e1b1fc4dabd6ec8e28baa0708866e13fa93c9b3 upstream.
+
+Modules which register drivers via standard path (driver_register) in
+parallel can cause a warning:
+WARNING: CPU: 2 PID: 3492 at ../fs/sysfs/dir.c:31 sysfs_warn_dup+0x62/0x80
+sysfs: cannot create duplicate filename '/module/saa7146/drivers'
+Modules linked in: hexium_gemini(+) mxb(+) ...
+...
+Call Trace:
+...
+ [<ffffffff812e63a2>] sysfs_warn_dup+0x62/0x80
+ [<ffffffff812e6487>] sysfs_create_dir_ns+0x77/0x90
+ [<ffffffff8140f2c4>] kobject_add_internal+0xb4/0x340
+ [<ffffffff8140f5b8>] kobject_add+0x68/0xb0
+ [<ffffffff8140f631>] kobject_create_and_add+0x31/0x70
+ [<ffffffff8157a703>] module_add_driver+0xc3/0xd0
+ [<ffffffff8155e5d4>] bus_add_driver+0x154/0x280
+ [<ffffffff815604c0>] driver_register+0x60/0xe0
+ [<ffffffff8145bed0>] __pci_register_driver+0x60/0x70
+ [<ffffffffa0273e14>] saa7146_register_extension+0x64/0x90 [saa7146]
+ [<ffffffffa0033011>] hexium_init_module+0x11/0x1000 [hexium_gemini]
+...
+
+As can be (mostly) seen, driver_register causes this call sequence:
+ -> bus_add_driver
+ -> module_add_driver
+ -> module_create_drivers_dir
+The last one creates "drivers" directory in /sys/module/<...>. When
+this is done in parallel, the directory is attempted to be created
+twice at the same time.
+
+This can be easily reproduced by loading mxb and hexium_gemini in
+parallel:
+while :; do
+ modprobe mxb &
+ modprobe hexium_gemini
+ wait
+ rmmod mxb hexium_gemini saa7146_vv saa7146
+done
+
+saa7146 calls pci_register_driver for both mxb and hexium_gemini,
+which means /sys/module/saa7146/drivers is to be created for both of
+them.
+
+Fix this by a new mutex in module_create_drivers_dir which makes the
+test-and-create "drivers" dir atomic.
+
+I inverted the condition and removed 'return' to avoid multiple
+unlocks or a goto.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Fixes: fe480a2675ed (Modules: only add drivers/ direcory if needed)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/module.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -24,10 +24,12 @@ static char *make_driver_name(struct dev
+
+ static void module_create_drivers_dir(struct module_kobject *mk)
+ {
+- if (!mk || mk->drivers_dir)
+- return;
++ static DEFINE_MUTEX(drivers_dir_mutex);
+
+- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++ mutex_lock(&drivers_dir_mutex);
++ if (mk && !mk->drivers_dir)
++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++ mutex_unlock(&drivers_dir_mutex);
+ }
+
+ void module_add_driver(struct module *mod, struct device_driver *drv)
--- /dev/null
+From d3922b69617b62bb2509936b68301f837229d9f0 Mon Sep 17 00:00:00 2001
+From: Mathieu Larouche <mathieu.larouche@matrox.com>
+Date: Fri, 27 May 2016 15:12:50 -0400
+Subject: drm/mgag200: Black screen fix for G200e rev 4
+
+From: Mathieu Larouche <mathieu.larouche@matrox.com>
+
+commit d3922b69617b62bb2509936b68301f837229d9f0 upstream.
+
+- Fixed black screen for some resolutions of G200e rev4
+- Fixed testm & testn which had predetermined value.
+
+Reported-by: Jan Beulich <jbeulich@suse.com>
+
+Signed-off-by: Mathieu Larouche <mathieu.larouche@matrox.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_mode.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -194,7 +194,7 @@ static int mga_g200se_set_plls(struct mg
+ }
+ }
+
+- fvv = pllreffreq * testn / testm;
++ fvv = pllreffreq * (n + 1) / (m + 1);
+ fvv = (fvv - 800000) / 50000;
+
+ if (fvv > 15)
+@@ -214,6 +214,14 @@ static int mga_g200se_set_plls(struct mg
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, p);
++
++ if (mdev->unique_rev_id >= 0x04) {
++ WREG_DAC(0x1a, 0x09);
++ msleep(20);
++ WREG_DAC(0x1a, 0x01);
++
++ }
++
+ return 0;
+ }
+
--- /dev/null
+From ed596a4a88bd161f868ccba078557ee7ede8a6ef Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oneukum@suse.com>
+Date: Tue, 31 May 2016 14:48:15 +0200
+Subject: HID: elo: kill not flush the work
+
+From: Oliver Neukum <oneukum@suse.com>
+
+commit ed596a4a88bd161f868ccba078557ee7ede8a6ef upstream.
+
+Flushing a work that reschedules itself is not a sensible operation. It needs
+to be killed. Failure to do so leads to a kernel panic in the timer code.
+
+Signed-off-by: Oliver Neukum <ONeukum@suse.com>
+Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-elo.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device
+ struct elo_priv *priv = hid_get_drvdata(hdev);
+
+ hid_hw_stop(hdev);
+- flush_workqueue(wq);
++ cancel_delayed_work_sync(&priv->work);
+ kfree(priv);
+ }
+
--- /dev/null
+From 93a2001bdfd5376c3dc2158653034c20392d15c5 Mon Sep 17 00:00:00 2001
+From: Scott Bauer <sbauer@plzdonthack.me>
+Date: Thu, 23 Jun 2016 08:59:47 -0600
+Subject: HID: hiddev: validate num_values for HIDIOCGUSAGES, HIDIOCSUSAGES commands
+
+From: Scott Bauer <sbauer@plzdonthack.me>
+
+commit 93a2001bdfd5376c3dc2158653034c20392d15c5 upstream.
+
+This patch validates the num_values parameter from userland during the
+HIDIOCGUSAGES and HIDIOCSUSAGES commands. Previously, if the report id was set
+to HID_REPORT_ID_UNKNOWN, we would fail to validate the num_values parameter
+leading to a heap overflow.
+
+Signed-off-by: Scott Bauer <sbauer@plzdonthack.me>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/usbhid/hiddev.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(s
+ goto inval;
+ } else if (uref->usage_index >= field->report_count)
+ goto inval;
+-
+- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+- uref->usage_index + uref_multi->num_values > field->report_count))
+- goto inval;
+ }
+
++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
++ uref->usage_index + uref_multi->num_values > field->report_count))
++ goto inval;
++
+ switch (cmd) {
+ case HIDIOCGUSAGE:
+ uref->value = field->value[uref->usage_index];
--- /dev/null
+From 6dd2e27a103d716921cc4a1a96a9adc0a8e3ab57 Mon Sep 17 00:00:00 2001
+From: Allen Hung <allen_hung@dell.com>
+Date: Thu, 23 Jun 2016 16:31:30 +0800
+Subject: HID: multitouch: enable palm rejection for Windows Precision Touchpad
+
+From: Allen Hung <allen_hung@dell.com>
+
+commit 6dd2e27a103d716921cc4a1a96a9adc0a8e3ab57 upstream.
+
+The usage Confidence is mandatory for Windows Precision Touchpad devices. If
+it is examined in input_mapping on a Windows Precision Touchpad, a newly
+added quirk MT_QUIRK_CONFIDENCE designed for such devices will be applied to
+the device. A touch for which the confidence bit is not set is determined to
+be invalid.
+
+Tested on Dell XPS13 9343
+
+Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Tested-by: Andy Lutomirski <luto@kernel.org> # XPS 13 9350, BIOS 1.4.3
+Signed-off-by: Allen Hung <allen_hung@dell.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-multitouch.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_ALWAYS_VALID (1 << 4)
+ #define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
+ #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
++#define MT_QUIRK_CONFIDENCE (1 << 7)
+ #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
+ #define MT_QUIRK_NO_AREA (1 << 9)
+ #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
+@@ -78,6 +79,7 @@ struct mt_slot {
+ __s32 contactid; /* the device ContactID assigned to this slot */
+ bool touch_state; /* is the touch valid? */
+ bool inrange_state; /* is the finger in proximity of the sensor? */
++ bool confidence_state; /* is the touch made by a finger? */
+ };
+
+ struct mt_class {
+@@ -502,6 +504,9 @@ static int mt_touch_input_mapping(struct
+ mt_store_field(usage, td, hi);
+ return 1;
+ case HID_DG_CONFIDENCE:
++ if (cls->name == MT_CLS_WIN_8 &&
++ field->application == HID_DG_TOUCHPAD)
++ cls->quirks |= MT_QUIRK_CONFIDENCE;
+ mt_store_field(usage, td, hi);
+ return 1;
+ case HID_DG_TIPSWITCH:
+@@ -614,6 +619,7 @@ static void mt_complete_slot(struct mt_d
+ return;
+
+ if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
++ int active;
+ int slotnum = mt_compute_slot(td, input);
+ struct mt_slot *s = &td->curdata;
+ struct input_mt *mt = input->mt;
+@@ -628,10 +634,14 @@ static void mt_complete_slot(struct mt_d
+ return;
+ }
+
++ if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
++ s->confidence_state = 1;
++ active = (s->touch_state || s->inrange_state) &&
++ s->confidence_state;
++
+ input_mt_slot(input, slotnum);
+- input_mt_report_slot_state(input, MT_TOOL_FINGER,
+- s->touch_state || s->inrange_state);
+- if (s->touch_state || s->inrange_state) {
++ input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
++ if (active) {
+ /* this finger is in proximity of the sensor */
+ int wide = (s->w > s->h);
+ /* divided by two to match visual scale of touch */
+@@ -696,6 +706,8 @@ static void mt_process_mt_event(struct h
+ td->curdata.touch_state = value;
+ break;
+ case HID_DG_CONFIDENCE:
++ if (quirks & MT_QUIRK_CONFIDENCE)
++ td->curdata.confidence_state = value;
+ if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+ td->curvalid = value;
+ break;
--- /dev/null
+From 522e5cb76d0663c88f96b6a8301451c8efa37207 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 1 Jul 2016 16:42:55 +0200
+Subject: iommu/amd: Fix unity mapping initialization race
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 522e5cb76d0663c88f96b6a8301451c8efa37207 upstream.
+
+There is a race condition in the AMD IOMMU init code that
+causes requested unity mappings to be blocked by the IOMMU
+for a short period of time. This results in boot failures
+and IO_PAGE_FAULTs on some machines.
+
+Fix this by making sure the unity mappings are installed
+before all other DMA is blocked.
+
+Fixes: aafd8ba0ca74 ('iommu/amd: Implement add_device and remove_device')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/amd_iommu_init.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(voi
+ break;
+ }
+
++ /*
++ * Order is important here to make sure any unity map requirements are
++ * fulfilled. The unity mappings are created and written to the device
++ * table during the amd_iommu_init_api() call.
++ *
++ * After that we call init_device_table_dma() to make sure any
++ * uninitialized DTE will block DMA, and in the end we flush the caches
++ * of all IOMMUs to make sure the changes to the device table are
++ * active.
++ */
++ ret = amd_iommu_init_api();
++
+ init_device_table_dma();
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+
+- ret = amd_iommu_init_api();
+-
+ if (!ret)
+ print_iommu_info();
+
--- /dev/null
+From 9aeb26cfc2abc96be42b9df2d0f2dc5d805084ff Mon Sep 17 00:00:00 2001
+From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Date: Fri, 3 Jun 2016 11:50:30 +0100
+Subject: iommu/arm-smmu: Wire up map_sg for arm-smmu-v3
+
+From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+
+commit 9aeb26cfc2abc96be42b9df2d0f2dc5d805084ff upstream.
+
+The map_sg callback is missing from arm_smmu_ops, but is required by
+iommu.h. Similarly to most other IOMMU drivers, connect it to
+default_iommu_map_sg.
+
+Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu-v3.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1919,6 +1919,7 @@ static struct iommu_ops arm_smmu_ops = {
+ .detach_dev = arm_smmu_detach_dev,
+ .map = arm_smmu_map,
+ .unmap = arm_smmu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .add_device = arm_smmu_add_device,
+ .remove_device = arm_smmu_remove_device,
--- /dev/null
+From a4c34ff1c029e90e7d5f8dd8d29b0a93b31c3cb2 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 17 Jun 2016 11:29:48 +0200
+Subject: iommu/vt-d: Enable QI on all IOMMUs before setting root entry
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit a4c34ff1c029e90e7d5f8dd8d29b0a93b31c3cb2 upstream.
+
+This seems to be required on some X58 chipsets on systems
+with more than one IOMMU. QI does not work until it is
+enabled on all IOMMUs in the system.
+
+Reported-by: Dheeraj CVR <cvr.dheeraj@gmail.com>
+Tested-by: Dheeraj CVR <cvr.dheeraj@gmail.com>
+Fixes: 5f0a7f7614a9 ('iommu/vt-d: Make root entry visible for hardware right after allocation')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
+ }
+ }
+
+- iommu_flush_write_buffer(iommu);
+- iommu_set_root_entry(iommu);
+- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+-
+ if (!ecap_pass_through(iommu->ecap))
+ hw_pass_through = 0;
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+@@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
+ #endif
+ }
+
++ /*
++ * Now that qi is enabled on all iommus, set the root entry and flush
++ * caches. This is required on some Intel X58 chipsets, otherwise the
++ * flush_context function will loop forever and the boot hangs.
++ */
++ for_each_active_iommu(iommu, drhd) {
++ iommu_flush_write_buffer(iommu);
++ iommu_set_root_entry(iommu);
++ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
++ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
++ }
++
+ if (iommu_pass_through)
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
--- /dev/null
+From ae4ea9a2460c7fee2ae8feeb4dfe96f5f6c3e562 Mon Sep 17 00:00:00 2001
+From: Junichi Nomura <j-nomura@ce.jp.nec.com>
+Date: Fri, 10 Jun 2016 04:31:52 +0000
+Subject: ipmi: Remove smi_msg from waiting_rcv_msgs list before handle_one_recv_msg()
+
+From: Junichi Nomura <j-nomura@ce.jp.nec.com>
+
+commit ae4ea9a2460c7fee2ae8feeb4dfe96f5f6c3e562 upstream.
+
+Commit 7ea0ed2b5be8 ("ipmi: Make the message handler easier to use for
+SMI interfaces") changed handle_new_recv_msgs() to call handle_one_recv_msg()
+for a smi_msg while the smi_msg is still connected to waiting_rcv_msgs list.
+That could lead to following list corruption problems:
+
+1) low-level function treats smi_msg as not connected to list
+
+ handle_one_recv_msg() could end up calling smi_send(), which
+ assumes the msg is not connected to list.
+
+ For example, the following sequence could corrupt list by
+ doing list_add_tail() for the entry still connected to other list.
+
+ handle_new_recv_msgs()
+ msg = list_entry(waiting_rcv_msgs)
+ handle_one_recv_msg(msg)
+ handle_ipmb_get_msg_cmd(msg)
+ smi_send(msg)
+ spin_lock(xmit_msgs_lock)
+ list_add_tail(msg)
+ spin_unlock(xmit_msgs_lock)
+
+2) race between multiple handle_new_recv_msgs() instances
+
+ handle_new_recv_msgs() once releases waiting_rcv_msgs_lock before calling
+ handle_one_recv_msg() then retakes the lock and list_del() it.
+
+ If others call handle_new_recv_msgs() during the window shown below
+ list_del() will be done twice for the same smi_msg.
+
+ handle_new_recv_msgs()
+ spin_lock(waiting_rcv_msgs_lock)
+ msg = list_entry(waiting_rcv_msgs)
+ spin_unlock(waiting_rcv_msgs_lock)
+ |
+ | handle_one_recv_msg(msg)
+ |
+ spin_lock(waiting_rcv_msgs_lock)
+ list_del(msg)
+ spin_unlock(waiting_rcv_msgs_lock)
+
+Fixes: 7ea0ed2b5be8 ("ipmi: Make the message handler easier to use for SMI interfaces")
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+[Added a comment to describe why this works.]
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Ye Feng <yefeng.yl@alibaba-inc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3819,6 +3819,7 @@ static void handle_new_recv_msgs(ipmi_sm
+ while (!list_empty(&intf->waiting_rcv_msgs)) {
+ smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+ struct ipmi_smi_msg, link);
++ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
+@@ -3828,11 +3829,14 @@ static void handle_new_recv_msgs(ipmi_sm
+ if (rv > 0) {
+ /*
+ * To preserve message order, quit if we
+- * can't handle a message.
++ * can't handle a message. Add the message
++ * back at the head, this is safe because this
++ * tasklet is the only thing that pulls the
++ * messages.
+ */
++ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+ break;
+ } else {
+- list_del(&smi_msg->link);
+ if (rv == 0)
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
--- /dev/null
+From 38327424b40bcebe2de92d07312c89360ac9229a Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 16 Jun 2016 15:48:57 +0100
+Subject: KEYS: potential uninitialized variable
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 38327424b40bcebe2de92d07312c89360ac9229a upstream.
+
+If __key_link_begin() failed then "edit" would be uninitialized. I've
+added a check to fix that.
+
+This allows a random user to crash the kernel, though it's quite
+difficult to achieve. There are three ways it can be done as the user
+would have to cause an error to occur in __key_link():
+
+ (1) Cause the kernel to run out of memory. In practice, this is difficult
+ to achieve without ENOMEM cropping up elsewhere and aborting the
+ attempt.
+
+ (2) Revoke the destination keyring between the keyring ID being looked up
+ and it being tested for revocation. In practice, this is difficult to
+ time correctly because the KEYCTL_REJECT function can only be used
+ from the request-key upcall process. Further, users can only make use
+ of what's in /sbin/request-key.conf, though this does including a
+ rejection debugging test - which means that the destination keyring
+ has to be the caller's session keyring in practice.
+
+ (3) Have just enough key quota available to create a key, a new session
+ keyring for the upcall and a link in the session keyring, but not then
+ sufficient quota to create a link in the nominated destination keyring
+ so that it fails with EDQUOT.
+
+The bug can be triggered using option (3) above using something like the
+following:
+
+ echo 80 >/proc/sys/kernel/keys/root_maxbytes
+ keyctl request2 user debug:fred negate @t
+
+The above sets the quota to something much lower (80) to make the bug
+easier to trigger, but this is dependent on the system. Note also that
+the name of the keyring created contains a random number that may be
+between 1 and 10 characters in size, so may throw the test off by
+changing the amount of quota used.
+
+Assuming the failure occurs, something like the following will be seen:
+
+ kfree_debugcheck: out of range ptr 6b6b6b6b6b6b6b68h
+ ------------[ cut here ]------------
+ kernel BUG at ../mm/slab.c:2821!
+ ...
+ RIP: 0010:[<ffffffff811600f9>] kfree_debugcheck+0x20/0x25
+ RSP: 0018:ffff8804014a7de8 EFLAGS: 00010092
+ RAX: 0000000000000034 RBX: 6b6b6b6b6b6b6b68 RCX: 0000000000000000
+ RDX: 0000000000040001 RSI: 00000000000000f6 RDI: 0000000000000300
+ RBP: ffff8804014a7df0 R08: 0000000000000001 R09: 0000000000000000
+ R10: ffff8804014a7e68 R11: 0000000000000054 R12: 0000000000000202
+ R13: ffffffff81318a66 R14: 0000000000000000 R15: 0000000000000001
+ ...
+ Call Trace:
+ kfree+0xde/0x1bc
+ assoc_array_cancel_edit+0x1f/0x36
+ __key_link_end+0x55/0x63
+ key_reject_and_link+0x124/0x155
+ keyctl_reject_key+0xb6/0xe0
+ keyctl_negate_key+0x10/0x12
+ SyS_keyctl+0x9f/0xe7
+ do_syscall_64+0x63/0x13a
+ entry_SYSCALL64_slow_path+0x25/0x25
+
+Fixes: f70e2e06196a ('KEYS: Do preallocation for __key_link()')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/keys/key.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -578,7 +578,7 @@ int key_reject_and_link(struct key *key,
+
+ mutex_unlock(&key_construction_mutex);
+
+- if (keyring)
++ if (keyring && link_ret == 0)
+ __key_link_end(keyring, &key->index_key, edit);
+
+ /* wake up anyone waiting for a key to be constructed */
--- /dev/null
+From caf1ff26e1aa178133df68ac3d40815fed2187d9 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Date: Wed, 15 Jun 2016 18:00:33 +0800
+Subject: kvm: Fix irq route entries exceeding KVM_MAX_IRQ_ROUTES
+
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+
+commit caf1ff26e1aa178133df68ac3d40815fed2187d9 upstream.
+
+These days, we experienced one guest crash with 8 cores and 3 disks,
+with qemu error logs as bellow:
+
+qemu-system-x86_64: /build/qemu-2.0.0/kvm-all.c:984:
+kvm_irqchip_commit_routes: Assertion `ret == 0' failed.
+
+And then we found one patch(bdf026317d) in qemu tree, which said
+could fix this bug.
+
+Execute the following script will reproduce the BUG quickly:
+
+irq_affinity.sh
+========================================================================
+
+vda_irq_num=25
+vdb_irq_num=27
+while [ 1 ]
+do
+ for irq in {1,2,4,8,10,20,40,80}
+ do
+ echo $irq > /proc/irq/$vda_irq_num/smp_affinity
+ echo $irq > /proc/irq/$vdb_irq_num/smp_affinity
+ dd if=/dev/vda of=/dev/zero bs=4K count=100 iflag=direct
+ dd if=/dev/vdb of=/dev/zero bs=4K count=100 iflag=direct
+ done
+done
+========================================================================
+
+The following qemu log is added in the qemu code and is displayed when
+this bug reproduced:
+
+kvm_irqchip_commit_routes: max gsi: 1008, nr_allocated_irq_routes: 1024,
+irq_routes->nr: 1024, gsi_count: 1024.
+
+That's to say when irq_routes->nr == 1024, there are 1024 routing entries,
+but in the kernel code when routes->nr >= 1024, will just return -EINVAL;
+
+The nr is the number of routing entries, which is in the range
+[1 ~ KVM_MAX_IRQ_ROUTES], not the index in [0 ~ KVM_MAX_IRQ_ROUTES - 1].
+
+This patch fixes the BUG above.
+
+Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Signed-off-by: Wei Tang <tangwei@cmss.chinamobile.com>
+Signed-off-by: Zhang Zhuoyu <zhangzhuoyu@cmss.chinamobile.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2861,7 +2861,7 @@ static long kvm_vm_ioctl(struct file *fi
+ if (copy_from_user(&routing, argp, sizeof(routing)))
+ goto out;
+ r = -EINVAL;
+- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
+ goto out;
+ if (routing.flags)
+ goto out;
--- /dev/null
+From ff30ef40deca4658e27b0c596e7baf39115e858f Mon Sep 17 00:00:00 2001
+From: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Date: Sat, 18 Jun 2016 11:01:05 +0200
+Subject: KVM: nVMX: VMX instructions: fix segment checks when L1 is in long mode.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+
+commit ff30ef40deca4658e27b0c596e7baf39115e858f upstream.
+
+I couldn't get Xen to boot a L2 HVM when it was nested under KVM - it was
+getting a GP(0) on a rather unspecial vmread from Xen:
+
+ (XEN) ----[ Xen-4.7.0-rc x86_64 debug=n Not tainted ]----
+ (XEN) CPU: 1
+ (XEN) RIP: e008:[<ffff82d0801e629e>] vmx_get_segment_register+0x14e/0x450
+ (XEN) RFLAGS: 0000000000010202 CONTEXT: hypervisor (d1v0)
+ (XEN) rax: ffff82d0801e6288 rbx: ffff83003ffbfb7c rcx: fffffffffffab928
+ (XEN) rdx: 0000000000000000 rsi: 0000000000000000 rdi: ffff83000bdd0000
+ (XEN) rbp: ffff83000bdd0000 rsp: ffff83003ffbfab0 r8: ffff830038813910
+ (XEN) r9: ffff83003faf3958 r10: 0000000a3b9f7640 r11: ffff83003f82d418
+ (XEN) r12: 0000000000000000 r13: ffff83003ffbffff r14: 0000000000004802
+ (XEN) r15: 0000000000000008 cr0: 0000000080050033 cr4: 00000000001526e0
+ (XEN) cr3: 000000003fc79000 cr2: 0000000000000000
+ (XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: 0000 cs: e008
+ (XEN) Xen code around <ffff82d0801e629e> (vmx_get_segment_register+0x14e/0x450):
+ (XEN) 00 00 41 be 02 48 00 00 <44> 0f 78 74 24 08 0f 86 38 56 00 00 b8 08 68 00
+ (XEN) Xen stack trace from rsp=ffff83003ffbfab0:
+
+ ...
+
+ (XEN) Xen call trace:
+ (XEN) [<ffff82d0801e629e>] vmx_get_segment_register+0x14e/0x450
+ (XEN) [<ffff82d0801f3695>] get_page_from_gfn_p2m+0x165/0x300
+ (XEN) [<ffff82d0801bfe32>] hvmemul_get_seg_reg+0x52/0x60
+ (XEN) [<ffff82d0801bfe93>] hvm_emulate_prepare+0x53/0x70
+ (XEN) [<ffff82d0801ccacb>] handle_mmio+0x2b/0xd0
+ (XEN) [<ffff82d0801be591>] emulate.c#_hvm_emulate_one+0x111/0x2c0
+ (XEN) [<ffff82d0801cd6a4>] handle_hvm_io_completion+0x274/0x2a0
+ (XEN) [<ffff82d0801f334a>] __get_gfn_type_access+0xfa/0x270
+ (XEN) [<ffff82d08012f3bb>] timer.c#add_entry+0x4b/0xb0
+ (XEN) [<ffff82d08012f80c>] timer.c#remove_entry+0x7c/0x90
+ (XEN) [<ffff82d0801c8433>] hvm_do_resume+0x23/0x140
+ (XEN) [<ffff82d0801e4fe7>] vmx_do_resume+0xa7/0x140
+ (XEN) [<ffff82d080164aeb>] context_switch+0x13b/0xe40
+ (XEN) [<ffff82d080128e6e>] schedule.c#schedule+0x22e/0x570
+ (XEN) [<ffff82d08012c0cc>] softirq.c#__do_softirq+0x5c/0x90
+ (XEN) [<ffff82d0801602c5>] domain.c#idle_loop+0x25/0x50
+ (XEN)
+ (XEN)
+ (XEN) ****************************************
+ (XEN) Panic on CPU 1:
+ (XEN) GENERAL PROTECTION FAULT
+ (XEN) [error_code=0000]
+ (XEN) ****************************************
+
+Tracing my host KVM showed it was the one injecting the GP(0) when
+emulating the VMREAD and checking the destination segment permissions in
+get_vmx_mem_address():
+
+ 3) | vmx_handle_exit() {
+ 3) | handle_vmread() {
+ 3) | nested_vmx_check_permission() {
+ 3) | vmx_get_segment() {
+ 3) 0.074 us | vmx_read_guest_seg_base();
+ 3) 0.065 us | vmx_read_guest_seg_selector();
+ 3) 0.066 us | vmx_read_guest_seg_ar();
+ 3) 1.636 us | }
+ 3) 0.058 us | vmx_get_rflags();
+ 3) 0.062 us | vmx_read_guest_seg_ar();
+ 3) 3.469 us | }
+ 3) | vmx_get_cs_db_l_bits() {
+ 3) 0.058 us | vmx_read_guest_seg_ar();
+ 3) 0.662 us | }
+ 3) | get_vmx_mem_address() {
+ 3) 0.068 us | vmx_cache_reg();
+ 3) | vmx_get_segment() {
+ 3) 0.074 us | vmx_read_guest_seg_base();
+ 3) 0.068 us | vmx_read_guest_seg_selector();
+ 3) 0.071 us | vmx_read_guest_seg_ar();
+ 3) 1.756 us | }
+ 3) | kvm_queue_exception_e() {
+ 3) 0.066 us | kvm_multiple_exception();
+ 3) 0.684 us | }
+ 3) 4.085 us | }
+ 3) 9.833 us | }
+ 3) + 10.366 us | }
+
+Cross-checking the KVM/VMX VMREAD emulation code with the Intel Software
+Developer Manual Volume 3C - "VMREAD - Read Field from Virtual-Machine
+Control Structure", I found that we're enforcing that the destination
+operand is NOT located in a read-only data segment or any code segment when
+the L1 is in long mode - BUT that check should only happen when it is in
+protected mode.
+
+Shuffling the code a bit to make our emulation follow the specification
+allows me to boot a Xen dom0 in a nested KVM and start HVM L2 guests
+without problems.
+
+Fixes: f9eb4af67c9d ("KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions")
+Signed-off-by: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Eugene Korenevsky <ekorenevsky@gmail.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6579,7 +6579,13 @@ static int get_vmx_mem_address(struct kv
+
+ /* Checks for #GP/#SS exceptions. */
+ exn = false;
+- if (is_protmode(vcpu)) {
++ if (is_long_mode(vcpu)) {
++ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
++ * non-canonical form. This is the only check on the memory
++ * destination for long mode!
++ */
++ exn = is_noncanonical_address(*ret);
++ } else if (is_protmode(vcpu)) {
+ /* Protected mode: apply checks for segment validity in the
+ * following order:
+ * - segment type check (#GP(0) may be thrown)
+@@ -6596,17 +6602,10 @@ static int get_vmx_mem_address(struct kv
+ * execute-only code segment
+ */
+ exn = ((s.type & 0xa) == 8);
+- }
+- if (exn) {
+- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+- return 1;
+- }
+- if (is_long_mode(vcpu)) {
+- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+- * non-canonical form. This is an only check for long mode.
+- */
+- exn = is_noncanonical_address(*ret);
+- } else if (is_protmode(vcpu)) {
++ if (exn) {
++ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
++ return 1;
++ }
+ /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+ */
+ exn = (s.unusable != 0);
ipv6-fix-mem-leak-in-rt6i_pcpu.patch
arcv2-check-for-ll-sc-livelock-only-if-llsc-is-enabled.patch
arcv2-llsc-software-backoff-is-not-needed-starting-hs2.1c.patch
+keys-potential-uninitialized-variable.patch
+kvm-fix-irq-route-entries-exceeding-kvm_max_irq_routes.patch
+kvm-nvmx-vmx-instructions-fix-segment-checks-when-l1-is-in-long-mode.patch
+hid-elo-kill-not-flush-the-work.patch
+hid-hiddev-validate-num_values-for-hidiocgusages-hidiocsusages-commands.patch
+hid-multitouch-enable-palm-rejection-for-windows-precision-touchpad.patch
+tracing-handle-null-formats-in-hold_module_trace_bprintk_format.patch
+base-make-module_create_drivers_dir-race-free.patch
+iommu-arm-smmu-wire-up-map_sg-for-arm-smmu-v3.patch
+iommu-vt-d-enable-qi-on-all-iommus-before-setting-root-entry.patch
+iommu-amd-fix-unity-mapping-initialization-race.patch
+drm-mgag200-black-screen-fix-for-g200e-rev-4.patch
+ipmi-remove-smi_msg-from-waiting_rcv_msgs-list-before-handle_one_recv_msg.patch
+arm64-rework-valid_user_regs.patch
--- /dev/null
+From 70c8217acd4383e069fe1898bbad36ea4fcdbdcc Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Fri, 17 Jun 2016 16:10:42 -0400
+Subject: tracing: Handle NULL formats in hold_module_trace_bprintk_format()
+
+From: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
+
+commit 70c8217acd4383e069fe1898bbad36ea4fcdbdcc upstream.
+
+If a task uses a non constant string for the format parameter in
+trace_printk(), then the trace_printk_fmt variable is set to NULL. This
+variable is then saved in the __trace_printk_fmt section.
+
+The function hold_module_trace_bprintk_format() checks to see if duplicate
+formats are used by modules, and reuses them if so (saves them to the list
+if it is new). But this function calls lookup_format() that does a strcmp()
+to the value (which is now NULL) and can cause a kernel oops.
+
+This wasn't an issue till 3debb0a9ddb ("tracing: Fix trace_printk() to print
+when not using bprintk()") which added "__used" to the trace_printk_fmt
+variable, and before that, the kernel simply optimized it out (no NULL value
+was saved).
+
+The fix is simply to handle the NULL pointer in lookup_format() and have the
+caller ignore the value if it was NULL.
+
+Link: http://lkml.kernel.org/r/1464769870-18344-1-git-send-email-zhengjun.xing@intel.com
+
+Reported-by: xingzhen <zhengjun.xing@intel.com>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Fixes: 3debb0a9ddb ("tracing: Fix trace_printk() to print when not using bprintk()")
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_printk.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
+ static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
+ {
+ struct trace_bprintk_fmt *pos;
++
++ if (!fmt)
++ return ERR_PTR(-EINVAL);
++
+ list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
+ if (!strcmp(pos->fmt, fmt))
+ return pos;
+@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(co
+ for (iter = start; iter < end; iter++) {
+ struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
+ if (tb_fmt) {
+- *iter = tb_fmt->fmt;
++ if (!IS_ERR(tb_fmt))
++ *iter = tb_fmt->fmt;
+ continue;
+ }
+