4.1-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Sun, 24 Jan 2016 23:16:54 +0000 (15:16 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Sun, 24 Jan 2016 23:16:54 +0000 (15:16 -0800)
added patches:
alsa-fireworks-bebob-oxfw-dice-enable-to-make-as-built-in.patch
alsa-hda-add-intel-lewisburg-device-ids-audio.patch
alsa-hda-apply-pin-fixup-for-hp-probook-6550b.patch
alsa-hda-disable-64bit-address-for-creative-hda-controllers.patch
ipmi-move-timer-init-to-before-irq-is-setup.patch
ipmi-start-the-timer-and-thread-on-internal-msgs.patch
kvm-ppc-book3s-hv-prohibit-setting-illegal-transaction-state-in-msr.patch
kvm-svm-unconditionally-intercept-db.patch
kvm-vmx-fix-smep-and-smap-without-ept.patch
kvm-x86-correctly-print-ac-in-traces.patch
kvm-x86-expose-msr_tsc_aux-to-userspace.patch
x86-boot-double-boot_heap_size-to-64kb.patch
x86-mm-add-barriers-and-document-switch_mm-vs-flush-synchronization.patch
x86-mm-improve-switch_mm-barrier-comments.patch
x86-mpx-fix-instruction-decoder-condition.patch
x86-reboot-quirks-add-imac10-1-to-pci_reboot_dmi_table.patch
x86-signal-fix-restart_syscall-number-for-x32-tasks.patch
x86-xen-don-t-reset-vcpu_info-on-a-cancelled-suspend.patch
xen-gntdev-grant-maps-should-not-be-subject-to-numa-balancing.patch

20 files changed:
queue-4.1/alsa-fireworks-bebob-oxfw-dice-enable-to-make-as-built-in.patch [new file with mode: 0644]
queue-4.1/alsa-hda-add-intel-lewisburg-device-ids-audio.patch [new file with mode: 0644]
queue-4.1/alsa-hda-apply-pin-fixup-for-hp-probook-6550b.patch [new file with mode: 0644]
queue-4.1/alsa-hda-disable-64bit-address-for-creative-hda-controllers.patch [new file with mode: 0644]
queue-4.1/ipmi-move-timer-init-to-before-irq-is-setup.patch [new file with mode: 0644]
queue-4.1/ipmi-start-the-timer-and-thread-on-internal-msgs.patch [new file with mode: 0644]
queue-4.1/kvm-ppc-book3s-hv-prohibit-setting-illegal-transaction-state-in-msr.patch [new file with mode: 0644]
queue-4.1/kvm-svm-unconditionally-intercept-db.patch [new file with mode: 0644]
queue-4.1/kvm-vmx-fix-smep-and-smap-without-ept.patch [new file with mode: 0644]
queue-4.1/kvm-x86-correctly-print-ac-in-traces.patch [new file with mode: 0644]
queue-4.1/kvm-x86-expose-msr_tsc_aux-to-userspace.patch [new file with mode: 0644]
queue-4.1/series [new file with mode: 0644]
queue-4.1/x86-boot-double-boot_heap_size-to-64kb.patch [new file with mode: 0644]
queue-4.1/x86-mm-add-barriers-and-document-switch_mm-vs-flush-synchronization.patch [new file with mode: 0644]
queue-4.1/x86-mm-improve-switch_mm-barrier-comments.patch [new file with mode: 0644]
queue-4.1/x86-mpx-fix-instruction-decoder-condition.patch [new file with mode: 0644]
queue-4.1/x86-reboot-quirks-add-imac10-1-to-pci_reboot_dmi_table.patch [new file with mode: 0644]
queue-4.1/x86-signal-fix-restart_syscall-number-for-x32-tasks.patch [new file with mode: 0644]
queue-4.1/x86-xen-don-t-reset-vcpu_info-on-a-cancelled-suspend.patch [new file with mode: 0644]
queue-4.1/xen-gntdev-grant-maps-should-not-be-subject-to-numa-balancing.patch [new file with mode: 0644]

diff --git a/queue-4.1/alsa-fireworks-bebob-oxfw-dice-enable-to-make-as-built-in.patch b/queue-4.1/alsa-fireworks-bebob-oxfw-dice-enable-to-make-as-built-in.patch
new file mode 100644 (file)
index 0000000..c6f6ad1
--- /dev/null
@@ -0,0 +1,60 @@
+From df4833886f91eea0d20e6e97066adab308625ef8 Mon Sep 17 00:00:00 2001
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Date: Sun, 18 Oct 2015 13:46:47 +0900
+Subject: ALSA: fireworks/bebob/oxfw/dice: enable to make as built-in
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+commit df4833886f91eea0d20e6e97066adab308625ef8 upstream.
+
+When committed to upstream, these four modules had wrong entries for
+Makefile. This forces them to be loadable modules even if they're set
+as built-in.
+
+This commit fixes this bug.
+
+Fixes: b5b04336015e('ALSA: fireworks: Add skelton for Fireworks based devices')
+Fixes: fd6f4b0dc167('ALSA: bebob: Add skelton for BeBoB based devices')
+Fixes: 1a4e39c2e5ca('ALSA: oxfw: Move to its own directory')
+Fixes: 14ff6a094815('ALSA: dice: Move file to its own directory')
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/firewire/bebob/Makefile     |    2 +-
+ sound/firewire/dice/Makefile      |    2 +-
+ sound/firewire/fireworks/Makefile |    2 +-
+ sound/firewire/oxfw/Makefile      |    2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/sound/firewire/bebob/Makefile
++++ b/sound/firewire/bebob/Makefile
+@@ -1,4 +1,4 @@
+ snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \
+                 bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \
+                 bebob_focusrite.o bebob_maudio.o bebob.o
+-obj-m += snd-bebob.o
++obj-$(CONFIG_SND_BEBOB) += snd-bebob.o
+--- a/sound/firewire/dice/Makefile
++++ b/sound/firewire/dice/Makefile
+@@ -1,3 +1,3 @@
+ snd-dice-objs := dice-transaction.o dice-stream.o dice-proc.o dice-midi.o \
+                dice-pcm.o dice-hwdep.o dice.o
+-obj-m += snd-dice.o
++obj-$(CONFIG_SND_DICE) += snd-dice.o
+--- a/sound/firewire/fireworks/Makefile
++++ b/sound/firewire/fireworks/Makefile
+@@ -1,4 +1,4 @@
+ snd-fireworks-objs := fireworks_transaction.o fireworks_command.o \
+                     fireworks_stream.o fireworks_proc.o fireworks_midi.o \
+                     fireworks_pcm.o fireworks_hwdep.o fireworks.o
+-obj-m += snd-fireworks.o
++obj-$(CONFIG_SND_FIREWORKS) += snd-fireworks.o
+--- a/sound/firewire/oxfw/Makefile
++++ b/sound/firewire/oxfw/Makefile
+@@ -1,3 +1,3 @@
+ snd-oxfw-objs := oxfw-command.o oxfw-stream.o oxfw-control.o oxfw-pcm.o \
+                oxfw-proc.o oxfw-midi.o oxfw-hwdep.o oxfw.o
+-obj-m += snd-oxfw.o
++obj-$(CONFIG_SND_OXFW) += snd-oxfw.o
diff --git a/queue-4.1/alsa-hda-add-intel-lewisburg-device-ids-audio.patch b/queue-4.1/alsa-hda-add-intel-lewisburg-device-ids-audio.patch
new file mode 100644 (file)
index 0000000..bbd33ed
--- /dev/null
@@ -0,0 +1,35 @@
+From 5cf92c8b3dc5da59e05dc81bdc069cedf6f38313 Mon Sep 17 00:00:00 2001
+From: Alexandra Yates <alexandra.yates@linux.intel.com>
+Date: Wed, 4 Nov 2015 15:56:09 -0800
+Subject: ALSA: hda - Add Intel Lewisburg device IDs Audio
+
+From: Alexandra Yates <alexandra.yates@linux.intel.com>
+
+commit 5cf92c8b3dc5da59e05dc81bdc069cedf6f38313 upstream.
+
+Adding Intel codename Lewisburg platform device IDs for audio.
+
+[rearranged the position by tiwai]
+
+Signed-off-by: Alexandra Yates <alexandra.yates@linux.intel.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1977,6 +1977,11 @@ static const struct pci_device_id azx_id
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       { PCI_DEVICE(0x8086, 0x8d21),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++      /* Lewisburg */
++      { PCI_DEVICE(0x8086, 0xa1f0),
++        .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++      { PCI_DEVICE(0x8086, 0xa270),
++        .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* Lynx Point-LP */
+       { PCI_DEVICE(0x8086, 0x9c20),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
diff --git a/queue-4.1/alsa-hda-apply-pin-fixup-for-hp-probook-6550b.patch b/queue-4.1/alsa-hda-apply-pin-fixup-for-hp-probook-6550b.patch
new file mode 100644 (file)
index 0000000..03fb0d3
--- /dev/null
@@ -0,0 +1,31 @@
+From c932b98c1e47312822d911c1bb76e81ef50e389c Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 4 Nov 2015 22:39:16 +0100
+Subject: ALSA: hda - Apply pin fixup for HP ProBook 6550b
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit c932b98c1e47312822d911c1bb76e81ef50e389c upstream.
+
+HP ProBook 6550b needs the same pin fixup applied to other HP B-series
+laptops with docks for making its headphone and dock headphone jacks
+working properly.  We just need to add the codec SSID to the list.
+
+Bugzilla: https://bugzilla.kernel.org/attachment.cgi?id=191971
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_sigmatel.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -702,6 +702,7 @@ static bool hp_bnb2011_with_dock(struct
+ static bool hp_blike_system(u32 subsystem_id)
+ {
+       switch (subsystem_id) {
++      case 0x103c1473: /* HP ProBook 6550b */
+       case 0x103c1520:
+       case 0x103c1521:
+       case 0x103c1523:
diff --git a/queue-4.1/alsa-hda-disable-64bit-address-for-creative-hda-controllers.patch b/queue-4.1/alsa-hda-disable-64bit-address-for-creative-hda-controllers.patch
new file mode 100644 (file)
index 0000000..a2a7cee
--- /dev/null
@@ -0,0 +1,54 @@
+From cadd16ea33a938d49aee99edd4758cc76048b399 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 27 Oct 2015 14:21:51 +0100
+Subject: ALSA: hda - Disable 64bit address for Creative HDA controllers
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit cadd16ea33a938d49aee99edd4758cc76048b399 upstream.
+
+We've had many reports that some Creative sound cards with CA0132
+don't work well.  Some reported that it starts working after reloading
+the module, while some reported it starts working when a 32bit kernel
+is used.  All these facts seem implying that the chip fails to
+communicate when the buffer is located in 64bit address.
+
+This patch addresses these issues by just adding AZX_DCAPS_NO_64BIT
+flag to the corresponding PCI entries.  I casually had a chance to
+test an SB Recon3D board, and indeed this seems helping.
+
+Although this hasn't been tested on all Creative devices, it's safer
+to assume that this restriction applies to the rest of them, too.  So
+the flag is applied to all Creative entries.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -329,6 +329,7 @@ enum {
+ #define AZX_DCAPS_PRESET_CTHDA \
+       (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
++       AZX_DCAPS_NO_64BIT |\
+        AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
+ /*
+@@ -2156,11 +2157,13 @@ static const struct pci_device_id azx_id
+         .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+         .class_mask = 0xffffff,
+         .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++        AZX_DCAPS_NO_64BIT |
+         AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #else
+       /* this entry seems still valid -- i.e. without emu20kx chip */
+       { PCI_DEVICE(0x1102, 0x0009),
+         .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++        AZX_DCAPS_NO_64BIT |
+         AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #endif
+       /* CM8888 */
diff --git a/queue-4.1/ipmi-move-timer-init-to-before-irq-is-setup.patch b/queue-4.1/ipmi-move-timer-init-to-before-irq-is-setup.patch
new file mode 100644 (file)
index 0000000..06012fc
--- /dev/null
@@ -0,0 +1,75 @@
+From 27f972d3e00b50639deb4cc1392afaeb08d3cecc Mon Sep 17 00:00:00 2001
+From: Jan Stancek <jstancek@redhat.com>
+Date: Tue, 8 Dec 2015 13:57:51 -0500
+Subject: ipmi: move timer init to before irq is setup
+
+From: Jan Stancek <jstancek@redhat.com>
+
+commit 27f972d3e00b50639deb4cc1392afaeb08d3cecc upstream.
+
+We encountered a panic on boot in ipmi_si on a dell per320 due to an
+uninitialized timer as follows.
+
+static int smi_start_processing(void       *send_info,
+                                ipmi_smi_t intf)
+{
+        /* Try to claim any interrupts. */
+        if (new_smi->irq_setup)
+                new_smi->irq_setup(new_smi);
+
+ --> IRQ arrives here and irq handler tries to modify uninitialized timer
+
+    which triggers BUG_ON(!timer->function) in __mod_timer().
+
+ Call Trace:
+   <IRQ>
+   [<ffffffffa0532617>] start_new_msg+0x47/0x80 [ipmi_si]
+   [<ffffffffa053269e>] start_check_enables+0x4e/0x60 [ipmi_si]
+   [<ffffffffa0532bd8>] smi_event_handler+0x1e8/0x640 [ipmi_si]
+   [<ffffffff810f5584>] ? __rcu_process_callbacks+0x54/0x350
+   [<ffffffffa053327c>] si_irq_handler+0x3c/0x60 [ipmi_si]
+   [<ffffffff810efaf0>] handle_IRQ_event+0x60/0x170
+   [<ffffffff810f245e>] handle_edge_irq+0xde/0x180
+   [<ffffffff8100fc59>] handle_irq+0x49/0xa0
+   [<ffffffff8154643c>] do_IRQ+0x6c/0xf0
+   [<ffffffff8100ba53>] ret_from_intr+0x0/0x11
+
+        /* Set up the timer that drives the interface. */
+        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+
+The following patch fixes the problem.
+
+To: Openipmi-developer@lists.sourceforge.net
+To: Corey Minyard <minyard@acm.org>
+CC: linux-kernel@vger.kernel.org
+
+Signed-off-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Tony Camuso <tcamuso@redhat.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_si_intf.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1218,14 +1218,14 @@ static int smi_start_processing(void
+       new_smi->intf = intf;
+-      /* Try to claim any interrupts. */
+-      if (new_smi->irq_setup)
+-              new_smi->irq_setup(new_smi);
+-
+       /* Set up the timer that drives the interface. */
+       setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+       smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
++      /* Try to claim any interrupts. */
++      if (new_smi->irq_setup)
++              new_smi->irq_setup(new_smi);
++
+       /*
+        * Check if the user forcefully enabled the daemon.
+        */
diff --git a/queue-4.1/ipmi-start-the-timer-and-thread-on-internal-msgs.patch b/queue-4.1/ipmi-start-the-timer-and-thread-on-internal-msgs.patch
new file mode 100644 (file)
index 0000000..a86f6ae
--- /dev/null
@@ -0,0 +1,196 @@
+From 0cfec916e86d881e209de4b4ae9959a6271e6660 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <cminyard@mvista.com>
+Date: Sat, 5 Sep 2015 17:44:13 -0500
+Subject: ipmi: Start the timer and thread on internal msgs
+
+From: Corey Minyard <cminyard@mvista.com>
+
+commit 0cfec916e86d881e209de4b4ae9959a6271e6660 upstream.
+
+The timer and thread were not being started for internal messages,
+so in interrupt mode if something hung the timer would never go
+off and clean things up.  Factor out the internal message sending
+and start the timer for those messages, too.
+
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Gouji, Masayuki <gouji.masayuki@jp.fujitsu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_si_intf.c |   73 +++++++++++++++++++++++----------------
+ 1 file changed, 44 insertions(+), 29 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -404,18 +404,42 @@ static enum si_sm_result start_next_msg(
+       return rv;
+ }
+-static void start_check_enables(struct smi_info *smi_info)
++static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
++{
++      smi_info->last_timeout_jiffies = jiffies;
++      mod_timer(&smi_info->si_timer, new_val);
++      smi_info->timer_running = true;
++}
++
++/*
++ * Start a new message and (re)start the timer and thread.
++ */
++static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
++                        unsigned int size)
++{
++      smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
++
++      if (smi_info->thread)
++              wake_up_process(smi_info->thread);
++
++      smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
++}
++
++static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+ {
+       unsigned char msg[2];
+       msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+       msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+-      smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
++      if (start_timer)
++              start_new_msg(smi_info, msg, 2);
++      else
++              smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       smi_info->si_state = SI_CHECKING_ENABLES;
+ }
+-static void start_clear_flags(struct smi_info *smi_info)
++static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+ {
+       unsigned char msg[3];
+@@ -424,7 +448,10 @@ static void start_clear_flags(struct smi
+       msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+       msg[2] = WDT_PRE_TIMEOUT_INT;
+-      smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
++      if (start_timer)
++              start_new_msg(smi_info, msg, 3);
++      else
++              smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       smi_info->si_state = SI_CLEARING_FLAGS;
+ }
+@@ -434,10 +461,8 @@ static void start_getting_msg_queue(stru
+       smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+       smi_info->curr_msg->data_size = 2;
+-      smi_info->handlers->start_transaction(
+-              smi_info->si_sm,
+-              smi_info->curr_msg->data,
+-              smi_info->curr_msg->data_size);
++      start_new_msg(smi_info, smi_info->curr_msg->data,
++                    smi_info->curr_msg->data_size);
+       smi_info->si_state = SI_GETTING_MESSAGES;
+ }
+@@ -447,20 +472,11 @@ static void start_getting_events(struct
+       smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+       smi_info->curr_msg->data_size = 2;
+-      smi_info->handlers->start_transaction(
+-              smi_info->si_sm,
+-              smi_info->curr_msg->data,
+-              smi_info->curr_msg->data_size);
++      start_new_msg(smi_info, smi_info->curr_msg->data,
++                    smi_info->curr_msg->data_size);
+       smi_info->si_state = SI_GETTING_EVENTS;
+ }
+-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+-{
+-      smi_info->last_timeout_jiffies = jiffies;
+-      mod_timer(&smi_info->si_timer, new_val);
+-      smi_info->timer_running = true;
+-}
+-
+ /*
+  * When we have a situtaion where we run out of memory and cannot
+  * allocate messages, we just leave them in the BMC and run the system
+@@ -470,11 +486,11 @@ static void smi_mod_timer(struct smi_inf
+  * Note that we cannot just use disable_irq(), since the interrupt may
+  * be shared.
+  */
+-static inline bool disable_si_irq(struct smi_info *smi_info)
++static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+ {
+       if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+               smi_info->interrupt_disabled = true;
+-              start_check_enables(smi_info);
++              start_check_enables(smi_info, start_timer);
+               return true;
+       }
+       return false;
+@@ -484,7 +500,7 @@ static inline bool enable_si_irq(struct
+ {
+       if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+               smi_info->interrupt_disabled = false;
+-              start_check_enables(smi_info);
++              start_check_enables(smi_info, true);
+               return true;
+       }
+       return false;
+@@ -502,7 +518,7 @@ static struct ipmi_smi_msg *alloc_msg_ha
+       msg = ipmi_alloc_smi_msg();
+       if (!msg) {
+-              if (!disable_si_irq(smi_info))
++              if (!disable_si_irq(smi_info, true))
+                       smi_info->si_state = SI_NORMAL;
+       } else if (enable_si_irq(smi_info)) {
+               ipmi_free_smi_msg(msg);
+@@ -518,7 +534,7 @@ static void handle_flags(struct smi_info
+               /* Watchdog pre-timeout */
+               smi_inc_stat(smi_info, watchdog_pretimeouts);
+-              start_clear_flags(smi_info);
++              start_clear_flags(smi_info, true);
+               smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+               if (smi_info->intf)
+                       ipmi_smi_watchdog_pretimeout(smi_info->intf);
+@@ -870,8 +886,7 @@ static enum si_sm_result smi_event_handl
+                       msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+                       msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+-                      smi_info->handlers->start_transaction(
+-                              smi_info->si_sm, msg, 2);
++                      start_new_msg(smi_info, msg, 2);
+                       smi_info->si_state = SI_GETTING_FLAGS;
+                       goto restart;
+               }
+@@ -901,7 +916,7 @@ static enum si_sm_result smi_event_handl
+                * disable and messages disabled.
+                */
+               if (smi_info->supports_event_msg_buff || smi_info->irq) {
+-                      start_check_enables(smi_info);
++                      start_check_enables(smi_info, true);
+               } else {
+                       smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+                       if (!smi_info->curr_msg)
+@@ -3515,7 +3530,7 @@ static int try_smi_init(struct smi_info
+        * Start clearing the flags before we enable interrupts or the
+        * timer to avoid racing with the timer.
+        */
+-      start_clear_flags(new_smi);
++      start_clear_flags(new_smi, false);
+       /*
+        * IRQ is defined to be set when non-zero.  req_events will
+@@ -3817,7 +3832,7 @@ static void cleanup_one_si(struct smi_in
+               poll(to_clean);
+               schedule_timeout_uninterruptible(1);
+       }
+-      disable_si_irq(to_clean);
++      disable_si_irq(to_clean, false);
+       while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+               poll(to_clean);
+               schedule_timeout_uninterruptible(1);
diff --git a/queue-4.1/kvm-ppc-book3s-hv-prohibit-setting-illegal-transaction-state-in-msr.patch b/queue-4.1/kvm-ppc-book3s-hv-prohibit-setting-illegal-transaction-state-in-msr.patch
new file mode 100644 (file)
index 0000000..9e9f595
--- /dev/null
@@ -0,0 +1,51 @@
+From c20875a3e638e4a03e099b343ec798edd1af5cc6 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Thu, 12 Nov 2015 16:43:02 +1100
+Subject: KVM: PPC: Book3S HV: Prohibit setting illegal transaction state in MSR
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit c20875a3e638e4a03e099b343ec798edd1af5cc6 upstream.
+
+Currently it is possible for userspace (e.g. QEMU) to set a value
+for the MSR for a guest VCPU which has both of the TS bits set,
+which is an illegal combination.  The result of this is that when
+we execute a hrfid (hypervisor return from interrupt doubleword)
+instruction to enter the guest, the CPU will take a TM Bad Thing
+type of program interrupt (vector 0x700).
+
+Now, if PR KVM is configured in the kernel along with HV KVM, we
+actually handle this without crashing the host or giving hypervisor
+privilege to the guest; instead what happens is that we deliver a
+program interrupt to the guest, with SRR0 reflecting the address
+of the hrfid instruction and SRR1 containing the MSR value at that
+point.  If PR KVM is not configured in the kernel, then we try to
+run the host's program interrupt handler with the MMU set to the
+guest context, which almost certainly causes a host crash.
+
+This closes the hole by making kvmppc_set_msr_hv() check for the
+illegal combination and force the TS field to a safe value (00,
+meaning non-transactional).
+
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -210,6 +210,12 @@ static void kvmppc_core_vcpu_put_hv(stru
+ static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+ {
++      /*
++       * Check for illegal transactional state bit combination
++       * and if we find it, force the TS field to a safe state.
++       */
++      if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
++              msr &= ~MSR_TS_MASK;
+       vcpu->arch.shregs.msr = msr;
+       kvmppc_end_cede(vcpu);
+ }
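
As a standalone illustration of the TS-field check added above, the sketch below sanitizes an MSR value the same way kvmppc_set_msr_hv() now does; the MSR_TS_* bit positions used here are assumptions of this example, not taken from the kernel headers:

    /* minimal user-space sketch, not part of the patch */
    #include <stdio.h>

    #define MSR_TS_S     (1ULL << 33)          /* suspended (assumed bit position) */
    #define MSR_TS_T     (1ULL << 34)          /* active    (assumed bit position) */
    #define MSR_TS_MASK  (MSR_TS_S | MSR_TS_T)

    static unsigned long long sanitize_msr(unsigned long long msr)
    {
            /* TS = 0b11 is the illegal combination; force it to 0b00
             * (non-transactional) and leave every other value alone. */
            if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                    msr &= ~MSR_TS_MASK;
            return msr;
    }

    int main(void)
    {
            printf("%#llx\n", sanitize_msr(MSR_TS_MASK | 0x1));  /* TS cleared -> 0x1 */
            printf("%#llx\n", sanitize_msr(MSR_TS_S | 0x1));     /* left unchanged */
            return 0;
    }
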
diff --git a/queue-4.1/kvm-svm-unconditionally-intercept-db.patch b/queue-4.1/kvm-svm-unconditionally-intercept-db.patch
new file mode 100644 (file)
index 0000000..1b640a0
--- /dev/null
@@ -0,0 +1,80 @@
+From cbdb967af3d54993f5814f1cee0ed311a055377d Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 10 Nov 2015 09:14:39 +0100
+Subject: KVM: svm: unconditionally intercept #DB
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit cbdb967af3d54993f5814f1cee0ed311a055377d upstream.
+
+This is needed to avoid the possibility that the guest triggers
+an infinite stream of #DB exceptions (CVE-2015-8104).
+
+VMX is not affected: because it does not save DR6 in the VMCS,
+it already intercepts #DB unconditionally.
+
+Reported-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c |   14 +++-----------
+ 1 file changed, 3 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1106,6 +1106,7 @@ static void init_vmcb(struct vcpu_svm *s
+       set_exception_intercept(svm, UD_VECTOR);
+       set_exception_intercept(svm, MC_VECTOR);
+       set_exception_intercept(svm, AC_VECTOR);
++      set_exception_intercept(svm, DB_VECTOR);
+       set_intercept(svm, INTERCEPT_INTR);
+       set_intercept(svm, INTERCEPT_NMI);
+@@ -1638,20 +1639,13 @@ static void svm_set_segment(struct kvm_v
+       mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
++static void update_bp_intercept(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
+-      clr_exception_intercept(svm, DB_VECTOR);
+       clr_exception_intercept(svm, BP_VECTOR);
+-      if (svm->nmi_singlestep)
+-              set_exception_intercept(svm, DB_VECTOR);
+-
+       if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+-              if (vcpu->guest_debug &
+-                  (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+-                      set_exception_intercept(svm, DB_VECTOR);
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+                       set_exception_intercept(svm, BP_VECTOR);
+       } else
+@@ -1757,7 +1751,6 @@ static int db_interception(struct vcpu_s
+               if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
+                       svm->vmcb->save.rflags &=
+                               ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+-              update_db_bp_intercept(&svm->vcpu);
+       }
+       if (svm->vcpu.guest_debug &
+@@ -3751,7 +3744,6 @@ static void enable_nmi_window(struct kvm
+        */
+       svm->nmi_singlestep = true;
+       svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+-      update_db_bp_intercept(vcpu);
+ }
+ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
+@@ -4367,7 +4359,7 @@ static struct kvm_x86_ops svm_x86_ops =
+       .vcpu_load = svm_vcpu_load,
+       .vcpu_put = svm_vcpu_put,
+-      .update_db_bp_intercept = update_db_bp_intercept,
++      .update_db_bp_intercept = update_bp_intercept,
+       .get_msr = svm_get_msr,
+       .set_msr = svm_set_msr,
+       .get_segment_base = svm_get_segment_base,
diff --git a/queue-4.1/kvm-vmx-fix-smep-and-smap-without-ept.patch b/queue-4.1/kvm-vmx-fix-smep-and-smap-without-ept.patch
new file mode 100644 (file)
index 0000000..0a36551
--- /dev/null
@@ -0,0 +1,59 @@
+From 656ec4a4928a3db7d16e5cb9bce351a478cfd3d5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Mon, 2 Nov 2015 22:20:00 +0100
+Subject: KVM: VMX: fix SMEP and SMAP without EPT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+
+commit 656ec4a4928a3db7d16e5cb9bce351a478cfd3d5 upstream.
+
+The comment in code had it mostly right, but we enable paging for
+emulated real mode regardless of EPT.
+
+Without EPT (which implies emulated real mode), secondary VCPUs won't
+start unless we disable SM[AE]P when the guest doesn't use paging.
+
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c |   19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3652,20 +3652,21 @@ static int vmx_set_cr4(struct kvm_vcpu *
+               if (!is_paging(vcpu)) {
+                       hw_cr4 &= ~X86_CR4_PAE;
+                       hw_cr4 |= X86_CR4_PSE;
+-                      /*
+-                       * SMEP/SMAP is disabled if CPU is in non-paging mode
+-                       * in hardware. However KVM always uses paging mode to
+-                       * emulate guest non-paging mode with TDP.
+-                       * To emulate this behavior, SMEP/SMAP needs to be
+-                       * manually disabled when guest switches to non-paging
+-                       * mode.
+-                       */
+-                      hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+               } else if (!(cr4 & X86_CR4_PAE)) {
+                       hw_cr4 &= ~X86_CR4_PAE;
+               }
+       }
++      if (!enable_unrestricted_guest && !is_paging(vcpu))
++              /*
++               * SMEP/SMAP is disabled if CPU is in non-paging mode in
++               * hardware.  However KVM always uses paging mode without
++               * unrestricted guest.
++               * To emulate this behavior, SMEP/SMAP needs to be manually
++               * disabled when guest switches to non-paging mode.
++               */
++              hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
++
+       vmcs_writel(CR4_READ_SHADOW, cr4);
+       vmcs_writel(GUEST_CR4, hw_cr4);
+       return 0;
diff --git a/queue-4.1/kvm-x86-correctly-print-ac-in-traces.patch b/queue-4.1/kvm-x86-correctly-print-ac-in-traces.patch
new file mode 100644 (file)
index 0000000..5cfaef2
--- /dev/null
@@ -0,0 +1,31 @@
+From aba2f06c070f604e388cf77b1dcc7f4cf4577eb0 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 12 Nov 2015 16:42:18 +0100
+Subject: KVM: x86: correctly print #AC in traces
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit aba2f06c070f604e388cf77b1dcc7f4cf4577eb0 upstream.
+
+Poor #AC was so unimportant until a few days ago that we were
+not even tracing its name correctly.  But now it's all over
+the place.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/trace.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
+ #define kvm_trace_sym_exc                                             \
+       EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),  \
+       EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),           \
+-      EXS(MF), EXS(MC)
++      EXS(MF), EXS(AC), EXS(MC)
+ /*
+  * Tracepoint for kvm interrupt injection:
diff --git a/queue-4.1/kvm-x86-expose-msr_tsc_aux-to-userspace.patch b/queue-4.1/kvm-x86-expose-msr_tsc_aux-to-userspace.patch
new file mode 100644 (file)
index 0000000..803232c
--- /dev/null
@@ -0,0 +1,54 @@
+From 9dbe6cf941a6fe82933aef565e4095fb10f65023 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 12 Nov 2015 14:49:17 +0100
+Subject: KVM: x86: expose MSR_TSC_AUX to userspace
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9dbe6cf941a6fe82933aef565e4095fb10f65023 upstream.
+
+If we do not do this, it is not properly saved and restored across
+migration.  Windows notices due to its self-protection mechanisms,
+and is very upset about it (blue screen of death).
+
+Cc: Radim Krcmar <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -940,7 +940,7 @@ static u32 msrs_to_save[] = {
+       MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+ #endif
+       MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+-      MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
++      MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ };
+ static unsigned num_msrs_to_save;
+@@ -4117,16 +4117,17 @@ static void kvm_init_msr_list(void)
+               /*
+                * Even MSRs that are valid in the host may not be exposed
+-               * to the guests in some cases.  We could work around this
+-               * in VMX with the generic MSR save/load machinery, but it
+-               * is not really worthwhile since it will really only
+-               * happen with nested virtualization.
++               * to the guests in some cases.
+                */
+               switch (msrs_to_save[i]) {
+               case MSR_IA32_BNDCFGS:
+                       if (!kvm_x86_ops->mpx_supported())
+                               continue;
+                       break;
++              case MSR_TSC_AUX:
++                      if (!kvm_x86_ops->rdtscp_supported())
++                              continue;
++                      break;
+               default:
+                       break;
+               }
diff --git a/queue-4.1/series b/queue-4.1/series
new file mode 100644 (file)
index 0000000..2f6d108
--- /dev/null
@@ -0,0 +1,19 @@
+x86-mpx-fix-instruction-decoder-condition.patch
+x86-signal-fix-restart_syscall-number-for-x32-tasks.patch
+xen-gntdev-grant-maps-should-not-be-subject-to-numa-balancing.patch
+x86-xen-don-t-reset-vcpu_info-on-a-cancelled-suspend.patch
+kvm-vmx-fix-smep-and-smap-without-ept.patch
+kvm-svm-unconditionally-intercept-db.patch
+kvm-ppc-book3s-hv-prohibit-setting-illegal-transaction-state-in-msr.patch
+kvm-x86-expose-msr_tsc_aux-to-userspace.patch
+kvm-x86-correctly-print-ac-in-traces.patch
+x86-reboot-quirks-add-imac10-1-to-pci_reboot_dmi_table.patch
+x86-boot-double-boot_heap_size-to-64kb.patch
+x86-mm-add-barriers-and-document-switch_mm-vs-flush-synchronization.patch
+x86-mm-improve-switch_mm-barrier-comments.patch
+ipmi-start-the-timer-and-thread-on-internal-msgs.patch
+ipmi-move-timer-init-to-before-irq-is-setup.patch
+alsa-hda-disable-64bit-address-for-creative-hda-controllers.patch
+alsa-hda-add-intel-lewisburg-device-ids-audio.patch
+alsa-hda-apply-pin-fixup-for-hp-probook-6550b.patch
+alsa-fireworks-bebob-oxfw-dice-enable-to-make-as-built-in.patch
diff --git a/queue-4.1/x86-boot-double-boot_heap_size-to-64kb.patch b/queue-4.1/x86-boot-double-boot_heap_size-to-64kb.patch
new file mode 100644 (file)
index 0000000..779c462
--- /dev/null
@@ -0,0 +1,43 @@
+From 8c31902cffc4d716450be549c66a67a8a3dd479c Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" <hjl.tools@gmail.com>
+Date: Mon, 4 Jan 2016 10:17:09 -0800
+Subject: x86/boot: Double BOOT_HEAP_SIZE to 64KB
+
+From: "H.J. Lu" <hjl.tools@gmail.com>
+
+commit 8c31902cffc4d716450be549c66a67a8a3dd479c upstream.
+
+When decompressing kernel image during x86 bootup, malloc memory
+for ELF program headers may run out of heap space, which leads
+to system halt.  This patch doubles BOOT_HEAP_SIZE to 64KB.
+
+Tested with 32-bit kernel which failed to boot without this patch.
+
+Signed-off-by: H.J. Lu <hjl.tools@gmail.com>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/boot.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -27,7 +27,7 @@
+ #define BOOT_HEAP_SIZE             0x400000
+ #else /* !CONFIG_KERNEL_BZIP2 */
+-#define BOOT_HEAP_SIZE        0x8000
++#define BOOT_HEAP_SIZE        0x10000
+ #endif /* !CONFIG_KERNEL_BZIP2 */
diff --git a/queue-4.1/x86-mm-add-barriers-and-document-switch_mm-vs-flush-synchronization.patch b/queue-4.1/x86-mm-add-barriers-and-document-switch_mm-vs-flush-synchronization.patch
new file mode 100644 (file)
index 0000000..3e240c6
--- /dev/null
@@ -0,0 +1,158 @@
+From 71b3c126e61177eb693423f2e18a1914205b165e Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Wed, 6 Jan 2016 12:21:01 -0800
+Subject: x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 71b3c126e61177eb693423f2e18a1914205b165e upstream.
+
+When switch_mm() activates a new PGD, it also sets a bit that
+tells other CPUs that the PGD is in use so that TLB flush IPIs
+will be sent.  In order for that to work correctly, the bit
+needs to be visible prior to loading the PGD and therefore
+starting to fill the local TLB.
+
+Document all the barriers that make this work correctly and add
+a couple that were missing.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-mm@kvack.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mmu_context.h |   33 ++++++++++++++++++++++++++++++++-
+ arch/x86/mm/tlb.c                  |   29 ++++++++++++++++++++++++++---
+ 2 files changed, 58 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -104,8 +104,34 @@ static inline void switch_mm(struct mm_s
+ #endif
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+-              /* Re-load page tables */
++              /*
++               * Re-load page tables.
++               *
++               * This logic has an ordering constraint:
++               *
++               *  CPU 0: Write to a PTE for 'next'
++               *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
++               *  CPU 1: set bit 1 in next's mm_cpumask
++               *  CPU 1: load from the PTE that CPU 0 writes (implicit)
++               *
++               * We need to prevent an outcome in which CPU 1 observes
++               * the new PTE value and CPU 0 observes bit 1 clear in
++               * mm_cpumask.  (If that occurs, then the IPI will never
++               * be sent, and CPU 0's TLB will contain a stale entry.)
++               *
++               * The bad outcome can occur if either CPU's load is
++               * reordered before that CPU's store, so both CPUs much
++               * execute full barriers to prevent this from happening.
++               *
++               * Thus, switch_mm needs a full barrier between the
++               * store to mm_cpumask and any operation that could load
++               * from next->pgd.  This barrier synchronizes with
++               * remote TLB flushers.  Fortunately, load_cr3 is
++               * serializing and thus acts as a full barrier.
++               *
++               */
+               load_cr3(next->pgd);
++
+               trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+               /* Stop flush ipis for the previous mm */
+@@ -142,10 +168,15 @@ static inline void switch_mm(struct mm_s
+                        * schedule, protecting us from simultaneous changes.
+                        */
+                       cpumask_set_cpu(cpu, mm_cpumask(next));
++
+                       /*
+                        * We were in lazy tlb mode and leave_mm disabled
+                        * tlb flush IPI delivery. We must reload CR3
+                        * to make sure to use no freed page tables.
++                       *
++                       * As above, this is a barrier that forces
++                       * TLB repopulation to be ordered after the
++                       * store to mm_cpumask.
+                        */
+                       load_cr3(next->pgd);
+                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -160,7 +160,10 @@ void flush_tlb_current_task(void)
+       preempt_disable();
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
++
++      /* This is an implicit full barrier that synchronizes with switch_mm. */
+       local_flush_tlb();
++
+       trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+       if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+@@ -187,17 +190,29 @@ void flush_tlb_mm_range(struct mm_struct
+       unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+       preempt_disable();
+-      if (current->active_mm != mm)
++      if (current->active_mm != mm) {
++              /* Synchronize with switch_mm. */
++              smp_mb();
++
+               goto out;
++      }
+       if (!current->mm) {
+               leave_mm(smp_processor_id());
++
++              /* Synchronize with switch_mm. */
++              smp_mb();
++
+               goto out;
+       }
+       if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+               base_pages_to_flush = (end - start) >> PAGE_SHIFT;
++      /*
++       * Both branches below are implicit full barriers (MOV to CR or
++       * INVLPG) that synchronize with switch_mm.
++       */
+       if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+               base_pages_to_flush = TLB_FLUSH_ALL;
+               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+@@ -227,10 +242,18 @@ void flush_tlb_page(struct vm_area_struc
+       preempt_disable();
+       if (current->active_mm == mm) {
+-              if (current->mm)
++              if (current->mm) {
++                      /*
++                       * Implicit full barrier (INVLPG) that synchronizes
++                       * with switch_mm.
++                       */
+                       __flush_tlb_one(start);
+-              else
++              } else {
+                       leave_mm(smp_processor_id());
++
++                      /* Synchronize with switch_mm. */
++                      smp_mb();
++              }
+       }
+       if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
diff --git a/queue-4.1/x86-mm-improve-switch_mm-barrier-comments.patch b/queue-4.1/x86-mm-improve-switch_mm-barrier-comments.patch
new file mode 100644 (file)
index 0000000..dea88fe
--- /dev/null
@@ -0,0 +1,67 @@
+From 4eaffdd5a5fe6ff9f95e1ab4de1ac904d5e0fa8b Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 12 Jan 2016 12:47:40 -0800
+Subject: x86/mm: Improve switch_mm() barrier comments
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 4eaffdd5a5fe6ff9f95e1ab4de1ac904d5e0fa8b upstream.
+
+My previous comments were still a bit confusing and there was a
+typo. Fix it up.
+
+Reported-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 71b3c126e611 ("x86/mm: Add barriers and document switch_mm()-vs-flush synchronization")
+Link: http://lkml.kernel.org/r/0a0b43cdcdd241c5faaaecfbcc91a155ddedc9a1.1452631609.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mmu_context.h |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -120,14 +120,16 @@ static inline void switch_mm(struct mm_s
+                * be sent, and CPU 0's TLB will contain a stale entry.)
+                *
+                * The bad outcome can occur if either CPU's load is
+-               * reordered before that CPU's store, so both CPUs much
++               * reordered before that CPU's store, so both CPUs must
+                * execute full barriers to prevent this from happening.
+                *
+                * Thus, switch_mm needs a full barrier between the
+                * store to mm_cpumask and any operation that could load
+-               * from next->pgd.  This barrier synchronizes with
+-               * remote TLB flushers.  Fortunately, load_cr3 is
+-               * serializing and thus acts as a full barrier.
++               * from next->pgd.  TLB fills are special and can happen
++               * due to instruction fetches or for no reason at all,
++               * and neither LOCK nor MFENCE orders them.
++               * Fortunately, load_cr3() is serializing and gives the
++               * ordering guarantee we need.
+                *
+                */
+               load_cr3(next->pgd);
+@@ -174,9 +176,8 @@ static inline void switch_mm(struct mm_s
+                        * tlb flush IPI delivery. We must reload CR3
+                        * to make sure to use no freed page tables.
+                        *
+-                       * As above, this is a barrier that forces
+-                       * TLB repopulation to be ordered after the
+-                       * store to mm_cpumask.
++                       * As above, load_cr3() is serializing and orders TLB
++                       * fills with respect to the mm_cpumask write.
+                        */
+                       load_cr3(next->pgd);
+                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
diff --git a/queue-4.1/x86-mpx-fix-instruction-decoder-condition.patch b/queue-4.1/x86-mpx-fix-instruction-decoder-condition.patch
new file mode 100644 (file)
index 0000000..e43867b
--- /dev/null
@@ -0,0 +1,64 @@
+From 8e8efe0379bd93e8219ca0fc6fa80b5dd85b09cb Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave.hansen@linux.intel.com>
+Date: Mon, 30 Nov 2015 16:31:13 -0800
+Subject: x86/mpx: Fix instruction decoder condition
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+commit 8e8efe0379bd93e8219ca0fc6fa80b5dd85b09cb upstream.
+
+MPX decodes instructions in order to tell which bounds register
+was violated.  Part of this decoding involves looking at the "REX
+prefix" which is a special instrucion prefix used to retrofit
+support for new registers in to old instructions.
+
+The X86_REX_*() macros are defined to return actual bit values:
+
+       #define X86_REX_R(rex) ((rex) & 4)
+
+*not* boolean values.  However, the MPX code was checking for
+them like they were booleans.  This might have led to us
+mis-decoding the "REX prefix" and giving false information out to
+userspace about bounds violations.  X86_REX_B() actually is bit 1,
+so this is really only broken for the X86_REX_X() case.
+
+Fix the conditionals up to tolerate the non-boolean values.
+
+Fixes: fcc7ffd67991 "x86, mpx: Decode MPX instruction to get bound violation information"
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: x86@kernel.org
+Cc: Dave Hansen <dave@sr71.net>
+Link: http://lkml.kernel.org/r/20151201003113.D800C1E0@viggo.jf.intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/mpx.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -120,19 +120,19 @@ static int get_reg_offset(struct insn *i
+       switch (type) {
+       case REG_TYPE_RM:
+               regno = X86_MODRM_RM(insn->modrm.value);
+-              if (X86_REX_B(insn->rex_prefix.value) == 1)
++              if (X86_REX_B(insn->rex_prefix.value))
+                       regno += 8;
+               break;
+       case REG_TYPE_INDEX:
+               regno = X86_SIB_INDEX(insn->sib.value);
+-              if (X86_REX_X(insn->rex_prefix.value) == 1)
++              if (X86_REX_X(insn->rex_prefix.value))
+                       regno += 8;
+               break;
+       case REG_TYPE_BASE:
+               regno = X86_SIB_BASE(insn->sib.value);
+-              if (X86_REX_B(insn->rex_prefix.value) == 1)
++              if (X86_REX_B(insn->rex_prefix.value))
+                       regno += 8;
+               break;
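
For reference, a small user-space sketch of the non-boolean-macro pitfall described in this commit message; the X86_REX_X()/X86_REX_B() definitions follow the pattern quoted above, everything else is illustrative only:

    #include <stdio.h>

    #define X86_REX_X(rex) ((rex) & 2)   /* evaluates to 0 or 2, not a boolean */
    #define X86_REX_B(rex) ((rex) & 1)   /* evaluates to 0 or 1 */

    int main(void)
    {
            unsigned char rex = 0x42;    /* REX prefix with the X bit set */

            /* Old check: the macro yields 2, so "== 1" never matches and the
             * extended SIB index register would be missed. */
            printf("X86_REX_X(rex) == 1 -> %d\n", X86_REX_X(rex) == 1);  /* prints 0 */

            /* Fixed check: any non-zero value means the bit is set. */
            printf("X86_REX_X(rex) set  -> %d\n", X86_REX_X(rex) != 0);  /* prints 1 */
            return 0;
    }
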
diff --git a/queue-4.1/x86-reboot-quirks-add-imac10-1-to-pci_reboot_dmi_table.patch b/queue-4.1/x86-reboot-quirks-add-imac10-1-to-pci_reboot_dmi_table.patch
new file mode 100644 (file)
index 0000000..5e6f3a3
--- /dev/null
@@ -0,0 +1,48 @@
+From 2f0c0b2d96b1205efb14347009748d786c2d9ba5 Mon Sep 17 00:00:00 2001
+From: Mario Kleiner <mario.kleiner.de@gmail.com>
+Date: Fri, 18 Dec 2015 20:24:06 +0100
+Subject: x86/reboot/quirks: Add iMac10,1 to pci_reboot_dmi_table[]
+
+From: Mario Kleiner <mario.kleiner.de@gmail.com>
+
+commit 2f0c0b2d96b1205efb14347009748d786c2d9ba5 upstream.
+
+Without the reboot=pci method, the iMac 10,1 simply
+hangs after printing "Restarting system" at the point
+when it should reboot. This fixes it.
+
+Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1450466646-26663-1-git-send-email-mario.kleiner.de@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/reboot.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata r
+                       DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+               },
+       },
++      {       /* Handle problems with rebooting on the iMac10,1. */
++              .callback = set_pci_reboot,
++              .ident = "Apple iMac10,1",
++              .matches = {
++                  DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++                  DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
++              },
++      },
+       /* ASRock */
+       {       /* Handle problems with rebooting on ASRock Q1900DC-ITX */
diff --git a/queue-4.1/x86-signal-fix-restart_syscall-number-for-x32-tasks.patch b/queue-4.1/x86-signal-fix-restart_syscall-number-for-x32-tasks.patch
new file mode 100644 (file)
index 0000000..0ca719d
--- /dev/null
@@ -0,0 +1,60 @@
+From 22eab1108781eff09961ae7001704f7bd8fb1dce Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin" <ldv@altlinux.org>
+Date: Tue, 1 Dec 2015 00:54:36 +0300
+Subject: x86/signal: Fix restart_syscall number for x32 tasks
+
+From: "Dmitry V. Levin" <ldv@altlinux.org>
+
+commit 22eab1108781eff09961ae7001704f7bd8fb1dce upstream.
+
+When restarting a syscall with regs->ax == -ERESTART_RESTARTBLOCK,
+regs->ax is assigned to a restart_syscall number.  For x32 tasks, this
+syscall number must have __X32_SYSCALL_BIT set, otherwise it will be
+an x86_64 syscall number instead of a valid x32 syscall number. This
+issue has been there since the introduction of x32.
+
+Reported-by: strace/tests/restart_syscall.test
+Reported-and-tested-by: Elvira Khabirova <lineprinter0@gmail.com>
+Signed-off-by: Dmitry V. Levin <ldv@altlinux.org>
+Cc: Elvira Khabirova <lineprinter0@gmail.com>
+Link: http://lkml.kernel.org/r/20151130215436.GA25996@altlinux.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/signal.c |   17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -667,12 +667,15 @@ handle_signal(struct ksignal *ksig, stru
+       signal_setup_done(failed, ksig, stepping);
+ }
+-#ifdef CONFIG_X86_32
+-#define NR_restart_syscall    __NR_restart_syscall
+-#else /* !CONFIG_X86_32 */
+-#define NR_restart_syscall    \
+-      test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
+-#endif /* CONFIG_X86_32 */
++static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
++{
++#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
++      return __NR_restart_syscall;
++#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
++      return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
++              __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
++#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
++}
+ /*
+  * Note that 'init' is a special process: it doesn't get signals it doesn't
+@@ -701,7 +704,7 @@ static void do_signal(struct pt_regs *re
+                       break;
+               case -ERESTART_RESTARTBLOCK:
+-                      regs->ax = NR_restart_syscall;
++                      regs->ax = get_nr_restart_syscall(regs);
+                       regs->ip -= 2;
+                       break;
+               }
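
As a rough standalone sketch of how the fixed helper above composes the restart number (the __X32_SYSCALL_BIT value and the syscall number used here are assumptions of this example, not taken from the patch):

    #include <stdio.h>

    #define __X32_SYSCALL_BIT     0x40000000UL   /* assumed x32 marker bit */
    #define __NR_restart_syscall  219UL          /* assumed x86_64 number */

    /* Mirror of the 64-bit branch of get_nr_restart_syscall(): an x32 task
     * entered the kernel with the x32 bit set in orig_ax, so the bit is
     * carried over into the restart number; a plain x86_64 task is left
     * with the ordinary number. */
    static unsigned long nr_restart(unsigned long orig_ax)
    {
            return __NR_restart_syscall | (orig_ax & __X32_SYSCALL_BIT);
    }

    int main(void)
    {
            printf("x86_64 task: %#lx\n", nr_restart(__NR_restart_syscall));
            printf("x32 task:    %#lx\n", nr_restart(__X32_SYSCALL_BIT | 1));
            return 0;
    }
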
diff --git a/queue-4.1/x86-xen-don-t-reset-vcpu_info-on-a-cancelled-suspend.patch b/queue-4.1/x86-xen-don-t-reset-vcpu_info-on-a-cancelled-suspend.patch
new file mode 100644 (file)
index 0000000..3a49a4d
--- /dev/null
@@ -0,0 +1,36 @@
+From 6a1f513776b78c994045287073e55bae44ed9f8c Mon Sep 17 00:00:00 2001
+From: "Ouyang Zhaowei (Charles)" <ouyangzhaowei@huawei.com>
+Date: Wed, 6 May 2015 09:47:04 +0800
+Subject: x86/xen: don't reset vcpu_info on a cancelled suspend
+
+From: "Ouyang Zhaowei (Charles)" <ouyangzhaowei@huawei.com>
+
+commit 6a1f513776b78c994045287073e55bae44ed9f8c upstream.
+
+On a cancelled suspend the vcpu_info location does not change (it's
+still in the per-cpu area registered by xen_vcpu_setup()).  So do not
+call xen_hvm_init_shared_info() which would make the kernel think its
+back in the shared info.  With the wrong vcpu_info, events cannot be
+received and the domain will hang after a cancelled suspend.
+
+Signed-off-by: Charles Ouyang <ouyangzhaowei@huawei.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/suspend.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -32,7 +32,8 @@ static void xen_hvm_post_suspend(int sus
+ {
+ #ifdef CONFIG_XEN_PVHVM
+       int cpu;
+-      xen_hvm_init_shared_info();
++      if (!suspend_cancelled)
++          xen_hvm_init_shared_info();
+       xen_callback_vector();
+       xen_unplug_emulated_devices();
+       if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/queue-4.1/xen-gntdev-grant-maps-should-not-be-subject-to-numa-balancing.patch b/queue-4.1/xen-gntdev-grant-maps-should-not-be-subject-to-numa-balancing.patch
new file mode 100644 (file)
index 0000000..49279e8
--- /dev/null
@@ -0,0 +1,40 @@
+From 9c17d96500f78d7ecdb71ca6942830158bc75a2b Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Tue, 10 Nov 2015 15:10:33 -0500
+Subject: xen/gntdev: Grant maps should not be subject to NUMA balancing
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+commit 9c17d96500f78d7ecdb71ca6942830158bc75a2b upstream.
+
+Doing so will cause the grant to be unmapped and then, during
+fault handling, the fault to be mistakenly treated as NUMA hint
+fault.
+
+In addition, even if those maps could partcipate in NUMA
+balancing, it wouldn't provide any benefit since we are unable
+to determine physical page's node (even if/when VNUMA is
+implemented).
+
+Marking grant maps' VMAs as VM_IO will exclude them from being
+part of NUMA balancing.
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip
+       vma->vm_ops = &gntdev_vmops;
+-      vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++      vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+       if (use_ptemod)
+               vma->vm_flags |= VM_DONTCOPY;