--- /dev/null
+From df4833886f91eea0d20e6e97066adab308625ef8 Mon Sep 17 00:00:00 2001
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Date: Sun, 18 Oct 2015 13:46:47 +0900
+Subject: ALSA: fireworks/bebob/oxfw/dice: enable to make as built-in
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+commit df4833886f91eea0d20e6e97066adab308625ef8 upstream.
+
+When these four modules were merged upstream, they got wrong entries in
+their Makefiles: 'obj-m' builds a driver as a loadable module
+unconditionally, so these drivers were forced to be loadable modules
+even when configured as built-in. Using 'obj-$(CONFIG_...)' lets
+kbuild honor the configuration.
+
+This commit fixes this bug.
+
+Fixes: b5b04336015e('ALSA: fireworks: Add skelton for Fireworks based devices')
+Fixes: fd6f4b0dc167('ALSA: bebob: Add skelton for BeBoB based devices')
+Fixes: 1a4e39c2e5ca('ALSA: oxfw: Move to its own directory')
+Fixes: 14ff6a094815('ALSA: dice: Move file to its own directory')
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/firewire/bebob/Makefile | 2 +-
+ sound/firewire/dice/Makefile | 2 +-
+ sound/firewire/fireworks/Makefile | 2 +-
+ sound/firewire/oxfw/Makefile | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/sound/firewire/bebob/Makefile
++++ b/sound/firewire/bebob/Makefile
+@@ -1,4 +1,4 @@
+ snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \
+ bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \
+ bebob_focusrite.o bebob_maudio.o bebob.o
+-obj-m += snd-bebob.o
++obj-$(CONFIG_SND_BEBOB) += snd-bebob.o
+--- a/sound/firewire/dice/Makefile
++++ b/sound/firewire/dice/Makefile
+@@ -1,3 +1,3 @@
+ snd-dice-objs := dice-transaction.o dice-stream.o dice-proc.o dice-midi.o \
+ dice-pcm.o dice-hwdep.o dice.o
+-obj-m += snd-dice.o
++obj-$(CONFIG_SND_DICE) += snd-dice.o
+--- a/sound/firewire/fireworks/Makefile
++++ b/sound/firewire/fireworks/Makefile
+@@ -1,4 +1,4 @@
+ snd-fireworks-objs := fireworks_transaction.o fireworks_command.o \
+ fireworks_stream.o fireworks_proc.o fireworks_midi.o \
+ fireworks_pcm.o fireworks_hwdep.o fireworks.o
+-obj-m += snd-fireworks.o
++obj-$(CONFIG_SND_FIREWORKS) += snd-fireworks.o
+--- a/sound/firewire/oxfw/Makefile
++++ b/sound/firewire/oxfw/Makefile
+@@ -1,3 +1,3 @@
+ snd-oxfw-objs := oxfw-command.o oxfw-stream.o oxfw-control.o oxfw-pcm.o \
+ oxfw-proc.o oxfw-midi.o oxfw-hwdep.o oxfw.o
+-obj-m += snd-oxfw.o
++obj-$(CONFIG_SND_OXFW) += snd-oxfw.o
--- /dev/null
+From 5cf92c8b3dc5da59e05dc81bdc069cedf6f38313 Mon Sep 17 00:00:00 2001
+From: Alexandra Yates <alexandra.yates@linux.intel.com>
+Date: Wed, 4 Nov 2015 15:56:09 -0800
+Subject: ALSA: hda - Add Intel Lewisburg device IDs Audio
+
+From: Alexandra Yates <alexandra.yates@linux.intel.com>
+
+commit 5cf92c8b3dc5da59e05dc81bdc069cedf6f38313 upstream.
+
+Add the audio PCI device IDs for the Intel Lewisburg platform.
+
+[rearranged the position by tiwai]
+
+Signed-off-by: Alexandra Yates <alexandra.yates@linux.intel.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2105,6 +2105,11 @@ static const struct pci_device_id azx_id
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ { PCI_DEVICE(0x8086, 0x8d21),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++ /* Lewisburg */
++ { PCI_DEVICE(0x8086, 0xa1f0),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++ { PCI_DEVICE(0x8086, 0xa270),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ /* Lynx Point-LP */
+ { PCI_DEVICE(0x8086, 0x9c20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
--- /dev/null
+From c932b98c1e47312822d911c1bb76e81ef50e389c Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 4 Nov 2015 22:39:16 +0100
+Subject: ALSA: hda - Apply pin fixup for HP ProBook 6550b
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit c932b98c1e47312822d911c1bb76e81ef50e389c upstream.
+
+HP ProBook 6550b needs the same pin fixup that is applied to other HP
+B-series laptops with docks, so that its headphone and dock headphone
+jacks work properly. We just need to add the codec SSID to the list.
+
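+For reference, the subsystem ID that hp_blike_system() switches on packs
+the PCI subsystem vendor in the upper 16 bits and the subsystem device
+in the lower 16 bits; a hedged sketch of the decoding, not driver code:
+
+	u32 ssid = 0x103c1473;
+	u16 vendor = ssid >> 16;	/* 0x103c: Hewlett-Packard */
+	u16 device = ssid & 0xffff;	/* 0x1473: ProBook 6550b */
+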
+Bugzilla: https://bugzilla.kernel.org/attachment.cgi?id=191971
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_sigmatel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -702,6 +702,7 @@ static bool hp_bnb2011_with_dock(struct
+ static bool hp_blike_system(u32 subsystem_id)
+ {
+ switch (subsystem_id) {
++ case 0x103c1473: /* HP ProBook 6550b */
+ case 0x103c1520:
+ case 0x103c1521:
+ case 0x103c1523:
--- /dev/null
+From cadd16ea33a938d49aee99edd4758cc76048b399 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 27 Oct 2015 14:21:51 +0100
+Subject: ALSA: hda - Disable 64bit address for Creative HDA controllers
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit cadd16ea33a938d49aee99edd4758cc76048b399 upstream.
+
+We've had many reports that some Creative sound cards with CA0132
+don't work well. Some reported that it starts working after reloading
+the module, while some reported it starts working when a 32bit kernel
+is used. All these facts seem implying that the chip fails to
+communicate when the buffer is located in 64bit address.
+
+This patch addresses these issues by just adding the AZX_DCAPS_NO_64BIT
+flag to the corresponding PCI entries. I happened to have a chance to
+test an SB Recon3D board, and indeed this seems to help.
+
+Although this hasn't been tested on all Creative devices, it's safer
+to assume that this restriction applies to the rest of them, too. So
+the flag is applied to all Creative entries.
+
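+Setting AZX_DCAPS_NO_64BIT makes the driver fall back to a 32-bit DMA
+mask, so buffers are never placed above the 4GB boundary; a hedged
+sketch of the effect, not the exact hda_intel code:
+
+	int dma_bits = (chip->driver_caps & AZX_DCAPS_NO_64BIT) ? 32 : 64;
+	dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits));
+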
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -334,6 +334,7 @@ enum {
+
+ #define AZX_DCAPS_PRESET_CTHDA \
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
++ AZX_DCAPS_NO_64BIT |\
+ AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
+
+ /*
+@@ -2284,11 +2285,13 @@ static const struct pci_device_id azx_id
+ .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ .class_mask = 0xffffff,
+ .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++ AZX_DCAPS_NO_64BIT |
+ AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #else
+ /* this entry seems still valid -- i.e. without emu20kx chip */
+ { PCI_DEVICE(0x1102, 0x0009),
+ .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++ AZX_DCAPS_NO_64BIT |
+ AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #endif
+ /* CM8888 */
--- /dev/null
+From de1ab6af5c3d92c0a031083962a7ff270cf301b7 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 2 Nov 2015 17:35:34 +0100
+Subject: ALSA: hda - Fix lost 4k BDL boundary workaround
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit de1ab6af5c3d92c0a031083962a7ff270cf301b7 upstream.
+
+During the migration to the HDA core code, we lost the workaround for
+the 4k BDL boundary. The flag exists in the new hdac_bus, but it's
+never set. This resulted in sudden sound stalls on some controllers
+that require this workaround, such as Creative Recon3D.
+
+This patch fixes the issue by setting the flag for such controllers
+properly.
+
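+With align_bdle_4k set, the BDL setup code splits any transfer chunk
+that would cross a 4 KiB page, so a single buffer descriptor never
+spans the boundary; a hedged sketch of that constraint, not the hdac
+core code itself:
+
+	u32 remain = 0x1000 - (addr & 0xfff);
+	if (chunk > remain)
+		chunk = remain;	/* split at the 4 KiB boundary */
+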
+Fixes: ccc98865aa44 ('ALSA: hda - Migrate more hdac_stream codes')
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_controller.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1059,6 +1059,9 @@ int azx_bus_init(struct azx *chip, const
+ bus->needs_damn_long_delay = 1;
+ }
+
++ if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
++ bus->core.align_bdle_4k = true;
++
+ /* AMD chipsets often cause the communication stalls upon certain
+ * sequence like the pin-detection. It seems that forcing the synced
+ * access works around the stall. Grrr...
--- /dev/null
+From 6ed1131fe196ad7ffc13acc1a1eadc08a1db0303 Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Mon, 26 Oct 2015 15:37:39 +0800
+Subject: ALSA: hda/realtek - Dell XPS one ALC3260 speaker no sound after resume back
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 6ed1131fe196ad7ffc13acc1a1eadc08a1db0303 upstream.
+
+This machine has an I2S codec for speaker output. The I2S codec
+initialization verbs need to be refilled after resuming.
+
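+The added fixup drives the Realtek vendor widget (NID 0x20) as an
+index/data pair: each AC_VERB_SET_COEF_INDEX selects a coefficient
+register and the following AC_VERB_SET_PROC_COEF writes its value; a
+hedged sketch of replaying one such write by hand, not driver code:
+
+	/* select COEF register 0x1f, then write 0x00c0 into it */
+	snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x1f);
+	snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0x00c0);
+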
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Reported-and-tested-by: George Gugulea <gugulea@gmail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4596,6 +4596,7 @@ enum {
+ ALC292_FIXUP_DELL_E7X,
+ ALC292_FIXUP_DISABLE_AAMIX,
+ ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC275_FIXUP_DELL_XPS,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5165,6 +5166,17 @@ static const struct hda_fixup alc269_fix
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
+ },
++ [ALC275_FIXUP_DELL_XPS] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Enables internal speaker */
++ {0x20, AC_VERB_SET_COEF_INDEX, 0x1f},
++ {0x20, AC_VERB_SET_PROC_COEF, 0x00c0},
++ {0x20, AC_VERB_SET_COEF_INDEX, 0x30},
++ {0x20, AC_VERB_SET_PROC_COEF, 0x00b1},
++ {}
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5179,6 +5191,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
++ SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+ SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
+ SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
+ SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
--- /dev/null
+From 27f972d3e00b50639deb4cc1392afaeb08d3cecc Mon Sep 17 00:00:00 2001
+From: Jan Stancek <jstancek@redhat.com>
+Date: Tue, 8 Dec 2015 13:57:51 -0500
+Subject: ipmi: move timer init to before irq is setup
+
+From: Jan Stancek <jstancek@redhat.com>
+
+commit 27f972d3e00b50639deb4cc1392afaeb08d3cecc upstream.
+
+We encountered a panic on boot in ipmi_si on a Dell PER320 due to an
+uninitialized timer, as follows.
+
+static int smi_start_processing(void *send_info,
+ ipmi_smi_t intf)
+{
+ /* Try to claim any interrupts. */
+ if (new_smi->irq_setup)
+ new_smi->irq_setup(new_smi);
+
+ --> IRQ arrives here and irq handler tries to modify uninitialized timer
+
+ which triggers BUG_ON(!timer->function) in __mod_timer().
+
+ Call Trace:
+ <IRQ>
+ [<ffffffffa0532617>] start_new_msg+0x47/0x80 [ipmi_si]
+ [<ffffffffa053269e>] start_check_enables+0x4e/0x60 [ipmi_si]
+ [<ffffffffa0532bd8>] smi_event_handler+0x1e8/0x640 [ipmi_si]
+ [<ffffffff810f5584>] ? __rcu_process_callbacks+0x54/0x350
+ [<ffffffffa053327c>] si_irq_handler+0x3c/0x60 [ipmi_si]
+ [<ffffffff810efaf0>] handle_IRQ_event+0x60/0x170
+ [<ffffffff810f245e>] handle_edge_irq+0xde/0x180
+ [<ffffffff8100fc59>] handle_irq+0x49/0xa0
+ [<ffffffff8154643c>] do_IRQ+0x6c/0xf0
+ [<ffffffff8100ba53>] ret_from_intr+0x0/0x11
+
+ /* Set up the timer that drives the interface. */
+ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+
+The following patch fixes the problem.
+
+To: Openipmi-developer@lists.sourceforge.net
+To: Corey Minyard <minyard@acm.org>
+CC: linux-kernel@vger.kernel.org
+
+Signed-off-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Tony Camuso <tcamuso@redhat.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_si_intf.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1223,14 +1223,14 @@ static int smi_start_processing(void
+
+ new_smi->intf = intf;
+
+- /* Try to claim any interrupts. */
+- if (new_smi->irq_setup)
+- new_smi->irq_setup(new_smi);
+-
+ /* Set up the timer that drives the interface. */
+ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
++ /* Try to claim any interrupts. */
++ if (new_smi->irq_setup)
++ new_smi->irq_setup(new_smi);
++
+ /*
+ * Check if the user forcefully enabled the daemon.
+ */
--- /dev/null
+From 0cfec916e86d881e209de4b4ae9959a6271e6660 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <cminyard@mvista.com>
+Date: Sat, 5 Sep 2015 17:44:13 -0500
+Subject: ipmi: Start the timer and thread on internal msgs
+
+From: Corey Minyard <cminyard@mvista.com>
+
+commit 0cfec916e86d881e209de4b4ae9959a6271e6660 upstream.
+
+The timer and thread were not being started for internal messages,
+so in interrupt mode, if something hung, the timer would never go
+off and clean things up. Factor out the internal message sending
+and start the timer for those messages, too.
+
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Gouji, Masayuki <gouji.masayuki@jp.fujitsu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_si_intf.c | 73 +++++++++++++++++++++++----------------
+ 1 file changed, 44 insertions(+), 29 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(
+ return rv;
+ }
+
+-static void start_check_enables(struct smi_info *smi_info)
++static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
++{
++ smi_info->last_timeout_jiffies = jiffies;
++ mod_timer(&smi_info->si_timer, new_val);
++ smi_info->timer_running = true;
++}
++
++/*
++ * Start a new message and (re)start the timer and thread.
++ */
++static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
++ unsigned int size)
++{
++ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
++
++ if (smi_info->thread)
++ wake_up_process(smi_info->thread);
++
++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
++}
++
++static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+ {
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
++ if (start_timer)
++ start_new_msg(smi_info, msg, 2);
++ else
++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_CHECKING_ENABLES;
+ }
+
+-static void start_clear_flags(struct smi_info *smi_info)
++static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+ {
+ unsigned char msg[3];
+
+@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
++ if (start_timer)
++ start_new_msg(smi_info, msg, 3);
++ else
++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_CLEARING_FLAGS;
+ }
+
+@@ -442,10 +469,8 @@ static void start_getting_msg_queue(stru
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+- smi_info->handlers->start_transaction(
+- smi_info->si_sm,
+- smi_info->curr_msg->data,
+- smi_info->curr_msg->data_size);
++ start_new_msg(smi_info, smi_info->curr_msg->data,
++ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+ }
+
+@@ -455,20 +480,11 @@ static void start_getting_events(struct
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+- smi_info->handlers->start_transaction(
+- smi_info->si_sm,
+- smi_info->curr_msg->data,
+- smi_info->curr_msg->data_size);
++ start_new_msg(smi_info, smi_info->curr_msg->data,
++ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+ }
+
+-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+-{
+- smi_info->last_timeout_jiffies = jiffies;
+- mod_timer(&smi_info->si_timer, new_val);
+- smi_info->timer_running = true;
+-}
+-
+ /*
+ * When we have a situtaion where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_inf
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
+ */
+-static inline bool disable_si_irq(struct smi_info *smi_info)
++static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+ {
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = true;
+- start_check_enables(smi_info);
++ start_check_enables(smi_info, start_timer);
+ return true;
+ }
+ return false;
+@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct
+ {
+ if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = false;
+- start_check_enables(smi_info);
++ start_check_enables(smi_info, true);
+ return true;
+ }
+ return false;
+@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_ha
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+- if (!disable_si_irq(smi_info))
++ if (!disable_si_irq(smi_info, true))
+ smi_info->si_state = SI_NORMAL;
+ } else if (enable_si_irq(smi_info)) {
+ ipmi_free_smi_msg(msg);
+@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info
+ /* Watchdog pre-timeout */
+ smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+- start_clear_flags(smi_info);
++ start_clear_flags(smi_info, true);
+ smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ if (smi_info->intf)
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
+@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handl
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+- smi_info->handlers->start_transaction(
+- smi_info->si_sm, msg, 2);
++ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handl
+ * disable and messages disabled.
+ */
+ if (smi_info->supports_event_msg_buff || smi_info->irq) {
+- start_check_enables(smi_info);
++ start_check_enables(smi_info, true);
+ } else {
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+@@ -3613,7 +3628,7 @@ static int try_smi_init(struct smi_info
+ * Start clearing the flags before we enable interrupts or the
+ * timer to avoid racing with the timer.
+ */
+- start_clear_flags(new_smi);
++ start_clear_flags(new_smi, false);
+
+ /*
+ * IRQ is defined to be set when non-zero. req_events will
+@@ -3908,7 +3923,7 @@ static void cleanup_one_si(struct smi_in
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
+ }
+- disable_si_irq(to_clean);
++ disable_si_irq(to_clean, false);
+ while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
--- /dev/null
+From f74f2e2e26199f695ca3df94f29e9ab7cb707ea4 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Tue, 3 Nov 2015 16:03:30 +1100
+Subject: KVM: PPC: Book3S HV: Don't dynamically split core when already split
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit f74f2e2e26199f695ca3df94f29e9ab7cb707ea4 upstream.
+
+In static micro-threading modes, the dynamic micro-threading code
+is supposed to be disabled, because subcores can't make independent
+decisions about what micro-threading mode to put the core in - there is
+only one micro-threading mode for the whole core. The code that
+implements dynamic micro-threading checks for this, except that the
+check was missed in one case. This means that it is possible for a
+subcore in static 2-way micro-threading mode to try to put the core
+into 4-way micro-threading mode, which usually leads to stuck CPUs,
+spinlock lockups, and other stalls in the host.
+
+The problem was in the can_split_piggybacked_subcores() function, which
+should always return false if the system is in a static micro-threading
+mode. This fixes the problem by making can_split_piggybacked_subcores()
+use subcore_config_ok() for its checks, as subcore_config_ok() includes
+the necessary check for the static micro-threading modes.
+
+Credit to Gautham Shenoy for working out that the reason for the hangs
+and stalls we were seeing was that we were trying to do dynamic 4-way
+micro-threading while we were in static 2-way mode.
+
+Fixes: b4deba5c41e9
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2019,7 +2019,7 @@ static bool can_split_piggybacked_subcor
+ return false;
+ n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
+ }
+- if (n_subcores > 3 || large_sub < 0)
++ if (large_sub < 0 || !subcore_config_ok(n_subcores + 1, 2))
+ return false;
+
+ /*
--- /dev/null
+From c20875a3e638e4a03e099b343ec798edd1af5cc6 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Thu, 12 Nov 2015 16:43:02 +1100
+Subject: KVM: PPC: Book3S HV: Prohibit setting illegal transaction state in MSR
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit c20875a3e638e4a03e099b343ec798edd1af5cc6 upstream.
+
+Currently it is possible for userspace (e.g. QEMU) to set a value
+for the MSR for a guest VCPU which has both of the TS bits set,
+which is an illegal combination. The result of this is that when
+we execute a hrfid (hypervisor return from interrupt doubleword)
+instruction to enter the guest, the CPU will take a TM Bad Thing
+type of program interrupt (vector 0x700).
+
+Now, if PR KVM is configured in the kernel along with HV KVM, we
+actually handle this without crashing the host or giving hypervisor
+privilege to the guest; instead what happens is that we deliver a
+program interrupt to the guest, with SRR0 reflecting the address
+of the hrfid instruction and SRR1 containing the MSR value at that
+point. If PR KVM is not configured in the kernel, then we try to
+run the host's program interrupt handler with the MMU set to the
+guest context, which almost certainly causes a host crash.
+
+This closes the hole by making kvmppc_set_msr_hv() check for the
+illegal combination and force the TS field to a safe value (00,
+meaning non-transactional).
+
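+For reference, MSR[TS] is a two-bit field with only three defined
+encodings; a hedged summary of the values the new check relies on:
+
+	/* 00 = non-transactional, 01 = suspended, 10 = transactional,
+	 * 11 = reserved/illegal; (msr & MSR_TS_MASK) == MSR_TS_MASK
+	 * matches exactly the illegal 11 case, which is then cleared
+	 * to 00 (non-transactional).
+	 */
+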
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(stru
+
+ static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+ {
++ /*
++ * Check for illegal transactional state bit combination
++ * and if we find it, force the TS field to a safe state.
++ */
++ if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
++ msr &= ~MSR_TS_MASK;
+ vcpu->arch.shregs.msr = msr;
+ kvmppc_end_cede(vcpu);
+ }
--- /dev/null
+From cbdb967af3d54993f5814f1cee0ed311a055377d Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 10 Nov 2015 09:14:39 +0100
+Subject: KVM: svm: unconditionally intercept #DB
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit cbdb967af3d54993f5814f1cee0ed311a055377d upstream.
+
+This is needed to avoid the possibility that the guest triggers
+an infinite stream of #DB exceptions (CVE-2015-8104).
+
+VMX is not affected: because it does not save DR6 in the VMCS,
+it already intercepts #DB unconditionally.
+
+Reported-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 14 +++-----------
+ 1 file changed, 3 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1108,6 +1108,7 @@ static void init_vmcb(struct vcpu_svm *s
+ set_exception_intercept(svm, UD_VECTOR);
+ set_exception_intercept(svm, MC_VECTOR);
+ set_exception_intercept(svm, AC_VECTOR);
++ set_exception_intercept(svm, DB_VECTOR);
+
+ set_intercept(svm, INTERCEPT_INTR);
+ set_intercept(svm, INTERCEPT_NMI);
+@@ -1642,20 +1643,13 @@ static void svm_set_segment(struct kvm_v
+ mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+
+-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
++static void update_bp_intercept(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+- clr_exception_intercept(svm, DB_VECTOR);
+ clr_exception_intercept(svm, BP_VECTOR);
+
+- if (svm->nmi_singlestep)
+- set_exception_intercept(svm, DB_VECTOR);
+-
+ if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+- if (vcpu->guest_debug &
+- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+- set_exception_intercept(svm, DB_VECTOR);
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ set_exception_intercept(svm, BP_VECTOR);
+ } else
+@@ -1761,7 +1755,6 @@ static int db_interception(struct vcpu_s
+ if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
+ svm->vmcb->save.rflags &=
+ ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+- update_db_bp_intercept(&svm->vcpu);
+ }
+
+ if (svm->vcpu.guest_debug &
+@@ -3761,7 +3754,6 @@ static void enable_nmi_window(struct kvm
+ */
+ svm->nmi_singlestep = true;
+ svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+- update_db_bp_intercept(vcpu);
+ }
+
+ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
+@@ -4383,7 +4375,7 @@ static struct kvm_x86_ops svm_x86_ops =
+ .vcpu_load = svm_vcpu_load,
+ .vcpu_put = svm_vcpu_put,
+
+- .update_db_bp_intercept = update_db_bp_intercept,
++ .update_db_bp_intercept = update_bp_intercept,
+ .get_msr = svm_get_msr,
+ .set_msr = svm_set_msr,
+ .get_segment_base = svm_get_segment_base,
--- /dev/null
+From 656ec4a4928a3db7d16e5cb9bce351a478cfd3d5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Mon, 2 Nov 2015 22:20:00 +0100
+Subject: KVM: VMX: fix SMEP and SMAP without EPT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+
+commit 656ec4a4928a3db7d16e5cb9bce351a478cfd3d5 upstream.
+
+The comment in the code had it mostly right, but we enable paging for
+emulated real mode regardless of EPT.
+
+Without EPT (which implies emulated real mode), secondary VCPUs won't
+start unless we disable SM[AE]P when the guest doesn't use paging.
+
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3644,20 +3644,21 @@ static int vmx_set_cr4(struct kvm_vcpu *
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ hw_cr4 |= X86_CR4_PSE;
+- /*
+- * SMEP/SMAP is disabled if CPU is in non-paging mode
+- * in hardware. However KVM always uses paging mode to
+- * emulate guest non-paging mode with TDP.
+- * To emulate this behavior, SMEP/SMAP needs to be
+- * manually disabled when guest switches to non-paging
+- * mode.
+- */
+- hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+ } else if (!(cr4 & X86_CR4_PAE)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ }
+ }
+
++ if (!enable_unrestricted_guest && !is_paging(vcpu))
++ /*
++ * SMEP/SMAP is disabled if CPU is in non-paging mode in
++ * hardware. However KVM always uses paging mode without
++ * unrestricted guest.
++ * To emulate this behavior, SMEP/SMAP needs to be manually
++ * disabled when guest switches to non-paging mode.
++ */
++ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
++
+ vmcs_writel(CR4_READ_SHADOW, cr4);
+ vmcs_writel(GUEST_CR4, hw_cr4);
+ return 0;
--- /dev/null
+From aba2f06c070f604e388cf77b1dcc7f4cf4577eb0 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 12 Nov 2015 16:42:18 +0100
+Subject: KVM: x86: correctly print #AC in traces
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit aba2f06c070f604e388cf77b1dcc7f4cf4577eb0 upstream.
+
+Poor #AC was so unimportant until a few days ago that we were
+not even tracing its name correctly. But now it's all over
+the place.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
+ #define kvm_trace_sym_exc \
+ EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
+ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
+- EXS(MF), EXS(MC)
++ EXS(MF), EXS(AC), EXS(MC)
+
+ /*
+ * Tracepoint for kvm interrupt injection:
--- /dev/null
+From 9dbe6cf941a6fe82933aef565e4095fb10f65023 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 12 Nov 2015 14:49:17 +0100
+Subject: KVM: x86: expose MSR_TSC_AUX to userspace
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9dbe6cf941a6fe82933aef565e4095fb10f65023 upstream.
+
+If we do not do this, it is not properly saved and restored across
+migration. Windows notices due to its self-protection mechanisms,
+and is very upset about it (blue screen of death).
+
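+Userspace learns which MSRs to save and restore from the
+KVM_GET_MSR_INDEX_LIST ioctl on the /dev/kvm fd; a hedged sketch of the
+consumer side, with error handling omitted:
+
+	struct kvm_msr_list *list =
+		calloc(1, sizeof(*list) + 256 * sizeof(__u32));
+	list->nmsrs = 256;
+	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
+	/* with this patch, MSR_TSC_AUX appears in list->indices
+	 * whenever the host supports RDTSCP */
+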
+Cc: Radim Krcmar <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -942,7 +942,7 @@ static u32 msrs_to_save[] = {
+ MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+ #endif
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+- MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
++ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ };
+
+ static unsigned num_msrs_to_save;
+@@ -3847,16 +3847,17 @@ static void kvm_init_msr_list(void)
+
+ /*
+ * Even MSRs that are valid in the host may not be exposed
+- * to the guests in some cases. We could work around this
+- * in VMX with the generic MSR save/load machinery, but it
+- * is not really worthwhile since it will really only
+- * happen with nested virtualization.
++ * to the guests in some cases.
+ */
+ switch (msrs_to_save[i]) {
+ case MSR_IA32_BNDCFGS:
+ if (!kvm_x86_ops->mpx_supported())
+ continue;
+ break;
++ case MSR_TSC_AUX:
++ if (!kvm_x86_ops->rdtscp_supported())
++ continue;
++ break;
+ default:
+ break;
+ }
--- /dev/null
+x86-smpboot-re-enable-init_udelay-0-by-default-on-modern-cpus.patch
+x86-mpx-fix-instruction-decoder-condition.patch
+x86-signal-fix-restart_syscall-number-for-x32-tasks.patch
+x86-paravirt-prevent-rtc_cmos-platform-device-init-on-pv-guests.patch
+x86-mce-ensure-offline-cpus-don-t-participate-in-rendezvous-process.patch
+xen-gntdev-grant-maps-should-not-be-subject-to-numa-balancing.patch
+x86-xen-don-t-reset-vcpu_info-on-a-cancelled-suspend.patch
+kvm-vmx-fix-smep-and-smap-without-ept.patch
+kvm-ppc-book3s-hv-don-t-dynamically-split-core-when-already-split.patch
+kvm-svm-unconditionally-intercept-db.patch
+kvm-ppc-book3s-hv-prohibit-setting-illegal-transaction-state-in-msr.patch
+kvm-x86-expose-msr_tsc_aux-to-userspace.patch
+kvm-x86-correctly-print-ac-in-traces.patch
+x86-reboot-quirks-add-imac10-1-to-pci_reboot_dmi_table.patch
+x86-boot-double-boot_heap_size-to-64kb.patch
+x86-mm-add-barriers-and-document-switch_mm-vs-flush-synchronization.patch
+x86-mm-improve-switch_mm-barrier-comments.patch
+timers-use-proper-base-migration-in-add_timer_on.patch
+ipmi-start-the-timer-and-thread-on-internal-msgs.patch
+ipmi-move-timer-init-to-before-irq-is-setup.patch
+alsa-hda-realtek-dell-xps-one-alc3260-speaker-no-sound-after-resume-back.patch
+alsa-hda-disable-64bit-address-for-creative-hda-controllers.patch
+alsa-hda-fix-lost-4k-bdl-boundary-workaround.patch
+alsa-hda-add-intel-lewisburg-device-ids-audio.patch
+alsa-hda-apply-pin-fixup-for-hp-probook-6550b.patch
+alsa-fireworks-bebob-oxfw-dice-enable-to-make-as-built-in.patch
--- /dev/null
+From 22b886dd1018093920c4250dee2a9a3cb7cff7b8 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 4 Nov 2015 12:15:33 -0500
+Subject: timers: Use proper base migration in add_timer_on()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 22b886dd1018093920c4250dee2a9a3cb7cff7b8 upstream.
+
+Regardless of the previous CPU a timer was on, add_timer_on()
+currently simply sets timer->flags to the new CPU. As the caller must
+see the timer as idle, this is locally fine, but the timer
+leaving the old base while unlocked can lead to race conditions as
+follows.
+
+Let's say timer was on cpu 0.
+
+ cpu 0 cpu 1
+ -----------------------------------------------------------------------------
+ del_timer(timer) succeeds
+ del_timer(timer)
+ lock_timer_base(timer) locks cpu_0_base
+ add_timer_on(timer, 1)
+ spin_lock(&cpu_1_base->lock)
+ timer->flags set to cpu_1_base
+ operates on @timer operates on @timer
+
+This triggered with mod_delayed_work_on(), which contains an
+"if (del_timer()) add_timer_on()" sequence eventually leading to the
+following oops.
+
+ BUG: unable to handle kernel NULL pointer dereference at (null)
+ IP: [<ffffffff810ca6e9>] detach_if_pending+0x69/0x1a0
+ ...
+ Workqueue: wqthrash wqthrash_workfunc [wqthrash]
+ task: ffff8800172ca680 ti: ffff8800172d0000 task.ti: ffff8800172d0000
+ RIP: 0010:[<ffffffff810ca6e9>] [<ffffffff810ca6e9>] detach_if_pending+0x69/0x1a0
+ ...
+ Call Trace:
+ [<ffffffff810cb0b4>] del_timer+0x44/0x60
+ [<ffffffff8106e836>] try_to_grab_pending+0xb6/0x160
+ [<ffffffff8106e913>] mod_delayed_work_on+0x33/0x80
+ [<ffffffffa0000081>] wqthrash_workfunc+0x61/0x90 [wqthrash]
+ [<ffffffff8106dba8>] process_one_work+0x1e8/0x650
+ [<ffffffff8106e05e>] worker_thread+0x4e/0x450
+ [<ffffffff810746af>] kthread+0xef/0x110
+ [<ffffffff8185980f>] ret_from_fork+0x3f/0x70
+
+Fix it by updating add_timer_on() to perform proper migration as
+__mod_timer() does.
+
+Reported-and-tested-by: Jeff Layton <jlayton@poochiereds.net>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Chris Worley <chris.worley@primarydata.com>
+Cc: bfields@fieldses.org
+Cc: Michael Skralivetsky <michael.skralivetsky@primarydata.com>
+Cc: Trond Myklebust <trond.myklebust@primarydata.com>
+Cc: Shaohua Li <shli@fb.com>
+Cc: Jeff Layton <jlayton@poochiereds.net>
+Cc: kernel-team@fb.com
+Link: http://lkml.kernel.org/r/20151029103113.2f893924@tlielax.poochiereds.net
+Link: http://lkml.kernel.org/r/20151104171533.GI5749@mtj.duckdns.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/timer.c | 22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -970,13 +970,29 @@ EXPORT_SYMBOL(add_timer);
+ */
+ void add_timer_on(struct timer_list *timer, int cpu)
+ {
+- struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
++ struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
++ struct tvec_base *base;
+ unsigned long flags;
+
+ timer_stats_timer_set_start_info(timer);
+ BUG_ON(timer_pending(timer) || !timer->function);
+- spin_lock_irqsave(&base->lock, flags);
+- timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
++
++ /*
++ * If @timer was on a different CPU, it should be migrated with the
++ * old base locked to prevent other operations proceeding with the
++ * wrong base locked. See lock_timer_base().
++ */
++ base = lock_timer_base(timer, &flags);
++ if (base != new_base) {
++ timer->flags |= TIMER_MIGRATING;
++
++ spin_unlock(&base->lock);
++ base = new_base;
++ spin_lock(&base->lock);
++ WRITE_ONCE(timer->flags,
++ (timer->flags & ~TIMER_BASEMASK) | cpu);
++ }
++
+ debug_activate(timer, timer->expires);
+ internal_add_timer(base, timer);
+ spin_unlock_irqrestore(&base->lock, flags);
--- /dev/null
+From 8c31902cffc4d716450be549c66a67a8a3dd479c Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" <hjl.tools@gmail.com>
+Date: Mon, 4 Jan 2016 10:17:09 -0800
+Subject: x86/boot: Double BOOT_HEAP_SIZE to 64KB
+
+From: "H.J. Lu" <hjl.tools@gmail.com>
+
+commit 8c31902cffc4d716450be549c66a67a8a3dd479c upstream.
+
+When decompressing the kernel image during x86 bootup, allocating
+memory for the ELF program headers may run out of heap space, which
+leads to a system halt. This patch doubles BOOT_HEAP_SIZE to 64KB.
+
+Tested with a 32-bit kernel which failed to boot without this patch.
+
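+For reference, the arithmetic in the non-bzip2 case:
+
+	0x8000  = 32768 bytes = 32 KB	/* old BOOT_HEAP_SIZE */
+	0x10000 = 65536 bytes = 64 KB	/* new BOOT_HEAP_SIZE */
+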
+Signed-off-by: H.J. Lu <hjl.tools@gmail.com>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/boot.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -27,7 +27,7 @@
+ #define BOOT_HEAP_SIZE 0x400000
+ #else /* !CONFIG_KERNEL_BZIP2 */
+
+-#define BOOT_HEAP_SIZE 0x8000
++#define BOOT_HEAP_SIZE 0x10000
+
+ #endif /* !CONFIG_KERNEL_BZIP2 */
+
--- /dev/null
+From d90167a941f62860f35eb960e1012aa2d30e7e94 Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Thu, 10 Dec 2015 11:12:26 +0100
+Subject: x86/mce: Ensure offline CPUs don't participate in rendezvous process
+
+From: Ashok Raj <ashok.raj@intel.com>
+
+commit d90167a941f62860f35eb960e1012aa2d30e7e94 upstream.
+
+Intel's MCA implementation broadcasts MCEs to all CPUs on the
+node. This poses a problem for offlined CPUs which cannot
+participate in the rendezvous process:
+
+ Kernel panic - not syncing: Timeout: Not all CPUs entered broadcast exception handler
+ Kernel Offset: disabled
+ Rebooting in 100 seconds..
+
+More specifically, Linux does a soft offline of a CPU when
+writing a 0 to /sys/devices/system/cpu/cpuX/online, which
+doesn't prevent the #MC exception from being broadcast to that
+CPU.
+
+Ensure that offline CPUs don't participate in the MCE rendezvous
+and clear the RIP valid status bit so that a second MCE won't
+cause a shutdown.
+
+Without the patch, mce_start() will increment mce_callin and
+wait for all CPUs. Offlined CPUs should avoid participating in
+the rendezvous process altogether.
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+[ Massage commit message. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Link: http://lkml.kernel.org/r/1449742346-21470-2-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mcheck/mce.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -999,6 +999,17 @@ void do_machine_check(struct pt_regs *re
+ int flags = MF_ACTION_REQUIRED;
+ int lmce = 0;
+
++ /* If this CPU is offline, just bail out. */
++ if (cpu_is_offline(smp_processor_id())) {
++ u64 mcgstatus;
++
++ mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
++ if (mcgstatus & MCG_STATUS_RIPV) {
++ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
++ return;
++ }
++ }
++
+ ist_enter(regs);
+
+ this_cpu_inc(mce_exception_count);
--- /dev/null
+From 71b3c126e61177eb693423f2e18a1914205b165e Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Wed, 6 Jan 2016 12:21:01 -0800
+Subject: x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 71b3c126e61177eb693423f2e18a1914205b165e upstream.
+
+When switch_mm() activates a new PGD, it also sets a bit that
+tells other CPUs that the PGD is in use so that TLB flush IPIs
+will be sent. In order for that to work correctly, the bit
+needs to be visible prior to loading the PGD and therefore
+starting to fill the local TLB.
+
+Document all the barriers that make this work correctly and add
+a couple that were missing.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-mm@kvack.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mmu_context.h | 33 ++++++++++++++++++++++++++++++++-
+ arch/x86/mm/tlb.c | 29 ++++++++++++++++++++++++++---
+ 2 files changed, 58 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -116,8 +116,34 @@ static inline void switch_mm(struct mm_s
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+- /* Re-load page tables */
++ /*
++ * Re-load page tables.
++ *
++ * This logic has an ordering constraint:
++ *
++ * CPU 0: Write to a PTE for 'next'
++ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
++ * CPU 1: set bit 1 in next's mm_cpumask
++ * CPU 1: load from the PTE that CPU 0 writes (implicit)
++ *
++ * We need to prevent an outcome in which CPU 1 observes
++ * the new PTE value and CPU 0 observes bit 1 clear in
++ * mm_cpumask. (If that occurs, then the IPI will never
++ * be sent, and CPU 0's TLB will contain a stale entry.)
++ *
++ * The bad outcome can occur if either CPU's load is
++ * reordered before that CPU's store, so both CPUs much
++ * execute full barriers to prevent this from happening.
++ *
++ * Thus, switch_mm needs a full barrier between the
++ * store to mm_cpumask and any operation that could load
++ * from next->pgd. This barrier synchronizes with
++ * remote TLB flushers. Fortunately, load_cr3 is
++ * serializing and thus acts as a full barrier.
++ *
++ */
+ load_cr3(next->pgd);
++
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+ /* Stop flush ipis for the previous mm */
+@@ -156,10 +182,15 @@ static inline void switch_mm(struct mm_s
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
++
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
++ *
++ * As above, this is a barrier that forces
++ * TLB repopulation to be ordered after the
++ * store to mm_cpumask.
+ */
+ load_cr3(next->pgd);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -161,7 +161,10 @@ void flush_tlb_current_task(void)
+ preempt_disable();
+
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
++
++ /* This is an implicit full barrier that synchronizes with switch_mm. */
+ local_flush_tlb();
++
+ trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+@@ -188,17 +191,29 @@ void flush_tlb_mm_range(struct mm_struct
+ unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+
+ preempt_disable();
+- if (current->active_mm != mm)
++ if (current->active_mm != mm) {
++ /* Synchronize with switch_mm. */
++ smp_mb();
++
+ goto out;
++ }
+
+ if (!current->mm) {
+ leave_mm(smp_processor_id());
++
++ /* Synchronize with switch_mm. */
++ smp_mb();
++
+ goto out;
+ }
+
+ if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+ base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+
++ /*
++ * Both branches below are implicit full barriers (MOV to CR or
++ * INVLPG) that synchronize with switch_mm.
++ */
+ if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+ base_pages_to_flush = TLB_FLUSH_ALL;
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+@@ -228,10 +243,18 @@ void flush_tlb_page(struct vm_area_struc
+ preempt_disable();
+
+ if (current->active_mm == mm) {
+- if (current->mm)
++ if (current->mm) {
++ /*
++ * Implicit full barrier (INVLPG) that synchronizes
++ * with switch_mm.
++ */
+ __flush_tlb_one(start);
+- else
++ } else {
+ leave_mm(smp_processor_id());
++
++ /* Synchronize with switch_mm. */
++ smp_mb();
++ }
+ }
+
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
--- /dev/null
+From 4eaffdd5a5fe6ff9f95e1ab4de1ac904d5e0fa8b Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 12 Jan 2016 12:47:40 -0800
+Subject: x86/mm: Improve switch_mm() barrier comments
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 4eaffdd5a5fe6ff9f95e1ab4de1ac904d5e0fa8b upstream.
+
+My previous comments were still a bit confusing and there was a
+typo. Fix it up.
+
+Reported-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 71b3c126e611 ("x86/mm: Add barriers and document switch_mm()-vs-flush synchronization")
+Link: http://lkml.kernel.org/r/0a0b43cdcdd241c5faaaecfbcc91a155ddedc9a1.1452631609.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mmu_context.h | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -132,14 +132,16 @@ static inline void switch_mm(struct mm_s
+ * be sent, and CPU 0's TLB will contain a stale entry.)
+ *
+ * The bad outcome can occur if either CPU's load is
+- * reordered before that CPU's store, so both CPUs much
++ * reordered before that CPU's store, so both CPUs must
+ * execute full barriers to prevent this from happening.
+ *
+ * Thus, switch_mm needs a full barrier between the
+ * store to mm_cpumask and any operation that could load
+- * from next->pgd. This barrier synchronizes with
+- * remote TLB flushers. Fortunately, load_cr3 is
+- * serializing and thus acts as a full barrier.
++ * from next->pgd. TLB fills are special and can happen
++ * due to instruction fetches or for no reason at all,
++ * and neither LOCK nor MFENCE orders them.
++ * Fortunately, load_cr3() is serializing and gives the
++ * ordering guarantee we need.
+ *
+ */
+ load_cr3(next->pgd);
+@@ -188,9 +190,8 @@ static inline void switch_mm(struct mm_s
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ *
+- * As above, this is a barrier that forces
+- * TLB repopulation to be ordered after the
+- * store to mm_cpumask.
++ * As above, load_cr3() is serializing and orders TLB
++ * fills with respect to the mm_cpumask write.
+ */
+ load_cr3(next->pgd);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
--- /dev/null
+From 8e8efe0379bd93e8219ca0fc6fa80b5dd85b09cb Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave.hansen@linux.intel.com>
+Date: Mon, 30 Nov 2015 16:31:13 -0800
+Subject: x86/mpx: Fix instruction decoder condition
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+commit 8e8efe0379bd93e8219ca0fc6fa80b5dd85b09cb upstream.
+
+MPX decodes instructions in order to tell which bounds register
+was violated. Part of this decoding involves looking at the "REX
+prefix" which is a special instrucion prefix used to retrofit
+support for new registers in to old instructions.
+
+The X86_REX_*() macros are defined to return actual bit values:
+
+ #define X86_REX_R(rex) ((rex) & 4)
+
+*not* boolean values. However, the MPX code was checking for
+them like they were booleans. This might have led to us
+mis-decoding the "REX prefix" and giving false information out to
+userspace about bounds violations. X86_REX_B() happens to return the
+value 1 when its bit is set, so this is really only broken for the
+X86_REX_X() case.
+
+Fix the conditionals up to tolerate the non-boolean values.
+
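+A worked example of the breakage: the REX byte 0x42 has REX.X set, and
+X86_REX_X() is defined as ((rex) & 2), so the macro returns 2, never 1:
+
+	X86_REX_X(0x42) == 1	/* 2 == 1 -> false: regno never += 8 */
+	if (X86_REX_X(0x42))	/* 2 -> true: regno += 8 as intended */
+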
+Fixes: fcc7ffd67991 "x86, mpx: Decode MPX instruction to get bound violation information"
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: x86@kernel.org
+Cc: Dave Hansen <dave@sr71.net>
+Link: http://lkml.kernel.org/r/20151201003113.D800C1E0@viggo.jf.intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/mpx.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *i
+ switch (type) {
+ case REG_TYPE_RM:
+ regno = X86_MODRM_RM(insn->modrm.value);
+- if (X86_REX_B(insn->rex_prefix.value) == 1)
++ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ case REG_TYPE_INDEX:
+ regno = X86_SIB_INDEX(insn->sib.value);
+- if (X86_REX_X(insn->rex_prefix.value) == 1)
++ if (X86_REX_X(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ case REG_TYPE_BASE:
+ regno = X86_SIB_BASE(insn->sib.value);
+- if (X86_REX_B(insn->rex_prefix.value) == 1)
++ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
--- /dev/null
+From d8c98a1d1488747625ad6044d423406e17e99b7a Mon Sep 17 00:00:00 2001
+From: David Vrabel <david.vrabel@citrix.com>
+Date: Fri, 11 Dec 2015 09:07:53 -0500
+Subject: x86/paravirt: Prevent rtc_cmos platform device init on PV guests
+
+From: David Vrabel <david.vrabel@citrix.com>
+
+commit d8c98a1d1488747625ad6044d423406e17e99b7a upstream.
+
+Adding the rtc platform device in non-privileged Xen PV guests causes
+an IRQ conflict because these guests do not have a legacy PIC and may
+allocate irqs in the legacy range.
+
+In a single VCPU Xen PV guest we should have:
+
+/proc/interrupts:
+ CPU0
+ 0: 4934 xen-percpu-virq timer0
+ 1: 0 xen-percpu-ipi spinlock0
+ 2: 0 xen-percpu-ipi resched0
+ 3: 0 xen-percpu-ipi callfunc0
+ 4: 0 xen-percpu-virq debug0
+ 5: 0 xen-percpu-ipi callfuncsingle0
+ 6: 0 xen-percpu-ipi irqwork0
+ 7: 321 xen-dyn-event xenbus
+ 8: 90 xen-dyn-event hvc_console
+ ...
+
+But hvc_console cannot get its interrupt because it is already in use
+by rtc0 and the console does not work.
+
+ genirq: Flags mismatch irq 8. 00000000 (hvc_console) vs. 00000000 (rtc0)
+
+We can avoid this problem by realizing that unprivileged PV guests (both
+Xen and lguest) are not supposed to have an rtc_cmos device and so
+adding it is not necessary.
+
+Privileged guests (i.e. Xen's dom0) do use it but they should not have
+irq conflicts since they allocate irqs above the legacy range (above
+gsi_top, in fact).
+
+Instead of explicitly testing whether the guest is privileged we can
+extend pv_info structure to include information about guest's RTC
+support.
+
+Reported-and-tested-by: Sander Eikelenboom <linux@eikelenboom.it>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: vkuznets@redhat.com
+Cc: xen-devel@lists.xenproject.org
+Cc: konrad.wilk@oracle.com
+Link: http://lkml.kernel.org/r/1449842873-2613-1-git-send-email-boris.ostrovsky@oracle.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/paravirt.h | 6 ++++++
+ arch/x86/include/asm/paravirt_types.h | 5 +++++
+ arch/x86/include/asm/processor.h | 1 +
+ arch/x86/kernel/rtc.c | 3 +++
+ arch/x86/lguest/boot.c | 1 +
+ arch/x86/xen/enlighten.c | 4 +++-
+ 6 files changed, 19 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -19,6 +19,12 @@ static inline int paravirt_enabled(void)
+ return pv_info.paravirt_enabled;
+ }
+
++static inline int paravirt_has_feature(unsigned int feature)
++{
++ WARN_ON_ONCE(!pv_info.paravirt_enabled);
++ return (pv_info.features & feature);
++}
++
+ static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+ {
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -70,9 +70,14 @@ struct pv_info {
+ #endif
+
+ int paravirt_enabled;
++ unsigned int features; /* valid only if paravirt_enabled is set */
+ const char *name;
+ };
+
++#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)
++/* Supported features */
++#define PV_SUPPORTED_RTC (1<<0)
++
+ struct pv_init_ops {
+ /*
+ * Patch may replace one of the defined code sequences with
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -472,6 +472,7 @@ static inline unsigned long current_top_
+ #else
+ #define __cpuid native_cpuid
+ #define paravirt_enabled() 0
++#define paravirt_has(x) 0
+
+ static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+--- a/arch/x86/kernel/rtc.c
++++ b/arch/x86/kernel/rtc.c
+@@ -200,6 +200,9 @@ static __init int add_rtc_cmos(void)
+ }
+ #endif
+
++ if (paravirt_enabled() && !paravirt_has(RTC))
++ return -ENODEV;
++
+ platform_device_register(&rtc_device);
+ dev_info(&rtc_device.dev,
+ "registered platform RTC device (no PNP device found)\n");
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1414,6 +1414,7 @@ __init void lguest_init(void)
+ pv_info.kernel_rpl = 1;
+ /* Everyone except Xen runs with this set. */
+ pv_info.shared_kernel_pmd = 1;
++ pv_info.features = 0;
+
+ /*
+ * We set up all the lguest overrides for sensitive operations. These
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1191,7 +1191,7 @@ static const struct pv_info xen_info __i
+ #ifdef CONFIG_X86_64
+ .extra_user_64bit_cs = FLAT_USER_CS64,
+ #endif
+-
++ .features = 0,
+ .name = "Xen",
+ };
+
+@@ -1534,6 +1534,8 @@ asmlinkage __visible void __init xen_sta
+
+ /* Install Xen paravirt ops */
+ pv_info = xen_info;
++ if (xen_initial_domain())
++ pv_info.features |= PV_SUPPORTED_RTC;
+ pv_init_ops = xen_init_ops;
+ pv_apic_ops = xen_apic_ops;
+ if (!xen_pvh_domain()) {
--- /dev/null
+From 2f0c0b2d96b1205efb14347009748d786c2d9ba5 Mon Sep 17 00:00:00 2001
+From: Mario Kleiner <mario.kleiner.de@gmail.com>
+Date: Fri, 18 Dec 2015 20:24:06 +0100
+Subject: x86/reboot/quirks: Add iMac10,1 to pci_reboot_dmi_table[]
+
+From: Mario Kleiner <mario.kleiner.de@gmail.com>
+
+commit 2f0c0b2d96b1205efb14347009748d786c2d9ba5 upstream.
+
+Without the reboot=pci method, the iMac 10,1 simply
+hangs after printing "Restarting system" at the point
+when it should reboot. This fixes it.
+
+Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1450466646-26663-1-git-send-email-mario.kleiner.de@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/reboot.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata r
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ },
+ },
++ { /* Handle problems with rebooting on the iMac10,1. */
++ .callback = set_pci_reboot,
++ .ident = "Apple iMac10,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
++ },
++ },
+
+ /* ASRock */
+ { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
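
The new table entry matches on two DMI fields, and DMI_MATCH entries
are substring matches against the firmware-provided strings. As a
standalone sketch, the same check can be approximated from userspace by
reading the strings the kernel compares out of sysfs (this assumes a
Linux system exposing /sys/class/dmi/id):

#include <stdio.h>
#include <string.h>

/* DMI_MATCH is a substring match, so mirror that with strstr() */
static int dmi_field_is(const char *path, const char *want)
{
        char buf[128];
        FILE *f = fopen(path, "r");

        if (!f)
                return 0;
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return 0;
        }
        fclose(f);
        buf[strcspn(buf, "\n")] = '\0';
        return strstr(buf, want) != NULL;
}

int main(void)
{
        int hit =
            dmi_field_is("/sys/class/dmi/id/sys_vendor", "Apple Inc.") &&
            dmi_field_is("/sys/class/dmi/id/product_name", "iMac10,1");

        printf("iMac10,1 reboot quirk would %smatch here\n",
               hit ? "" : "not ");
        return 0;
}
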
--- /dev/null
+From 22eab1108781eff09961ae7001704f7bd8fb1dce Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin" <ldv@altlinux.org>
+Date: Tue, 1 Dec 2015 00:54:36 +0300
+Subject: x86/signal: Fix restart_syscall number for x32 tasks
+
+From: "Dmitry V. Levin" <ldv@altlinux.org>
+
+commit 22eab1108781eff09961ae7001704f7bd8fb1dce upstream.
+
+When restarting a syscall with regs->ax == -ERESTART_RESTARTBLOCK,
+regs->ax is assigned to a restart_syscall number. For x32 tasks, this
+syscall number must have __X32_SYSCALL_BIT set, otherwise it will be
+an x86_64 syscall number instead of a valid x32 syscall number. This
+issue has been there since the introduction of x32.
+
+Reported-by: strace/tests/restart_syscall.test
+Reported-and-tested-by: Elvira Khabirova <lineprinter0@gmail.com>
+Signed-off-by: Dmitry V. Levin <ldv@altlinux.org>
+Cc: Elvira Khabirova <lineprinter0@gmail.com>
+Link: http://lkml.kernel.org/r/20151130215436.GA25996@altlinux.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/signal.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -688,12 +688,15 @@ handle_signal(struct ksignal *ksig, stru
+ signal_setup_done(failed, ksig, stepping);
+ }
+
+-#ifdef CONFIG_X86_32
+-#define NR_restart_syscall __NR_restart_syscall
+-#else /* !CONFIG_X86_32 */
+-#define NR_restart_syscall \
+- test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
+-#endif /* CONFIG_X86_32 */
++static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
++{
++#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
++ return __NR_restart_syscall;
++#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
++ return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
++ __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
++#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
++}
+
+ /*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+@@ -722,7 +725,7 @@ void do_signal(struct pt_regs *regs)
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+- regs->ax = NR_restart_syscall;
++ regs->ax = get_nr_restart_syscall(regs);
+ regs->ip -= 2;
+ break;
+ }
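
The fix is pure bit arithmetic: carry __X32_SYSCALL_BIT over from the
interrupted syscall's number into the restart number. A self-contained
sketch of that arithmetic follows, with the two constants copied from
the x86_64 uapi headers (__NR_restart_syscall is 219 there); the
orig_ax values are made up for the demo:

#include <stdio.h>

#define __X32_SYSCALL_BIT      0x40000000UL
#define __NR_restart_syscall   219UL    /* x86_64 value */

/* model of get_nr_restart_syscall() for 64-bit, non-ia32 tasks */
static unsigned long restart_nr(unsigned long orig_ax)
{
        return __NR_restart_syscall | (orig_ax & __X32_SYSCALL_BIT);
}

int main(void)
{
        unsigned long x32_write = __X32_SYSCALL_BIT | 1; /* an x32 orig_ax */
        unsigned long x64_write = 1;                     /* an x86_64 one */

        printf("x32 task restarts with    %#lx\n", restart_nr(x32_write));
        printf("x86_64 task restarts with %#lx\n", restart_nr(x64_write));
        return 0;
}

Because an x32 task's orig_ax always carries the bit, masking it back
in yields a valid x32 restart_syscall number, while a plain x86_64
task's orig_ax contributes nothing and the number is unchanged.
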
--- /dev/null
+From 656279a1f3b210cf48ccc572fd7c6b8e2250be77 Mon Sep 17 00:00:00 2001
+From: Len Brown <len.brown@intel.com>
+Date: Sun, 22 Nov 2015 18:16:15 -0500
+Subject: x86 smpboot: Re-enable init_udelay=0 by default on modern CPUs
+
+From: Len Brown <len.brown@intel.com>
+
+commit 656279a1f3b210cf48ccc572fd7c6b8e2250be77 upstream.
+
+Commit f1ccd249319e allowed the cmdline "cpu_init_udelay=" to work
+with all values, including the default of 10000.
+
+But in setting the default of 10000, it overrode the code that sets
+the delay to 0 on modern processors.
+
+Also, tidy up use of INT/UINT.
+
+Fixes: f1ccd249319e "x86/smpboot: Fix cpu_init_udelay=10000 corner case boot parameter misbehavior"
+Reported-by: Shane <shrybman@teksavvy.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: dparsons@brightdsl.net
+Link: http://lkml.kernel.org/r/9082eb809ef40dad02db714759c7aaf618c518d4.1448232494.git.len.brown@intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/smpboot.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
+ */
+ #define UDELAY_10MS_DEFAULT 10000
+
+-static unsigned int init_udelay = INT_MAX;
++static unsigned int init_udelay = UINT_MAX;
+
+ static int __init cpu_init_udelay(char *str)
+ {
+@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_
+ static void __init smp_quirk_init_udelay(void)
+ {
+ /* if cmdline changed it from default, leave it alone */
+- if (init_udelay != INT_MAX)
++ if (init_udelay != UINT_MAX)
+ return;
+
+ /* if modern processor, use no delay */
+ if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+- ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
++ ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
+ init_udelay = 0;
+-
++ return;
++ }
+ /* else, use legacy delay */
+ init_udelay = UDELAY_10MS_DEFAULT;
+ }
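
The underlying pattern is an "unset" sentinel on an unsigned variable:
the marker must be UINT_MAX rather than INT_MAX so that no valid
user-supplied value collides with it and the comparison stays unsigned.
A minimal model of that pattern, with a single flag standing in for the
patch's vendor/family checks:

#include <limits.h>
#include <stdio.h>

/* UINT_MAX means "not set on the command line" */
static unsigned int init_udelay = UINT_MAX;

static void quirk_init_udelay(int modern_cpu)
{
        if (init_udelay != UINT_MAX)    /* cmdline chose a value: keep it */
                return;
        if (modern_cpu) {
                init_udelay = 0;
                return;
        }
        init_udelay = 10000;            /* legacy 10ms default */
}

int main(void)
{
        quirk_init_udelay(1);
        printf("init_udelay = %u\n", init_udelay);      /* prints 0 */
        return 0;
}
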
--- /dev/null
+From 6a1f513776b78c994045287073e55bae44ed9f8c Mon Sep 17 00:00:00 2001
+From: "Ouyang Zhaowei (Charles)" <ouyangzhaowei@huawei.com>
+Date: Wed, 6 May 2015 09:47:04 +0800
+Subject: x86/xen: don't reset vcpu_info on a cancelled suspend
+
+From: "Ouyang Zhaowei (Charles)" <ouyangzhaowei@huawei.com>
+
+commit 6a1f513776b78c994045287073e55bae44ed9f8c upstream.
+
+On a cancelled suspend the vcpu_info location does not change (it's
+still in the per-cpu area registered by xen_vcpu_setup()). So do not
+call xen_hvm_init_shared_info() which would make the kernel think its
+back in the shared info. With the wrong vcpu_info, events cannot be
+received and the domain will hang after a cancelled suspend.
+
+Signed-off-by: Charles Ouyang <ouyangzhaowei@huawei.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/suspend.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -33,7 +33,8 @@ static void xen_hvm_post_suspend(int sus
+ {
+ #ifdef CONFIG_XEN_PVHVM
+ int cpu;
+- xen_hvm_init_shared_info();
++ if (!suspend_cancelled)
++ xen_hvm_init_shared_info();
+ xen_callback_vector();
+ xen_unplug_emulated_devices();
+ if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
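
The invariant the fix restores is: re-derive only state that actually
moved, because after a cancelled suspend the old registration is still
the live one. A toy model of that guard, with plain pointers standing
in for the vcpu_info mapping and no Xen interfaces involved:

#include <stdio.h>

struct guest {
        int *vcpu_info;
};

static int percpu_slot;         /* analogue of the xen_vcpu_setup() area */
static int shared_info_slot;    /* analogue of the shared-info default */

static void post_suspend(struct guest *g, int suspend_cancelled)
{
        /* real resume: the mapping moved, so re-initialise it */
        if (!suspend_cancelled)
                g->vcpu_info = &shared_info_slot;
        /* cancelled: the per-cpu registration is still valid, keep it */
}

int main(void)
{
        struct guest g = { .vcpu_info = &percpu_slot };

        post_suspend(&g, 1);
        printf("vcpu_info still per-cpu: %s\n",
               g.vcpu_info == &percpu_slot ? "yes" : "no");
        return 0;
}
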
--- /dev/null
+From 9c17d96500f78d7ecdb71ca6942830158bc75a2b Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Tue, 10 Nov 2015 15:10:33 -0500
+Subject: xen/gntdev: Grant maps should not be subject to NUMA balancing
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+commit 9c17d96500f78d7ecdb71ca6942830158bc75a2b upstream.
+
+Doing so will cause the grant to be unmapped and then, during
+fault handling, the fault to be mistakenly treated as a NUMA hint
+fault.
+
+In addition, even if those maps could participate in NUMA
+balancing, it wouldn't provide any benefit since we are unable
+to determine the physical page's node (even if/when VNUMA is
+implemented).
+
+Marking grant maps' VMAs as VM_IO will exclude them from being
+part of NUMA balancing.
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip
+
+ vma->vm_ops = &gntdev_vmops;
+
+- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+
+ if (use_ptemod)
+ vma->vm_flags |= VM_DONTCOPY;