--- /dev/null
+From 88c54cdf61f508ebcf8da2d819f5dfc03e954d1d Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 22 Aug 2017 08:15:13 +0200
+Subject: ALSA: core: Fix unexpected error at replacing user TLV
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 88c54cdf61f508ebcf8da2d819f5dfc03e954d1d upstream.
+
+When user tries to replace the user-defined control TLV, the kernel
+checks the change of its content via memcmp(). The problem is that
+the kernel passes the return value from memcmp() as is. memcmp()
+gives a non-zero negative value depending on the comparison result,
+and this shall be recognized as an error code.
+
+The patch covers that corner-case, return 1 properly for the changed
+TLV.
+
+Fixes: 8aa9b586e420 ("[ALSA] Control API - more robust TLV implementation")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/control.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1157,7 +1157,7 @@ static int snd_ctl_elem_user_tlv(struct
+ mutex_lock(&ue->card->user_ctl_lock);
+ change = ue->tlv_data_size != size;
+ if (!change)
+- change = memcmp(ue->tlv_data, new_data, size);
++ change = memcmp(ue->tlv_data, new_data, size) != 0;
+ kfree(ue->tlv_data);
+ ue->tlv_data = new_data;
+ ue->tlv_data_size = size;
--- /dev/null
+From 0c264af7be2013266c5b4c644f3f366399ee490a Mon Sep 17 00:00:00 2001
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Date: Sun, 20 Aug 2017 15:54:26 +0900
+Subject: ALSA: firewire: fix NULL pointer dereference when releasing uninitialized data of iso-resource
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+commit 0c264af7be2013266c5b4c644f3f366399ee490a upstream.
+
+When calling 'iso_resource_free()' for uninitialized data, this function
+causes NULL pointer dereference due to its 'unit' member. This occurs when
+unplugging audio and music units on IEEE 1394 bus at failure of card
+registration.
+
+This commit fixes the bug. The bug exists since kernel v4.5.
+
+Fixes: 324540c4e05c ('ALSA: fireface: postpone sound card registration') at v4.12
+Fixes: 8865a31e0fd8 ('ALSA: firewire-motu: postpone sound card registration') at v4.12
+Fixes: b610386c8afb ('ALSA: firewire-tascam: deleyed registration of sound card') at v4.7
+Fixes: 86c8dd7f4da3 ('ALSA: firewire-digi00x: delayed registration of sound card') at v4.7
+Fixes: 6c29230e2a5f ('ALSA: oxfw: delayed registration of sound card') at v4.7
+Fixes: 7d3c1d5901aa ('ALSA: fireworks: delayed registration of sound card') at v4.7
+Fixes: 04a2c73c97eb ('ALSA: bebob: delayed registration of sound card') at v4.7
+Fixes: b59fb1900b4f ('ALSA: dice: postpone card registration') at v4.5
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/firewire/iso-resources.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/sound/firewire/iso-resources.c
++++ b/sound/firewire/iso-resources.c
+@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
+ */
+ void fw_iso_resources_free(struct fw_iso_resources *r)
+ {
+- struct fw_card *card = fw_parent_device(r->unit)->card;
++ struct fw_card *card;
+ int bandwidth, channel;
+
++ /* Not initialized. */
++ if (r->unit == NULL)
++ return;
++ card = fw_parent_device(r->unit)->card;
++
+ mutex_lock(&r->mutex);
+
+ if (r->allocated) {
--- /dev/null
+From dbd7396b4f24e0c3284fcc05f5def24f52c09884 Mon Sep 17 00:00:00 2001
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Date: Sun, 20 Aug 2017 15:55:02 +0900
+Subject: ALSA: firewire-motu: destroy stream data surely at failure of card initialization
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+commit dbd7396b4f24e0c3284fcc05f5def24f52c09884 upstream.
+
+When failing sound card registration after initializing stream data, this
+module leaves allocated data in stream data. This commit fixes the bug.
+
+Fixes: 9b2bb4f2f4a2 ('ALSA: firewire-motu: add stream management functionality')
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/firewire/motu/motu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/firewire/motu/motu.c
++++ b/sound/firewire/motu/motu.c
+@@ -128,6 +128,7 @@ static void do_registration(struct work_
+ return;
+ error:
+ snd_motu_transaction_unregister(motu);
++ snd_motu_stream_destroy_duplex(motu);
+ snd_card_free(motu->card);
+ dev_info(&motu->unit->device,
+ "Sound card registration failed: %d\n", err);
--- /dev/null
+From bbba6f9d3da357bbabc6fda81e99ff5584500e76 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 23 Aug 2017 09:30:17 +0200
+Subject: ALSA: hda - Add stereo mic quirk for Lenovo G50-70 (17aa:3978)
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit bbba6f9d3da357bbabc6fda81e99ff5584500e76 upstream.
+
+Lenovo G50-70 (17aa:3978) with Conexant codec chip requires the
+similar workaround for the inverted stereo dmic like other Lenovo
+models.
+
+Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=1020657
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt506
+ SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
--- /dev/null
+From 07b3b5e9ed807a0d2077319b8e43a42e941db818 Mon Sep 17 00:00:00 2001
+From: Joakim Tjernlund <joakim.tjernlund@infinera.com>
+Date: Tue, 22 Aug 2017 08:33:53 +0200
+Subject: ALSA: usb-audio: Add delay quirk for H650e/Jabra 550a USB headsets
+
+From: Joakim Tjernlund <joakim.tjernlund@infinera.com>
+
+commit 07b3b5e9ed807a0d2077319b8e43a42e941db818 upstream.
+
+These headsets report a lot of: cannot set freq 44100 to ep 0x81
+and need a small delay between sample rate settings, just like
+Zoom R16/24. Add both headsets to the Zoom R16/24 quirk for
+a 1 ms delay between control msgs.
+
+Signed-off-by: Joakim Tjernlund <joakim.tjernlund@infinera.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/quirks.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_de
+ && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
+- /* Zoom R16/24 needs a tiny delay here, otherwise requests like
+- * get/set frequency return as failed despite actually succeeding.
++ /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
++ * otherwise requests like get/set frequency return as failed despite
++ * actually succeeding.
+ */
+- if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
++ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
++ chip->usb_id == USB_ID(0x046d, 0x0a46) ||
++ chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(1);
+ }
--- /dev/null
+From 7d79cee2c6540ea64dd917a14e2fd63d4ac3d3c0 Mon Sep 17 00:00:00 2001
+From: Alexey Brodkin <Alexey.Brodkin@synopsys.com>
+Date: Tue, 1 Aug 2017 12:58:47 +0300
+Subject: ARCv2: PAE40: Explicitly set MSB counterpart of SLC region ops addresses
+
+From: Alexey Brodkin <Alexey.Brodkin@synopsys.com>
+
+commit 7d79cee2c6540ea64dd917a14e2fd63d4ac3d3c0 upstream.
+
+It is necessary to explicitly set both SLC_AUX_RGN_START1 and SLC_AUX_RGN_END1,
+which hold MSB bits of the physical address of region start and end
+correspondingly, otherwise the SLC region operation is executed in an
+unpredictable manner.
+
+Without this patch, SLC flushes on HSDK (IOC disabled) were taking
+seconds.
+
+Reported-by: Vladimir Kondratiev <vladimir.kondratiev@intel.com>
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+[vgupta: PAR40 regs only written if PAE40 exist]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/asm/cache.h | 2 ++
+ arch/arc/mm/cache.c | 13 +++++++++++--
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_e
+ #define ARC_REG_SLC_FLUSH 0x904
+ #define ARC_REG_SLC_INVALIDATE 0x905
+ #define ARC_REG_SLC_RGN_START 0x914
++#define ARC_REG_SLC_RGN_START1 0x915
+ #define ARC_REG_SLC_RGN_END 0x916
++#define ARC_REG_SLC_RGN_END1 0x917
+
+ /* Bit val in SLC_CONTROL */
+ #define SLC_CTRL_DIS 0x001
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr,
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ unsigned int ctrl;
++ phys_addr_t end;
+
+ spin_lock_irqsave(&lock, flags);
+
+@@ -694,8 +695,16 @@ noinline void slc_op(phys_addr_t paddr,
+ * END needs to be setup before START (latter triggers the operation)
+ * END can't be same as START, so add (l2_line_sz - 1) to sz
+ */
+- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
++ end = paddr + sz + l2_line_sz - 1;
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
++
++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
++
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
++
++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
+
+ /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
+ read_aux_reg(ARC_REG_SLC_CTRL);
--- /dev/null
+From b5ddb6d54729d814356937572d6c9b599f10c29f Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Thu, 3 Aug 2017 17:45:44 +0530
+Subject: ARCv2: PAE40: set MSB even if !CONFIG_ARC_HAS_PAE40 but PAE exists in SoC
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit b5ddb6d54729d814356937572d6c9b599f10c29f upstream.
+
+PAE40 configuration in hardware extends some of the address registers
+for TLB/cache ops to 2 words.
+
+So far kernel was NOT setting the higher word if feature was not enabled
+in software which is wrong. Those need to be set to 0 in such case.
+
+Normally this would be done in the cache flush / tlb ops, however since
+these registers only exist conditionally, this would have to be
+conditional to a flag being set on boot which is expensive/ugly -
+specially for the more common case of PAE exists but not in use.
+Optimize that by zero'ing them once at boot - nobody will write to
+them afterwards
+
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/asm/mmu.h | 2 ++
+ arch/arc/mm/cache.c | 34 ++++++++++++++++++++++++++++------
+ arch/arc/mm/tlb.c | 12 +++++++++++-
+ 3 files changed, 41 insertions(+), 7 deletions(-)
+
+--- a/arch/arc/include/asm/mmu.h
++++ b/arch/arc/include/asm/mmu.h
+@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
+ return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+ }
+
++extern int pae40_exist_but_not_enab(void);
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1123,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
+ __dc_enable();
+ }
+
++/*
++ * Cache related boot time checks/setups only needed on master CPU:
++ * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
++ * Assume SMP only, so all cores will have same cache config. A check on
++ * one core suffices for all
++ * - IOC setup / dma callbacks only need to be done once
++ */
+ void __init arc_cache_init_master(void)
+ {
+ unsigned int __maybe_unused cpu = smp_processor_id();
+@@ -1202,12 +1209,27 @@ void __ref arc_cache_init(void)
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+- /*
+- * Only master CPU needs to execute rest of function:
+- * - Assume SMP so all cores will have same cache config so
+- * any geomtry checks will be same for all
+- * - IOC setup / dma callbacks only need to be setup once
+- */
+ if (!cpu)
+ arc_cache_init_master();
++
++ /*
++ * In PAE regime, TLB and cache maintenance ops take wider addresses
++ * And even if PAE is not enabled in kernel, the upper 32-bits still need
++ * to be zeroed to keep the ops sane.
++ * As an optimization for more common !PAE enabled case, zero them out
++ * once at init, rather than checking/setting to 0 for every runtime op
++ */
++ if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
++
++ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
++ write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
++
++ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
++ write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
++
++ if (l2_line_sz) {
++ write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
++ write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
++ }
++ }
+ }
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -104,6 +104,8 @@
+ /* A copy of the ASID from the PID reg is kept in asid_cache */
+ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+
++static int __read_mostly pae_exists;
++
+ /*
+ * Utility Routine to erase a J-TLB entry
+ * Caller needs to setup Index Reg (manually or via getIndex)
+@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
+ mmu->u_dtlb = mmu4->u_dtlb * 4;
+ mmu->u_itlb = mmu4->u_itlb * 4;
+ mmu->sasid = mmu4->sasid;
+- mmu->pae = mmu4->pae;
++ pae_exists = mmu->pae = mmu4->pae;
+ }
+ }
+
+@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, cha
+ return buf;
+ }
+
++int pae40_exist_but_not_enab(void)
++{
++ return pae_exists && !is_pae40_enabled();
++}
++
+ void arc_mmu_init(void)
+ {
+ char str[256];
+@@ -859,6 +866,9 @@ void arc_mmu_init(void)
+ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+ #endif
++
++ if (pae40_exist_but_not_enab())
++ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+ }
+
+ /*
--- /dev/null
+From b37174d95b0251611a80ef60abf03752e9d66d67 Mon Sep 17 00:00:00 2001
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Date: Fri, 7 Jul 2017 12:25:14 +0300
+Subject: ARCv2: SLC: Make sure busy bit is set properly for region ops
+
+From: Alexey Brodkin <abrodkin@synopsys.com>
+
+commit b37174d95b0251611a80ef60abf03752e9d66d67 upstream.
+
+c70c473396cb "ARCv2: SLC: Make sure busy bit is set properly on SLC flushing"
+fixes problem for entire SLC operation where the problem was initially
+caught. But given a nature of the issue it is perfectly possible for
+busy bit to be read incorrectly even when region operation was started.
+
+So extending initial fix for regional operation as well.
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/mm/cache.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -697,6 +697,9 @@ noinline void slc_op(phys_addr_t paddr,
+ write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+ write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+
++ /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
++ read_aux_reg(ARC_REG_SLC_CTRL);
++
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+ spin_unlock_irqrestore(&lock, flags);
--- /dev/null
+From fffa281b48a91ad6dac1a18c5907ece58fa3879b Mon Sep 17 00:00:00 2001
+From: Ross Zwisler <ross.zwisler@linux.intel.com>
+Date: Fri, 25 Aug 2017 15:55:36 -0700
+Subject: dax: fix deadlock due to misaligned PMD faults
+
+From: Ross Zwisler <ross.zwisler@linux.intel.com>
+
+commit fffa281b48a91ad6dac1a18c5907ece58fa3879b upstream.
+
+In DAX there are two separate places where the 2MiB range of a PMD is
+defined.
+
+The first is in the page tables, where a PMD mapping inserted for a
+given address spans from (vmf->address & PMD_MASK) to ((vmf->address &
+PMD_MASK) + PMD_SIZE - 1). That is, from the 2MiB boundary below the
+address to the 2MiB boundary above the address.
+
+So, for example, a fault at address 3MiB (0x30 0000) falls within the
+PMD that ranges from 2MiB (0x20 0000) to 4MiB (0x40 0000).
+
+The second PMD range is in the mapping->page_tree, where a given file
+offset is covered by a radix tree entry that spans from one 2MiB aligned
+file offset to another 2MiB aligned file offset.
+
+So, for example, the file offset for 3MiB (pgoff 768) falls within the
+PMD range for the order 9 radix tree entry that ranges from 2MiB (pgoff
+512) to 4MiB (pgoff 1024).
+
+This system works so long as the addresses and file offsets for a given
+mapping both have the same offsets relative to the start of each PMD.
+
+Consider the case where the starting address for a given file isn't 2MiB
+aligned - say our faulting address is 3 MiB (0x30 0000), but that
+corresponds to the beginning of our file (pgoff 0). Now all the PMDs in
+the mapping are misaligned so that the 2MiB range defined in the page
+tables never matches up with the 2MiB range defined in the radix tree.
+
+The current code notices this case for DAX faults to storage with the
+following test in dax_pmd_insert_mapping():
+
+ if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
+ goto unlock_fallback;
+
+This test makes sure that the pfn we get from the driver is 2MiB
+aligned, and relies on the assumption that the 2MiB alignment of the pfn
+we get back from the driver matches the 2MiB alignment of the faulting
+address.
+
+However, faults to holes were not checked and we could hit the problem
+described above.
+
+This was reported in response to the NVML nvml/src/test/pmempool_sync
+TEST5:
+
+ $ cd nvml/src/test/pmempool_sync
+ $ make TEST5
+
+You can grab NVML here:
+
+ https://github.com/pmem/nvml/
+
+The dmesg warning you see when you hit this error is:
+
+ WARNING: CPU: 13 PID: 2900 at fs/dax.c:641 dax_insert_mapping_entry+0x2df/0x310
+
+Where we notice in dax_insert_mapping_entry() that the radix tree entry
+we are about to replace doesn't match the locked entry that we had
+previously inserted into the tree. This happens because the initial
+insertion was done in grab_mapping_entry() using a pgoff calculated from
+the faulting address (vmf->address), and the replacement in
+dax_pmd_load_hole() => dax_insert_mapping_entry() is done using
+vmf->pgoff.
+
+In our failure case those two page offsets (one calculated from
+vmf->address, one using vmf->pgoff) point to different order 9 radix
+tree entries.
+
+This failure case can result in a deadlock because the radix tree unlock
+also happens on the pgoff calculated from vmf->address. This means that
+the locked radix tree entry that we swapped in to the tree in
+dax_insert_mapping_entry() using vmf->pgoff is never unlocked, so all
+future faults to that 2MiB range will block forever.
+
+Fix this by validating that the faulting address's PMD offset matches
+the PMD offset from the start of the file. This check is done at the
+very beginning of the fault and covers faults that would have mapped to
+storage as well as faults to holes. I left the COLOUR check in
+dax_pmd_insert_mapping() in place in case we ever hit the insanity
+condition where the alignment of the pfn we get from the driver doesn't
+match the alignment of the userspace address.
+
+Link: http://lkml.kernel.org/r/20170822222436.18926-1-ross.zwisler@linux.intel.com
+Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
+Reported-by: "Slusarz, Marcin" <marcin.slusarz@intel.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Matthew Wilcox <mawilcox@microsoft.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/dax.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1380,6 +1380,16 @@ static int dax_iomap_pmd_fault(struct vm
+
+ trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
+
++ /*
++ * Make sure that the faulting address's PMD offset (color) matches
++ * the PMD offset from the start of the file. This is necessary so
++ * that a PMD range in the page table overlaps exactly with a PMD
++ * range in the radix tree.
++ */
++ if ((vmf->pgoff & PG_PMD_COLOUR) !=
++ ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
++ goto fallback;
++
+ /* Fall back to PTEs if we're going to COW */
+ if (write && !(vma->vm_flags & VM_SHARED))
+ goto fallback;
--- /dev/null
+From 4a646580f793d19717f7e034c8d473b509c27d49 Mon Sep 17 00:00:00 2001
+From: Masaki Ota <masaki.ota@jp.alps.com>
+Date: Thu, 24 Aug 2017 15:44:36 -0700
+Subject: Input: ALPS - fix two-finger scroll breakage in right side on ALPS touchpad
+
+From: Masaki Ota <masaki.ota@jp.alps.com>
+
+commit 4a646580f793d19717f7e034c8d473b509c27d49 upstream.
+
+Fixed the issue that two finger scroll does not work correctly
+on V8 protocol. The cause is that V8 protocol X-coordinate decode
+is wrong at SS4 PLUS device. I added SS4 PLUS X decode definition.
+
+More notes:
+the problem manifests itself by the commit e7348396c6d5 ("Input: ALPS
+- fix V8+ protocol handling (73 03 28)"), where a fix for the V8+
+protocol was applied. Although the culprit must have been present
+beforehand, the two-finger scroll worked casually even with the
+wrongly reported values by some reason. It got broken by the commit
+above just because it changed x_max value, and this made libinput
+correctly figuring the MT events. Since the X coord is reported as
+falsely doubled, the events on the right-half side go outside the
+boundary, thus they are no longer handled. This resulted as a broken
+two-finger scroll.
+
+One finger event is decoded differently, and it didn't suffer from
+this problem. The problem was only about MT events. --tiwai
+
+Fixes: e7348396c6d5 ("Input: ALPS - fix V8+ protocol handling (73 03 28)")
+Signed-off-by: Masaki Ota <masaki.ota@jp.alps.com>
+Tested-by: Takashi Iwai <tiwai@suse.de>
+Tested-by: Paul Donohue <linux-kernel@PaulSD.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/alps.c | 41 +++++++++++++++++++++++++++++++----------
+ drivers/input/mouse/alps.h | 8 ++++++++
+ 2 files changed, 39 insertions(+), 10 deletions(-)
+
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alp
+
+ case SS4_PACKET_ID_TWO:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
+ } else {
+- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
+ }
+ f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
+@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alp
+
+ case SS4_PACKET_ID_MULTI:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
++ }
++
+ f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
+ } else {
+- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
+ no_data_y = SS4_MFPACKET_NO_AY;
+@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(stru
+
+ memset(otp, 0, sizeof(otp));
+
+- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
+- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
++ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
++ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
+ return -1;
+
+ alps_update_device_area_ss4_v2(otp, priv);
+--- a/drivers/input/mouse/alps.h
++++ b/drivers/input/mouse/alps.h
+@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
+ ((_b[1 + _i * 3] << 5) & 0x1F00) \
+ )
+
++#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
++ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
++ )
++
+ #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
+ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
+ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
+@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
+ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
+ )
+
++#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
++ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
++ )
++
+ #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
+ )
--- /dev/null
+From 1d2226e45040ed4aee95b633cbd64702bf7fc2a1 Mon Sep 17 00:00:00 2001
+From: KT Liao <kt.liao@emc.com.tw>
+Date: Fri, 18 Aug 2017 16:58:15 -0700
+Subject: Input: elan_i2c - add ELAN0602 ACPI ID to support Lenovo Yoga310
+
+From: KT Liao <kt.liao@emc.com.tw>
+
+commit 1d2226e45040ed4aee95b633cbd64702bf7fc2a1 upstream.
+
+Add ELAN0602 to the list of known ACPI IDs to enable support for ELAN
+touchpads found in Lenovo Yoga310.
+
+Signed-off-by: KT Liao <kt.liao@emc.com.tw>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/elan_i2c_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1223,6 +1223,7 @@ static const struct acpi_device_id elan_
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
++ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0605", 0 },
--- /dev/null
+From ec667683c532c93fb41e100e5d61a518971060e2 Mon Sep 17 00:00:00 2001
+From: Aaron Ma <aaron.ma@canonical.com>
+Date: Fri, 18 Aug 2017 12:17:21 -0700
+Subject: Input: trackpoint - add new trackpoint firmware ID
+
+From: Aaron Ma <aaron.ma@canonical.com>
+
+commit ec667683c532c93fb41e100e5d61a518971060e2 upstream.
+
+Synaptics add new TP firmware ID: 0x2 and 0x3, for now both lower 2 bits
+are indicated as TP. Change the constant to bitwise values.
+
+This makes trackpoint to be recognized on Lenovo Carbon X1 Gen5 instead
+of it being identified as "PS/2 Generic Mouse".
+
+Signed-off-by: Aaron Ma <aaron.ma@canonical.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/trackpoint.c | 3 ++-
+ drivers/input/mouse/trackpoint.h | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(str
+ if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ return -1;
+
+- if (param[0] != TP_MAGIC_IDENT)
++ /* add new TP ID. */
++ if (!(param[0] & TP_MAGIC_IDENT))
+ return -1;
+
+ if (firmware_id)
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -21,8 +21,9 @@
+ #define TP_COMMAND 0xE2 /* Commands start with this */
+
+ #define TP_READ_ID 0xE1 /* Sent for device identification */
+-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
+ /* by the firmware ID */
++ /* Firmware ID includes 0x1, 0x2, 0x3 */
+
+
+ /*
--- /dev/null
+From 38cfd5e3df9c4f88e76b547eee2087ee5c042ae2 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 23 Aug 2017 23:16:29 +0200
+Subject: KVM, pkeys: do not use PKRU value in vcpu->arch.guest_fpu.state
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 38cfd5e3df9c4f88e76b547eee2087ee5c042ae2 upstream.
+
+The host pkru is restored right after vcpu exit (commit 1be0e61), so
+KVM_GET_XSAVE will return the host PKRU value instead. Fix this by
+using the guest PKRU explicitly in fill_xsave and load_xsave. This
+part is based on a patch by Junkang Fu.
+
+The host PKRU data may also not match the value in vcpu->arch.guest_fpu.state,
+because it could have been changed by userspace since the last time
+it was saved, so skip loading it in kvm_load_guest_fpu.
+
+Reported-by: Junkang Fu <junkang.fjk@alibaba-inc.com>
+Cc: Yang Zhang <zy107165@alibaba-inc.com>
+Fixes: 1be0e61c1f255faaeab04a390e00c8b9b9042870
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/fpu/internal.h | 6 +++---
+ arch/x86/kvm/x86.c | 17 ++++++++++++++---
+ 2 files changed, 17 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate
+ return 0;
+ }
+
+-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
++static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
+ {
+ if (use_xsave()) {
+- copy_kernel_to_xregs(&fpstate->xsave, -1);
++ copy_kernel_to_xregs(&fpstate->xsave, mask);
+ } else {
+ if (use_fxsr())
+ copy_kernel_to_fxregs(&fpstate->fxsave);
+@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs
+ : : [addr] "m" (fpstate));
+ }
+
+- __copy_kernel_to_fpregs(fpstate);
++ __copy_kernel_to_fpregs(fpstate, -1);
+ }
+
+ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+- memcpy(dest + offset, src, size);
++ if (feature == XFEATURE_MASK_PKRU)
++ memcpy(dest + offset, &vcpu->arch.pkru,
++ sizeof(vcpu->arch.pkru));
++ else
++ memcpy(dest + offset, src, size);
++
+ }
+
+ valid -= feature;
+@@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+- memcpy(dest, src + offset, size);
++ if (feature == XFEATURE_MASK_PKRU)
++ memcpy(&vcpu->arch.pkru, src + offset,
++ sizeof(vcpu->arch.pkru));
++ else
++ memcpy(dest, src + offset, size);
+ }
+
+ valid -= feature;
+@@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu
+ */
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
++ /* PKRU is separately restored in kvm_x86_ops->run. */
++ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
++ ~XFEATURE_MASK_PKRU);
+ trace_kvm_fpu(1);
+ }
+
--- /dev/null
+From 857b8de96795646c5891cf44ae6fb19b9ff74bf9 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Thu, 3 Aug 2017 14:27:30 +0200
+Subject: KVM: s390: sthyi: fix specification exception detection
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 857b8de96795646c5891cf44ae6fb19b9ff74bf9 upstream.
+
+sthyi should only generate a specification exception if the function
+code is zero and the response buffer is not on a 4k boundary.
+
+The current code would also test for unknown function codes if the
+response buffer, that is currently only defined for function code 0,
+is not on a 4k boundary and incorrectly inject a specification
+exception instead of returning with condition code 3 and return code 4
+(unsupported function code).
+
+Fix this by moving the boundary check.
+
+Fixes: 95ca2cb57985 ("KVM: s390: Add sthyi emulation")
+Reviewed-by: Janosch Frank <frankja@linux.vnet.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/sthyi.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
++ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ goto out;
+ }
+
++ if (addr & ~PAGE_MASK)
++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
+ /*
+ * If the page has not yet been faulted in, we want to do that
+ * now and not after all the expensive calculations.
--- /dev/null
+From 4a4eefcd0e49f9f339933324c1bde431186a0a7d Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Thu, 3 Aug 2017 13:05:11 +0200
+Subject: KVM: s390: sthyi: fix sthyi inline assembly
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 4a4eefcd0e49f9f339933324c1bde431186a0a7d upstream.
+
+The sthyi inline assembly misses register r3 within the clobber
+list. The sthyi instruction will always write a return code to
+register "R2+1", which in this case would be r3. Due to that we may
+have register corruption and see host crashes or data corruption
+depending on how gcc decided to allocate and use registers during
+compile time.
+
+Fixes: 95ca2cb57985 ("KVM: s390: Add sthyi emulation")
+Reviewed-by: Janosch Frank <frankja@linux.vnet.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/sthyi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
+ "srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [code] "d" (code), [addr] "a" (addr)
+- : "memory", "cc");
++ : "3", "memory", "cc");
+ return cc;
+ }
+
--- /dev/null
+From c469268cd523245cc58255f6696e0c295485cb0b Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 24 Aug 2017 11:59:31 +0200
+Subject: KVM: x86: block guest protection keys unless the host has them enabled
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit c469268cd523245cc58255f6696e0c295485cb0b upstream.
+
+If the host has protection keys disabled, we cannot read and write the
+guest PKRU---RDPKRU and WRPKRU fail with #GP(0) if CR4.PKE=0. Block
+the PKU cpuid bit in that case.
+
+This ensures that guest_CR4.PKE=1 implies host_CR4.PKE=1.
+
+Fixes: 1be0e61c1f255faaeab04a390e00c8b9b9042870
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct
+ entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* PKU is not yet implemented for shadow paging. */
+- if (!tdp_enabled)
++ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+ entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
--- /dev/null
+From b9dd21e104bcd45e124acfe978a79df71259e59b Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 23 Aug 2017 23:14:38 +0200
+Subject: KVM: x86: simplify handling of PKRU
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit b9dd21e104bcd45e124acfe978a79df71259e59b upstream.
+
+Move it to struct kvm_arch_vcpu, replacing guest_pkru_valid with a
+simple comparison against the host value of the register. The write of
+PKRU in addition can be skipped if the guest has not enabled the feature.
+Once we do this, we need not test OSPKE in the host anymore, because
+guest_CR4.PKE=1 implies host_CR4.PKE=1.
+
+The static PKU test is kept to elide the code on older CPUs.
+
+Suggested-by: Yang Zhang <zy107165@alibaba-inc.com>
+Fixes: 1be0e61c1f255faaeab04a390e00c8b9b9042870
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/kvm_cache_regs.h | 5 -----
+ arch/x86/kvm/mmu.h | 2 +-
+ arch/x86/kvm/svm.c | 7 -------
+ arch/x86/kvm/vmx.c | 25 ++++++++-----------------
+ 5 files changed, 10 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -486,6 +486,7 @@ struct kvm_vcpu_arch {
+ unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
+ unsigned long cr8;
++ u32 pkru;
+ u32 hflags;
+ u64 efer;
+ u64 apic_base;
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struc
+ | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+ }
+
+-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
+-{
+- return kvm_x86_ops->get_pkru(vcpu);
+-}
+-
+ static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hflags |= HF_GUEST_MASK;
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -182,7 +182,7 @@ static inline u8 permission_fault(struct
+ * index of the protection domain, so pte_pkey * 2 is
+ * is the index of the first bit for the domain.
+ */
+- pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
++ pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
+
+ /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
+ offset = (pfec & ~1) +
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1725,11 +1725,6 @@ static void svm_set_rflags(struct kvm_vc
+ to_svm(vcpu)->vmcb->save.rflags = rflags;
+ }
+
+-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
+-{
+- return 0;
+-}
+-
+ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+ {
+ switch (reg) {
+@@ -5313,8 +5308,6 @@ static struct kvm_x86_ops svm_x86_ops __
+ .get_rflags = svm_get_rflags,
+ .set_rflags = svm_set_rflags,
+
+- .get_pkru = svm_get_pkru,
+-
+ .tlb_flush = svm_flush_tlb,
+
+ .run = svm_vcpu_run,
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -636,8 +636,6 @@ struct vcpu_vmx {
+
+ u64 current_tsc_ratio;
+
+- bool guest_pkru_valid;
+- u32 guest_pkru;
+ u32 host_pkru;
+
+ /*
+@@ -2368,11 +2366,6 @@ static void vmx_set_rflags(struct kvm_vc
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+
+-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
+-{
+- return to_vmx(vcpu)->guest_pkru;
+-}
+-
+ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+ {
+ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+@@ -8860,8 +8853,10 @@ static void __noclone vmx_vcpu_run(struc
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vmx_set_interrupt_shadow(vcpu, 0);
+
+- if (vmx->guest_pkru_valid)
+- __write_pkru(vmx->guest_pkru);
++ if (static_cpu_has(X86_FEATURE_PKU) &&
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
++ vcpu->arch.pkru != vmx->host_pkru)
++ __write_pkru(vcpu->arch.pkru);
+
+ atomic_switch_perf_msrs(vmx);
+ debugctlmsr = get_debugctlmsr();
+@@ -9009,13 +9004,11 @@ static void __noclone vmx_vcpu_run(struc
+ * back on host, so it is safe to read guest PKRU from current
+ * XSAVE.
+ */
+- if (boot_cpu_has(X86_FEATURE_OSPKE)) {
+- vmx->guest_pkru = __read_pkru();
+- if (vmx->guest_pkru != vmx->host_pkru) {
+- vmx->guest_pkru_valid = true;
++ if (static_cpu_has(X86_FEATURE_PKU) &&
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
++ vcpu->arch.pkru = __read_pkru();
++ if (vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vmx->host_pkru);
+- } else
+- vmx->guest_pkru_valid = false;
+ }
+
+ /*
+@@ -11507,8 +11500,6 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
+
+- .get_pkru = vmx_get_pkru,
+-
+ .tlb_flush = vmx_flush_tlb,
+
+ .run = vmx_vcpu_run,
--- /dev/null
+From 435c0b87d661da83771c30ed775f7c37eed193fb Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Fri, 25 Aug 2017 15:55:33 -0700
+Subject: mm, shmem: fix handling /sys/kernel/mm/transparent_hugepage/shmem_enabled
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit 435c0b87d661da83771c30ed775f7c37eed193fb upstream.
+
+/sys/kernel/mm/transparent_hugepage/shmem_enabled controls if we want
+to allocate huge pages when allocate pages for private in-kernel shmem
+mount.
+
+Unfortunately, as Dan noticed, I've screwed it up and the only way to
+make kernel allocate huge page for the mount is to use "force" there.
+All other values will be effectively ignored.
+
+Link: http://lkml.kernel.org/r/20170822144254.66431-1-kirill.shutemov@linux.intel.com
+Fixes: 5a6e75f8110c ("shmem: prepare huge= mount option and sysfs knob")
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/shmem.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3964,7 +3964,7 @@ int __init shmem_init(void)
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+- if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
++ if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
+ SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
+ else
+ shmem_huge = 0; /* just in case it was patched */
+@@ -4025,7 +4025,7 @@ static ssize_t shmem_enabled_store(struc
+ return -EINVAL;
+
+ shmem_huge = huge;
+- if (shmem_huge < SHMEM_HUGE_DENY)
++ if (shmem_huge > SHMEM_HUGE_DENY)
+ SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
+ return count;
+ }
--- /dev/null
+From 556b969a1cfe2686aae149137fa1dfcac0eefe54 Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Fri, 25 Aug 2017 15:55:30 -0700
+Subject: PM/hibernate: touch NMI watchdog when creating snapshot
+
+From: Chen Yu <yu.c.chen@intel.com>
+
+commit 556b969a1cfe2686aae149137fa1dfcac0eefe54 upstream.
+
+There is a problem in that counting the pages when creating the
+hibernation snapshot can take a significant amount of time, especially on
+a system with large memory. Since the counting job is performed with irqs
+disabled, this might lead to an NMI lockup. The following warning was
+found on a system with 1.5TB DRAM:
+
+ Freezing user space processes ... (elapsed 0.002 seconds) done.
+ OOM killer disabled.
+ PM: Preallocating image memory...
+ NMI watchdog: Watchdog detected hard LOCKUP on cpu 27
+ CPU: 27 PID: 3128 Comm: systemd-sleep Not tainted 4.13.0-0.rc2.git0.1.fc27.x86_64 #1
+ task: ffff9f01971ac000 task.stack: ffffb1a3f325c000
+ RIP: 0010:memory_bm_find_bit+0xf4/0x100
+ Call Trace:
+ swsusp_set_page_free+0x2b/0x30
+ mark_free_pages+0x147/0x1c0
+ count_data_pages+0x41/0xa0
+ hibernate_preallocate_memory+0x80/0x450
+ hibernation_snapshot+0x58/0x410
+ hibernate+0x17c/0x310
+ state_store+0xdf/0xf0
+ kobj_attr_store+0xf/0x20
+ sysfs_kf_write+0x37/0x40
+ kernfs_fop_write+0x11c/0x1a0
+ __vfs_write+0x37/0x170
+ vfs_write+0xb1/0x1a0
+ SyS_write+0x55/0xc0
+ entry_SYSCALL_64_fastpath+0x1a/0xa5
+ ...
+ done (allocated 6590003 pages)
+ PM: Allocated 26360012 kbytes in 19.89 seconds (1325.28 MB/s)
+
+It has taken nearly 20 seconds(2.10GHz CPU) thus the NMI lockup was
+triggered. In case the timeout of the NMI watchdog has been set to 1
+second, a safe interval should be 6590003/20 = 320k pages in theory.
+However there might also be some platforms running at a lower frequency,
+so feed the watchdog every 100k pages.
+
+[yu.c.chen@intel.com: simplification]
+ Link: http://lkml.kernel.org/r/1503460079-29721-1-git-send-email-yu.c.chen@intel.com
+[yu.c.chen@intel.com: use interval of 128k instead of 100k to avoid modulus]
+Link: http://lkml.kernel.org/r/1503328098-5120-1-git-send-email-yu.c.chen@intel.com
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Reported-by: Jan Filipcewicz <jan.filipcewicz@intel.com>
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Len Brown <lenb@kernel.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -66,6 +66,7 @@
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+ #include <linux/ftrace.h>
++#include <linux/nmi.h>
+
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -2495,9 +2496,14 @@ void drain_all_pages(struct zone *zone)
+
+ #ifdef CONFIG_HIBERNATION
+
++/*
++ * Touch the watchdog for every WD_PAGE_COUNT pages.
++ */
++#define WD_PAGE_COUNT (128*1024)
++
+ void mark_free_pages(struct zone *zone)
+ {
+- unsigned long pfn, max_zone_pfn;
++ unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
+ unsigned long flags;
+ unsigned int order, t;
+ struct page *page;
+@@ -2512,6 +2518,11 @@ void mark_free_pages(struct zone *zone)
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+
++ if (!--page_count) {
++ touch_nmi_watchdog();
++ page_count = WD_PAGE_COUNT;
++ }
++
+ if (page_zone(page) != zone)
+ continue;
+
+@@ -2525,8 +2536,13 @@ void mark_free_pages(struct zone *zone)
+ unsigned long i;
+
+ pfn = page_to_pfn(page);
+- for (i = 0; i < (1UL << order); i++)
++ for (i = 0; i < (1UL << order); i++) {
++ if (!--page_count) {
++ touch_nmi_watchdog();
++ page_count = WD_PAGE_COUNT;
++ }
+ swsusp_set_page_free(pfn_to_page(pfn + i));
++ }
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
bpf-verifier-add-additional-patterns-to-evaluate_reg_imm_alu.patch
bpf-fix-mixed-signed-unsigned-derived-min-max-value-bounds.patch
bpf-verifier-fix-min-max-handling-in-bpf_sub.patch
+input-trackpoint-add-new-trackpoint-firmware-id.patch
+input-elan_i2c-add-elan0602-acpi-id-to-support-lenovo-yoga310.patch
+input-alps-fix-two-finger-scroll-breakage-in-right-side-on-alps-touchpad.patch
+kvm-s390-sthyi-fix-sthyi-inline-assembly.patch
+kvm-s390-sthyi-fix-specification-exception-detection.patch
+kvm-x86-simplify-handling-of-pkru.patch
+kvm-pkeys-do-not-use-pkru-value-in-vcpu-arch.guest_fpu.state.patch
+kvm-x86-block-guest-protection-keys-unless-the-host-has-them-enabled.patch
+alsa-usb-audio-add-delay-quirk-for-h650e-jabra-550a-usb-headsets.patch
+alsa-core-fix-unexpected-error-at-replacing-user-tlv.patch
+alsa-hda-add-stereo-mic-quirk-for-lenovo-g50-70-17aa-3978.patch
+alsa-firewire-fix-null-pointer-dereference-when-releasing-uninitialized-data-of-iso-resource.patch
+alsa-firewire-motu-destroy-stream-data-surely-at-failure-of-card-initialization.patch
+arcv2-slc-make-sure-busy-bit-is-set-properly-for-region-ops.patch
+arcv2-pae40-explicitly-set-msb-counterpart-of-slc-region-ops-addresses.patch
+arcv2-pae40-set-msb-even-if-config_arc_has_pae40-but-pae-exists-in-soc.patch
+pm-hibernate-touch-nmi-watchdog-when-creating-snapshot.patch
+mm-shmem-fix-handling-sys-kernel-mm-transparent_hugepage-shmem_enabled.patch
+dax-fix-deadlock-due-to-misaligned-pmd-faults.patch