4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 7 Aug 2017 20:38:53 +0000 (13:38 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 7 Aug 2017 20:38:53 +0000 (13:38 -0700)
added patches:
alsa-hda-fix-speaker-output-from-vaio-vpcl14m1r.patch
asoc-do-not-close-shared-backend-dailink.patch
brcmfmac-fix-memleak-due-to-calling-brcmf_sdiod_sgtable_alloc-twice.patch
cgroup-create-dfl_root-files-on-subsys-registration.patch
cgroup-fix-error-return-value-from-cgroup_subtree_control.patch
cpuset-fix-a-deadlock-due-to-incomplete-patching-of-cpusets_enabled.patch
device-property-make-dev_fwnode-public.patch
drm-amdgpu-fix-undue-fallthroughs-in-golden-registers-initialization.patch
iwlwifi-dvm-prevent-an-out-of-bounds-access.patch
kvm-async_pf-make-rcu-irq-exit-if-not-triggered-from-idle-task.patch
libata-array-underflow-in-ata_find_dev.patch
mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch
mmc-core-fix-access-to-hs400-es-devices.patch
mmc-core-use-device_property_read-instead-of-of_property_read.patch
mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch
mmc-sdhci-of-at91-force-card-detect-value-for-non-removable-devices.patch
nfsv4-fix-exchange_id-corrupt-verifier-issue.patch
parisc-handle-vma-s-whose-context-is-not-current-in-flush_cache_range.patch
workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch

20 files changed:
queue-4.9/alsa-hda-fix-speaker-output-from-vaio-vpcl14m1r.patch [new file with mode: 0644]
queue-4.9/asoc-do-not-close-shared-backend-dailink.patch [new file with mode: 0644]
queue-4.9/brcmfmac-fix-memleak-due-to-calling-brcmf_sdiod_sgtable_alloc-twice.patch [new file with mode: 0644]
queue-4.9/cgroup-create-dfl_root-files-on-subsys-registration.patch [new file with mode: 0644]
queue-4.9/cgroup-fix-error-return-value-from-cgroup_subtree_control.patch [new file with mode: 0644]
queue-4.9/cpuset-fix-a-deadlock-due-to-incomplete-patching-of-cpusets_enabled.patch [new file with mode: 0644]
queue-4.9/device-property-make-dev_fwnode-public.patch [new file with mode: 0644]
queue-4.9/drm-amdgpu-fix-undue-fallthroughs-in-golden-registers-initialization.patch [new file with mode: 0644]
queue-4.9/iwlwifi-dvm-prevent-an-out-of-bounds-access.patch [new file with mode: 0644]
queue-4.9/kvm-async_pf-make-rcu-irq-exit-if-not-triggered-from-idle-task.patch [new file with mode: 0644]
queue-4.9/libata-array-underflow-in-ata_find_dev.patch [new file with mode: 0644]
queue-4.9/mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch [new file with mode: 0644]
queue-4.9/mmc-core-fix-access-to-hs400-es-devices.patch [new file with mode: 0644]
queue-4.9/mmc-core-use-device_property_read-instead-of-of_property_read.patch [new file with mode: 0644]
queue-4.9/mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch [new file with mode: 0644]
queue-4.9/mmc-sdhci-of-at91-force-card-detect-value-for-non-removable-devices.patch [new file with mode: 0644]
queue-4.9/nfsv4-fix-exchange_id-corrupt-verifier-issue.patch [new file with mode: 0644]
queue-4.9/parisc-handle-vma-s-whose-context-is-not-current-in-flush_cache_range.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch [new file with mode: 0644]

diff --git a/queue-4.9/alsa-hda-fix-speaker-output-from-vaio-vpcl14m1r.patch b/queue-4.9/alsa-hda-fix-speaker-output-from-vaio-vpcl14m1r.patch
new file mode 100644 (file)
index 0000000..09544c5
--- /dev/null
@@ -0,0 +1,30 @@
+From 3f3c371421e601fa93b6cb7fb52da9ad59ec90b4 Mon Sep 17 00:00:00 2001
+From: "Sergei A. Trusov" <sergei.a.trusov@ya.ru>
+Date: Wed, 2 Aug 2017 20:23:48 +1000
+Subject: ALSA: hda - Fix speaker output from VAIO VPCL14M1R
+
+From: Sergei A. Trusov <sergei.a.trusov@ya.ru>
+
+commit 3f3c371421e601fa93b6cb7fb52da9ad59ec90b4 upstream.
+
+Sony VAIO VPCL14M1R needs the quirk to make the speaker work properly.
+
+Tested-by: Dmitriy <mexx400@yandex.ru>
+Signed-off-by: Sergei A. Trusov <sergei.a.trusov@ya.ru>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882
+       SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+       SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+       SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++      SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
diff --git a/queue-4.9/asoc-do-not-close-shared-backend-dailink.patch b/queue-4.9/asoc-do-not-close-shared-backend-dailink.patch
new file mode 100644 (file)
index 0000000..6805d5e
--- /dev/null
@@ -0,0 +1,39 @@
+From b1cd2e34c69a2f3988786af451b6e17967c293a0 Mon Sep 17 00:00:00 2001
+From: Banajit Goswami <bgoswami@codeaurora.org>
+Date: Fri, 14 Jul 2017 23:15:05 -0700
+Subject: ASoC: do not close shared backend dailink
+
+From: Banajit Goswami <bgoswami@codeaurora.org>
+
+commit b1cd2e34c69a2f3988786af451b6e17967c293a0 upstream.
+
+Multiple frontend dailinks may be connected to a backend
+dailink at the same time. When one of the frontend dailinks is
+closed, the associated backend dailink should not be closed
+if it is connected to other active frontend dailinks. This change
+ensures that the backend dailink is closed only after all
+connected frontend dailinks are closed.
+
+Signed-off-by: Gopikrishnaiah Anandan <agopik@codeaurora.org>
+Signed-off-by: Banajit Goswami <bgoswami@codeaurora.org>
+Signed-off-by: Patrick Lai <plai@codeaurora.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/soc-pcm.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_so
+               dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
+                               be->dai_link->name, event, dir);
++              if ((event == SND_SOC_DAPM_STREAM_STOP) &&
++                  (be->dpcm[dir].users >= 1))
++                      continue;
++
+               snd_soc_dapm_stream_event(be, dir, event);
+       }
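
The rule described above - a shared backend is torn down only when its last
active frontend goes away - can be pictured with a small standalone C sketch.
The struct, names and counts below are illustrative stand-ins, not the
kernel's DPCM code.

#include <stdio.h>

/* Illustrative stand-in for a DPCM backend shared by several frontends. */
struct backend {
	const char *name;
	int users;		/* frontends currently using this backend */
};

static void frontend_close(struct backend *be)
{
	/* Skip teardown while other frontends still use the backend. */
	if (--be->users > 0) {
		printf("%s: %d user(s) left, not closing\n", be->name, be->users);
		return;
	}
	printf("%s: last user gone, closing backend\n", be->name);
}

int main(void)
{
	struct backend be = { .name = "shared-be", .users = 2 };

	frontend_close(&be);	/* first frontend closes: backend stays up */
	frontend_close(&be);	/* last frontend closes: backend torn down */
	return 0;
}
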
diff --git a/queue-4.9/brcmfmac-fix-memleak-due-to-calling-brcmf_sdiod_sgtable_alloc-twice.patch b/queue-4.9/brcmfmac-fix-memleak-due-to-calling-brcmf_sdiod_sgtable_alloc-twice.patch
new file mode 100644 (file)
index 0000000..cf02ed3
--- /dev/null
@@ -0,0 +1,40 @@
+From 5f5d03143de5e0c593da4ab18fc6393c2815e108 Mon Sep 17 00:00:00 2001
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+Date: Wed, 26 Jul 2017 13:09:24 +0100
+Subject: brcmfmac: fix memleak due to calling brcmf_sdiod_sgtable_alloc() twice
+
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+
+commit 5f5d03143de5e0c593da4ab18fc6393c2815e108 upstream.
+
+Due to a bugfix in the wireless tree and the commit mentioned below, a
+merge was needed, and it went haywire: the submitted change resulted in
+the function brcmf_sdiod_sgtable_alloc() being called twice during probe,
+thus leaking the memory allocated by the first call.
+
+Fixes: 4d7928959832 ("brcmfmac: switch to new platform data")
+Reported-by: Stefan Wahren <stefan.wahren@i2se.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com>
+Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c |    5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4161,11 +4161,6 @@ struct brcmf_sdio *brcmf_sdio_probe(stru
+               goto fail;
+       }
+-      /* allocate scatter-gather table. sg support
+-       * will be disabled upon allocation failure.
+-       */
+-      brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+-
+       /* Query the F2 block size, set roundup accordingly */
+       bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+       bus->roundup = min(max_roundup, bus->blocksize);
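
The leak removed here is the generic double-allocation pattern: the same
resource is allocated twice for one owner and only the second pointer is
kept. A tiny standalone illustration, with made-up names and sizes:

#include <stdlib.h>

/* Illustrative only: a device state that owns one allocation. */
struct dev_state {
	void *sgtable;
};

static int sgtable_alloc(struct dev_state *st)
{
	st->sgtable = malloc(256);	/* each call replaces the stored pointer */
	return st->sgtable ? 0 : -1;
}

int main(void)
{
	struct dev_state st = { 0 };

	sgtable_alloc(&st);	/* first allocation */
	sgtable_alloc(&st);	/* duplicated call: the first buffer becomes
				 * unreachable and leaks */
	free(st.sgtable);	/* only the second buffer can ever be freed */
	return 0;
}
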
diff --git a/queue-4.9/cgroup-create-dfl_root-files-on-subsys-registration.patch b/queue-4.9/cgroup-create-dfl_root-files-on-subsys-registration.patch
new file mode 100644 (file)
index 0000000..6a319bb
--- /dev/null
@@ -0,0 +1,45 @@
+From 7af608e4f9530372aec6e940552bf76595f2e265 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 18 Jul 2017 17:57:46 -0400
+Subject: cgroup: create dfl_root files on subsys registration
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 7af608e4f9530372aec6e940552bf76595f2e265 upstream.
+
+On subsystem registration, css_populate_dir() is not called on the new
+root css, so the interface files for the subsystem on cgrp_dfl_root
+aren't created on registration.  This is a residue from the days when
+cgrp_dfl_root was used only as the parking spot for unused subsystems,
+which is no longer true as it's used as the root for cgroup2.
+
+This is often fine as later operations tend to create them as a part
+of mount (cgroup1) or subtree_control operations (cgroup2); however,
+it's not difficult to mount cgroup2 with the controller interface
+files missing as Waiman found out.
+
+Fix it by invoking css_populate_dir() on the root css on subsys
+registration.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5718,6 +5718,10 @@ int __init cgroup_init(void)
+               if (ss->bind)
+                       ss->bind(init_css_set.subsys[ssid]);
++
++              mutex_lock(&cgroup_mutex);
++              css_populate_dir(init_css_set.subsys[ssid]);
++              mutex_unlock(&cgroup_mutex);
+       }
+       /* init_css_set.subsys[] has been updated, re-hash */
diff --git a/queue-4.9/cgroup-fix-error-return-value-from-cgroup_subtree_control.patch b/queue-4.9/cgroup-fix-error-return-value-from-cgroup_subtree_control.patch
new file mode 100644 (file)
index 0000000..9527ba8
--- /dev/null
@@ -0,0 +1,39 @@
+From 3c74541777302eec43a0d1327c4d58b8659a776b Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sun, 23 Jul 2017 08:14:15 -0400
+Subject: cgroup: fix error return value from cgroup_subtree_control()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 3c74541777302eec43a0d1327c4d58b8659a776b upstream.
+
+While refactoring, f7b2814bb9b6 ("cgroup: factor out
+cgroup_{apply|finalize}_control() from
+cgroup_subtree_control_write()") broke the error return value from the
+function.  The return value from the last operation is always
+overridden to zero.  Fix it.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -3487,11 +3487,11 @@ static ssize_t cgroup_subtree_control_wr
+       cgrp->subtree_control &= ~disable;
+       ret = cgroup_apply_control(cgrp);
+-
+       cgroup_finalize_control(cgrp, ret);
++      if (ret)
++              goto out_unlock;
+       kernfs_activate(cgrp->kn);
+-      ret = 0;
+ out_unlock:
+       cgroup_kn_unlock(of->kn);
+       return ret ?: nbytes;
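
The bug shape in isolation: an error code computed earlier is clobbered by an
unconditional assignment before the common exit path. A hypothetical
standalone sketch (not the cgroup code itself):

#include <stdio.h>

static int apply_control(int fail)
{
	return fail ? -22 : 0;		/* -EINVAL on failure */
}

/* Buggy shape: an unconditional "ret = 0" hides the earlier failure. */
static int subtree_control_write_buggy(int fail)
{
	int ret = apply_control(fail);
	/* ... finalize/cleanup that must run in both cases ... */
	ret = 0;			/* clobbers the error */
	return ret;
}

/* Fixed shape: bail out after the cleanup, keeping the error code. */
static int subtree_control_write_fixed(int fail)
{
	int ret = apply_control(fail);
	/* ... finalize/cleanup ... */
	if (ret)
		goto out;
	printf("activate\n");		/* success-only work */
out:
	return ret;
}

int main(void)
{
	printf("buggy returns %d, fixed returns %d\n",
	       subtree_control_write_buggy(1), subtree_control_write_fixed(1));
	return 0;
}
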
diff --git a/queue-4.9/cpuset-fix-a-deadlock-due-to-incomplete-patching-of-cpusets_enabled.patch b/queue-4.9/cpuset-fix-a-deadlock-due-to-incomplete-patching-of-cpusets_enabled.patch
new file mode 100644 (file)
index 0000000..92fdc2f
--- /dev/null
@@ -0,0 +1,174 @@
+From 89affbf5d9ebb15c6460596822e8857ea2f9e735 Mon Sep 17 00:00:00 2001
+From: Dima Zavin <dmitriyz@waymo.com>
+Date: Wed, 2 Aug 2017 13:32:18 -0700
+Subject: cpuset: fix a deadlock due to incomplete patching of cpusets_enabled()
+
+From: Dima Zavin <dmitriyz@waymo.com>
+
+commit 89affbf5d9ebb15c6460596822e8857ea2f9e735 upstream.
+
+In codepaths that use the begin/retry interface for reading
+mems_allowed_seq with irqs disabled, there exists a race condition that
+stalls the patch process after only modifying a subset of the
+static_branch call sites.
+
+This problem manifested itself as a deadlock in the slub allocator,
+inside get_any_partial.  The loop reads mems_allowed_seq value (via
+read_mems_allowed_begin), performs the defrag operation, and then
+verifies the consistency of mem_allowed via the read_mems_allowed_retry
+and the cookie returned by xxx_begin.
+
+The issue here is that both begin and retry first check if cpusets are
+enabled via cpusets_enabled() static branch.  This branch can be
+rewritten dynamically (via cpuset_inc) if a new cpuset is created.  The
+x86 jump label code fully synchronizes across all CPUs for every entry
+it rewrites.  If it rewrites only one of the callsites (specifically the
+one in read_mems_allowed_retry) and then waits for the
+smp_call_function(do_sync_core) to complete while a CPU is inside the
+begin/retry section with IRQs off and the mems_allowed value is changed,
+we can hang.
+
+This is because begin() will always return 0 (since it wasn't patched
+yet) while retry() will test the 0 against the actual value of the seq
+counter.
+
+The fix is to use two different static keys: one for begin
+(pre_enable_key) and one for retry (enable_key).  In cpuset_inc(), we
+first bump the pre_enable key to ensure that cpuset_mems_allowed_begin()
+always returns a valid seqcount if we are enabling cpusets.  Similarly, when
+disabling cpusets via cpuset_dec(), we first ensure that callers of
+cpuset_mems_allowed_retry() will start ignoring the seqcount value
+before we let cpuset_mems_allowed_begin() return 0.
+
+The relevant stack traces of the two stuck threads:
+
+  CPU: 1 PID: 1415 Comm: mkdir Tainted: G L  4.9.36-00104-g540c51286237 #4
+  Hardware name: Default string Default string/Hardware, BIOS 4.29.1-20170526215256 05/26/2017
+  task: ffff8817f9c28000 task.stack: ffffc9000ffa4000
+  RIP: smp_call_function_many+0x1f9/0x260
+  Call Trace:
+    smp_call_function+0x3b/0x70
+    on_each_cpu+0x2f/0x90
+    text_poke_bp+0x87/0xd0
+    arch_jump_label_transform+0x93/0x100
+    __jump_label_update+0x77/0x90
+    jump_label_update+0xaa/0xc0
+    static_key_slow_inc+0x9e/0xb0
+    cpuset_css_online+0x70/0x2e0
+    online_css+0x2c/0xa0
+    cgroup_apply_control_enable+0x27f/0x3d0
+    cgroup_mkdir+0x2b7/0x420
+    kernfs_iop_mkdir+0x5a/0x80
+    vfs_mkdir+0xf6/0x1a0
+    SyS_mkdir+0xb7/0xe0
+    entry_SYSCALL_64_fastpath+0x18/0xad
+
+  ...
+
+  CPU: 2 PID: 1 Comm: init Tainted: G L  4.9.36-00104-g540c51286237 #4
+  Hardware name: Default string Default string/Hardware, BIOS 4.29.1-20170526215256 05/26/2017
+  task: ffff8818087c0000 task.stack: ffffc90000030000
+  RIP: int3+0x39/0x70
+  Call Trace:
+    <#DB> ? ___slab_alloc+0x28b/0x5a0
+    <EOE> ? copy_process.part.40+0xf7/0x1de0
+    __slab_alloc.isra.80+0x54/0x90
+    copy_process.part.40+0xf7/0x1de0
+    copy_process.part.40+0xf7/0x1de0
+    kmem_cache_alloc_node+0x8a/0x280
+    copy_process.part.40+0xf7/0x1de0
+    _do_fork+0xe7/0x6c0
+    _raw_spin_unlock_irq+0x2d/0x60
+    trace_hardirqs_on_caller+0x136/0x1d0
+    entry_SYSCALL_64_fastpath+0x5/0xad
+    do_syscall_64+0x27/0x350
+    SyS_clone+0x19/0x20
+    do_syscall_64+0x60/0x350
+    entry_SYSCALL64_slow_path+0x25/0x25
+
+Link: http://lkml.kernel.org/r/20170731040113.14197-1-dmitriyz@waymo.com
+Fixes: 46e700abc44c ("mm, page_alloc: remove unnecessary taking of a seqlock when cpusets are disabled")
+Signed-off-by: Dima Zavin <dmitriyz@waymo.com>
+Reported-by: Cliff Spradlin <cspradlin@waymo.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Christopher Lameter <cl@linux.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpuset.h |   19 +++++++++++++++++--
+ kernel/cpuset.c        |    1 +
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -16,6 +16,19 @@
+ #ifdef CONFIG_CPUSETS
++/*
++ * Static branch rewrites can happen in an arbitrary order for a given
++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
++ * to ensure that begin() always gets rewritten before retry() in the
++ * disabled -> enabled transition. If not, then if local irqs are disabled
++ * around the loop, we can deadlock since retry() would always be
++ * comparing the latest value of the mems_allowed seqcount against 0 as
++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
++ * transition should happen in reverse order for the same reasons (want to stop
++ * looking at real value of mems_allowed.sequence in retry() first).
++ */
++extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
+ static inline bool cpusets_enabled(void)
+ {
+@@ -30,12 +43,14 @@ static inline int nr_cpusets(void)
+ static inline void cpuset_inc(void)
+ {
++      static_branch_inc(&cpusets_pre_enable_key);
+       static_branch_inc(&cpusets_enabled_key);
+ }
+ static inline void cpuset_dec(void)
+ {
+       static_branch_dec(&cpusets_enabled_key);
++      static_branch_dec(&cpusets_pre_enable_key);
+ }
+ extern int cpuset_init(void);
+@@ -113,7 +128,7 @@ extern void cpuset_print_current_mems_al
+  */
+ static inline unsigned int read_mems_allowed_begin(void)
+ {
+-      if (!cpusets_enabled())
++      if (!static_branch_unlikely(&cpusets_pre_enable_key))
+               return 0;
+       return read_seqcount_begin(&current->mems_allowed_seq);
+@@ -127,7 +142,7 @@ static inline unsigned int read_mems_all
+  */
+ static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+-      if (!cpusets_enabled())
++      if (!static_branch_unlikely(&cpusets_enabled_key))
+               return false;
+       return read_seqcount_retry(&current->mems_allowed_seq, seq);
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -61,6 +61,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+ /* See "Frequency meter" comments, below. */
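
The ordering argument can be modelled with a standalone toy program in which
plain booleans stand in for the partially patched jump labels; nothing below
is the real cpuset code, and the values are arbitrary.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two jump-label call sites.  Each one is "patched"
 * (switched on) independently, modelling the window in which only one of
 * the two static_branch call sites has been rewritten. */
static bool begin_patched;	/* plays the role of cpusets_pre_enable_key */
static bool retry_patched;	/* plays the role of cpusets_enabled_key */
static unsigned int seq = 42;	/* stand-in for mems_allowed_seq */

static unsigned int read_begin(void)
{
	return begin_patched ? seq : 0;
}

static bool read_retry(unsigned int cookie)
{
	return retry_patched ? cookie != seq : false;
}

static void reader(const char *order)
{
	/* A reader with IRQs off spins until retry() reports a match. */
	unsigned int cookie = read_begin();

	printf("%s: cookie=%u -> %s\n", order, cookie,
	       read_retry(cookie) ? "mismatch, would spin forever" : "done");
}

int main(void)
{
	/* Buggy ordering: retry() is rewritten before begin(). */
	retry_patched = true;
	reader("retry patched first");
	retry_patched = false;

	/* Fixed ordering: begin() is rewritten before retry(). */
	begin_patched = true;
	reader("begin patched first");
	return 0;
}
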
diff --git a/queue-4.9/device-property-make-dev_fwnode-public.patch b/queue-4.9/device-property-make-dev_fwnode-public.patch
new file mode 100644 (file)
index 0000000..ab33717
--- /dev/null
@@ -0,0 +1,51 @@
+From e44bb0cbdc88686c21e2175a990b40bf6db5d005 Mon Sep 17 00:00:00 2001
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+Date: Tue, 28 Mar 2017 10:52:24 +0300
+Subject: device property: Make dev_fwnode() public
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+commit e44bb0cbdc88686c21e2175a990b40bf6db5d005 upstream.
+
+The function to obtain a fwnode related to a struct device is useful for
+drivers that use the fwnode property API: it allows them to remain unaware
+of the underlying firmware implementation.
+
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Chris Metcalf <cmetcalf@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/property.c  |    3 ++-
+ include/linux/property.h |    2 ++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -182,11 +182,12 @@ static int pset_prop_read_string(struct
+       return 0;
+ }
+-static inline struct fwnode_handle *dev_fwnode(struct device *dev)
++struct fwnode_handle *dev_fwnode(struct device *dev)
+ {
+       return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+               &dev->of_node->fwnode : dev->fwnode;
+ }
++EXPORT_SYMBOL_GPL(dev_fwnode);
+ /**
+  * device_property_present - check if a property of a device is present
+--- a/include/linux/property.h
++++ b/include/linux/property.h
+@@ -33,6 +33,8 @@ enum dev_dma_attr {
+       DEV_DMA_COHERENT,
+ };
++struct fwnode_handle *dev_fwnode(struct device *dev);
++
+ bool device_property_present(struct device *dev, const char *propname);
+ int device_property_read_u8_array(struct device *dev, const char *propname,
+                                 u8 *val, size_t nval);
diff --git a/queue-4.9/drm-amdgpu-fix-undue-fallthroughs-in-golden-registers-initialization.patch b/queue-4.9/drm-amdgpu-fix-undue-fallthroughs-in-golden-registers-initialization.patch
new file mode 100644 (file)
index 0000000..d26457b
--- /dev/null
@@ -0,0 +1,52 @@
+From 5694785cf09bf0e7bd8e5f62361ea34fa162a4a0 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Sun, 30 Jul 2017 10:18:25 +0200
+Subject: drm/amdgpu: Fix undue fallthroughs in golden registers initialization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jean Delvare <jdelvare@suse.de>
+
+commit 5694785cf09bf0e7bd8e5f62361ea34fa162a4a0 upstream.
+
+As I was staring at the si_init_golden_registers code, I noticed that
+the Pitcairn initialization silently falls through to the Cape Verde
+initialization, and the Oland initialization falls through to the Hainan
+initialization. However there is no comment stating that this is
+intentional, and the radeon driver doesn't have any such fallthrough,
+so I suspect this is not supposed to happen.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Fixes: 62a37553414a ("drm/amdgpu: add si implementation v10")
+Cc: Ken Wang <Qingqing.Wang@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Marek Olšák" <maraeo@gmail.com>
+Cc: "Christian König" <christian.koenig@amd.com>
+Cc: Flora Cui <Flora.Cui@amd.com>
+Reviewed-by: Marek Olšák <marek.olsak@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/si.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1301,6 +1301,7 @@ static void si_init_golden_registers(str
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
++              break;
+       case CHIP_VERDE:
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_registers,
+@@ -1325,6 +1326,7 @@ static void si_init_golden_registers(str
+               amdgpu_program_register_sequence(adev,
+                                                oland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
++              break;
+       case CHIP_HAINAN:
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers,
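
The problem is the classic missing-break fallthrough in a switch statement. A
minimal standalone sketch with placeholder chip names:

#include <stdio.h>

enum chip { CHIP_A, CHIP_B };	/* placeholders for Pitcairn/Verde etc. */

static void init_golden_registers(enum chip c)
{
	switch (c) {
	case CHIP_A:
		printf("program CHIP_A golden registers\n");
		break;		/* without this break, CHIP_A would also run
				 * the CHIP_B programming below */
	case CHIP_B:
		printf("program CHIP_B golden registers\n");
		break;
	}
}

int main(void)
{
	init_golden_registers(CHIP_A);	/* prints only the CHIP_A line */
	return 0;
}
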
diff --git a/queue-4.9/iwlwifi-dvm-prevent-an-out-of-bounds-access.patch b/queue-4.9/iwlwifi-dvm-prevent-an-out-of-bounds-access.patch
new file mode 100644 (file)
index 0000000..397699d
--- /dev/null
@@ -0,0 +1,48 @@
+From 0b0f934e92a8eaed2e6c48a50eae6f84661f74f3 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Thu, 8 Jun 2017 10:55:26 +0300
+Subject: iwlwifi: dvm: prevent an out of bounds access
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit 0b0f934e92a8eaed2e6c48a50eae6f84661f74f3 upstream.
+
+iwlagn_check_ratid_empty takes the tid as a parameter, but
+it doesn't check that it is not IWL_TID_NON_QOS.
+Since IWL_TID_NON_QOS = 8 and iwl_priv::tid_data is an array
+with 8 entries, accessing iwl_priv::tid_data[IWL_TID_NON_QOS]
+is a bad idea.
+This happened in iwlagn_rx_reply_tx. Since
+iwlagn_check_ratid_empty is relevant only to check whether
+we can open A-MPDU, this flow is irrelevant if tid is
+IWL_TID_NON_QOS. Call iwlagn_check_ratid_empty only inside
+the
+       if (tid != IWL_TID_NON_QOS)
+
+a few lines earlier in the function.
+
+Reported-by: Seraphime Kirkovski <kirkseraph@gmail.com>
+Tested-by: Seraphime Kirkovski <kirkseraph@gmail.com>
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/intel/iwlwifi/dvm/tx.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                                 next_reclaimed);
++                      iwlagn_check_ratid_empty(priv, sta_id, tid);
+               }
+               iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+-              iwlagn_check_ratid_empty(priv, sta_id, tid);
+               freed = 0;
+               /* process frames */
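
In miniature, the out-of-bounds access is an index equal to the array size
being used as a subscript. The sketch below guards against the sentinel
directly, which is only in the spirit of the fix; the real patch instead
moves the call inside the existing tid check.

#include <stdio.h>

#define NUM_TIDS	8
#define TID_NON_QOS	8	/* sentinel value, not a valid array index */

struct tid_data { int next_reclaimed; };

static struct tid_data tid_data[NUM_TIDS];

static void check_ratid_empty(int tid)
{
	/* Never index the array with the sentinel. */
	if (tid == TID_NON_QOS)
		return;
	printf("tid %d: next_reclaimed=%d\n", tid, tid_data[tid].next_reclaimed);
}

int main(void)
{
	check_ratid_empty(3);		/* valid index */
	check_ratid_empty(TID_NON_QOS);	/* would read tid_data[8] without the guard */
	return 0;
}
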
diff --git a/queue-4.9/kvm-async_pf-make-rcu-irq-exit-if-not-triggered-from-idle-task.patch b/queue-4.9/kvm-async_pf-make-rcu-irq-exit-if-not-triggered-from-idle-task.patch
new file mode 100644 (file)
index 0000000..1098711
--- /dev/null
@@ -0,0 +1,75 @@
+From 337c017ccdf2653d0040099433fc1a2b1beb5926 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Tue, 1 Aug 2017 05:20:03 -0700
+Subject: KVM: async_pf: make rcu irq exit if not triggered from idle task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit 337c017ccdf2653d0040099433fc1a2b1beb5926 upstream.
+
+ WARNING: CPU: 5 PID: 1242 at kernel/rcu/tree_plugin.h:323 rcu_note_context_switch+0x207/0x6b0
+ CPU: 5 PID: 1242 Comm: unity-settings- Not tainted 4.13.0-rc2+ #1
+ RIP: 0010:rcu_note_context_switch+0x207/0x6b0
+ Call Trace:
+  __schedule+0xda/0xba0
+  ? kvm_async_pf_task_wait+0x1b2/0x270
+  schedule+0x40/0x90
+  kvm_async_pf_task_wait+0x1cc/0x270
+  ? prepare_to_swait+0x22/0x70
+  do_async_page_fault+0x77/0xb0
+  ? do_async_page_fault+0x77/0xb0
+  async_page_fault+0x28/0x30
+ RIP: 0010:__d_lookup_rcu+0x90/0x1e0
+
+I encountered this when trying to stress the async page fault in an L1 guest
+with L2 guests running.
+
+Commit 9b132fbe5419 (Add rcu user eqs exception hooks for async page
+fault) adds rcu_irq_enter/exit() to kvm_async_pf_task_wait() to exit cpu
+idle eqs when needed, to protect the code that needs to use rcu.  However,
+we need to call the pair even if the function calls schedule(), as seen
+from the above backtrace.
+
+This patch fixes it by informing the RCU subsystem of the irq exit/enter
+towards/away from idle for both the n.halted and !n.halted cases.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kvm.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -152,6 +152,8 @@ void kvm_async_pf_task_wait(u32 token)
+               if (hlist_unhashed(&n.link))
+                       break;
++              rcu_irq_exit();
++
+               if (!n.halted) {
+                       local_irq_enable();
+                       schedule();
+@@ -160,11 +162,11 @@ void kvm_async_pf_task_wait(u32 token)
+                       /*
+                        * We cannot reschedule. So halt.
+                        */
+-                      rcu_irq_exit();
+                       native_safe_halt();
+                       local_irq_disable();
+-                      rcu_irq_enter();
+               }
++
++              rcu_irq_enter();
+       }
+       if (!n.halted)
+               finish_swait(&n.wq, &wait);
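
The control flow after the fix can be sketched standalone, with printf stubs
standing in for the RCU calls: the exit/enter pair now brackets both the
schedule() path and the halt path, where previously only the halt branch was
bracketed.

#include <stdbool.h>
#include <stdio.h>

static void rcu_irq_exit_stub(void)  { printf("  rcu_irq_exit\n");  }
static void rcu_irq_enter_stub(void) { printf("  rcu_irq_enter\n"); }

/* One pass through the wait loop after the fix: RCU is informed whether
 * the task schedules away or halts. */
static void wait_iteration(bool halted)
{
	rcu_irq_exit_stub();
	if (!halted)
		printf("  schedule()\n");
	else
		printf("  native_safe_halt()\n");
	rcu_irq_enter_stub();
}

int main(void)
{
	printf("schedulable task:\n");
	wait_iteration(false);
	printf("halted task:\n");
	wait_iteration(true);
	return 0;
}
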
diff --git a/queue-4.9/libata-array-underflow-in-ata_find_dev.patch b/queue-4.9/libata-array-underflow-in-ata_find_dev.patch
new file mode 100644 (file)
index 0000000..19daf43
--- /dev/null
@@ -0,0 +1,44 @@
+From 59a5e266c3f5c1567508888dd61a45b86daed0fa Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 19 Jul 2017 13:06:41 +0300
+Subject: libata: array underflow in ata_find_dev()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 59a5e266c3f5c1567508888dd61a45b86daed0fa upstream.
+
+My static checker complains that "devno" can be negative, meaning that
+we read before the start of the array.  I've looked at the code, and I
+think the warning is right.  This comes from /proc so it's root only or
+it would be quite a serious bug.  The call tree looks like this:
+
+proc_scsi_write() <- gets id and channel from simple_strtoul()
+-> scsi_add_single_device() <- calls shost->transportt->user_scan()
+   -> ata_scsi_user_scan()
+      -> ata_find_dev()
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-scsi.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2971,10 +2971,12 @@ static unsigned int atapi_xlat(struct at
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+       if (!sata_pmp_attached(ap)) {
+-              if (likely(devno < ata_link_max_devices(&ap->link)))
++              if (likely(devno >= 0 &&
++                         devno < ata_link_max_devices(&ap->link)))
+                       return &ap->link.device[devno];
+       } else {
+-              if (likely(devno < ap->nr_pmp_links))
++              if (likely(devno >= 0 &&
++                         devno < ap->nr_pmp_links))
+                       return &ap->pmp_link[devno].device[0];
+       }
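
The underflow in miniature: a signed index parsed from userspace must be
checked on both ends before it is used as an array subscript. A standalone
sketch with made-up sizes:

#include <stdio.h>

#define MAX_DEVICES 2

static int devices[MAX_DEVICES] = { 100, 101 };

/* Checking only the upper bound would let a negative devno (parsed from
 * a /proc write) read before the start of the array. */
static int *find_dev(int devno)
{
	if (devno >= 0 && devno < MAX_DEVICES)
		return &devices[devno];
	return NULL;
}

int main(void)
{
	printf("devno  1 -> %s\n", find_dev(1)  ? "found" : "rejected");
	printf("devno -1 -> %s\n", find_dev(-1) ? "found" : "rejected");
	return 0;
}
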
diff --git a/queue-4.9/mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch b/queue-4.9/mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch
new file mode 100644 (file)
index 0000000..4ba21cd
--- /dev/null
@@ -0,0 +1,207 @@
+From 3ea277194daaeaa84ce75180ec7c7a2075027a68 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 2 Aug 2017 13:31:52 -0700
+Subject: mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 3ea277194daaeaa84ce75180ec7c7a2075027a68 upstream.
+
+Nadav Amit identified a theoretical race between page reclaim and
+mprotect due to TLB flushes being batched outside of the PTL being held.
+
+He described the race as follows:
+
+        CPU0                            CPU1
+        ----                            ----
+                                        user accesses memory using RW PTE
+                                        [PTE now cached in TLB]
+        try_to_unmap_one()
+        ==> ptep_get_and_clear()
+        ==> set_tlb_ubc_flush_pending()
+                                        mprotect(addr, PROT_READ)
+                                        ==> change_pte_range()
+                                        ==> [ PTE non-present - no flush ]
+
+                                        user writes using cached RW PTE
+        ...
+
+        try_to_unmap_flush()
+
+The same type of race exists for reads when protecting for PROT_NONE and
+also exists for operations that can leave an old TLB entry behind such
+as munmap, mremap and madvise.
+
+For some operations like mprotect, it's not necessarily a data integrity
+issue but it is a correctness issue as there is a window where an
+mprotect that limits access still allows access.  For munmap, it's
+potentially a data integrity issue although the race is massive as an
+munmap, mmap and return to userspace must all complete between the
+window when reclaim drops the PTL and flushes the TLB.  However, it's
+theoretically possible so handle this issue by flushing the mm if
+reclaim is potentially currently batching TLB flushes.
+
+Other instances where a flush is required for a present pte should be ok
+as either the page lock is held preventing parallel reclaim or a page
+reference count is elevated preventing a parallel free leading to
+corruption.  In the case of page_mkclean there isn't an obvious path
+that userspace could take advantage of without using the operations that
+are guarded by this patch.  Other users such as gup as a race with
+reclaim looks just at PTEs.  huge page variants should be ok as they
+don't race with reclaim.  mincore only looks at PTEs.  userfault also
+should be ok as if a parallel reclaim takes place, it will either fault
+the page back in or read some of the data before the flush occurs
+triggering a fault.
+
+Note that a variant of this patch was acked by Andy Lutomirski but this
+was for the x86 parts on top of his PCID work which didn't make the 4.13
+merge window as expected.  His ack is dropped from this version and
+there will be a follow-on patch on top of PCID that will include his
+ack.
+
+[akpm@linux-foundation.org: tweak comments]
+[akpm@linux-foundation.org: fix spello]
+Link: http://lkml.kernel.org/r/20170717155523.emckq2esjro6hf3z@suse.de
+Reported-by: Nadav Amit <nadav.amit@gmail.com>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mm_types.h |    4 ++++
+ mm/internal.h            |    5 ++++-
+ mm/madvise.c             |    1 +
+ mm/memory.c              |    1 +
+ mm/mprotect.c            |    1 +
+ mm/mremap.c              |    1 +
+ mm/rmap.c                |   36 ++++++++++++++++++++++++++++++++++++
+ 7 files changed, 48 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -508,6 +508,10 @@ struct mm_struct {
+        */
+       bool tlb_flush_pending;
+ #endif
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++      /* See flush_tlb_batched_pending() */
++      bool tlb_flush_batched;
++#endif
+       struct uprobes_state uprobes_state;
+ #ifdef CONFIG_X86_INTEL_MPX
+       /* address of the bounds directory */
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -472,6 +472,7 @@ struct tlbflush_unmap_batch;
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ void try_to_unmap_flush(void);
+ void try_to_unmap_flush_dirty(void);
++void flush_tlb_batched_pending(struct mm_struct *mm);
+ #else
+ static inline void try_to_unmap_flush(void)
+ {
+@@ -479,7 +480,9 @@ static inline void try_to_unmap_flush(vo
+ static inline void try_to_unmap_flush_dirty(void)
+ {
+ }
+-
++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++}
+ #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+ extern const struct trace_print_flags pageflag_names[];
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -282,6 +282,7 @@ static int madvise_free_pte_range(pmd_t
+               return 0;
+       orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++      flush_tlb_batched_pending(mm);
+       arch_enter_lazy_mmu_mode();
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1124,6 +1124,7 @@ again:
+       init_rss_vec(rss);
+       start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       pte = start_pte;
++      flush_tlb_batched_pending(mm);
+       arch_enter_lazy_mmu_mode();
+       do {
+               pte_t ptent = *pte;
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -74,6 +74,7 @@ static unsigned long change_pte_range(st
+       if (!pte)
+               return 0;
++      flush_tlb_batched_pending(vma->vm_mm);
+       arch_enter_lazy_mmu_mode();
+       do {
+               oldpte = *pte;
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -142,6 +142,7 @@ static void move_ptes(struct vm_area_str
+       new_ptl = pte_lockptr(mm, new_pmd);
+       if (new_ptl != old_ptl)
+               spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
++      flush_tlb_batched_pending(vma->vm_mm);
+       arch_enter_lazy_mmu_mode();
+       for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -617,6 +617,13 @@ static void set_tlb_ubc_flush_pending(st
+       tlb_ubc->flush_required = true;
+       /*
++       * Ensure compiler does not re-order the setting of tlb_flush_batched
++       * before the PTE is cleared.
++       */
++      barrier();
++      mm->tlb_flush_batched = true;
++
++      /*
+        * If the PTE was dirty then it's best to assume it's writable. The
+        * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+        * before the page is queued for IO.
+@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm
+       return should_defer;
+ }
++
++/*
++ * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
++ * operation such as mprotect or munmap to race between reclaim unmapping
++ * the page and flushing the page. If this race occurs, it potentially allows
++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
++ * batching in flight would be expensive during reclaim so instead track
++ * whether TLB batching occurred in the past and if so then do a flush here
++ * if required. This will cost one additional flush per reclaim cycle paid
++ * by the first operation at risk such as mprotect and mumap.
++ *
++ * This must be called under the PTL so that an access to tlb_flush_batched
++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
++ * via the PTL.
++ */
++void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++      if (mm->tlb_flush_batched) {
++              flush_tlb_mm(mm);
++
++              /*
++               * Do not allow the compiler to re-order the clearing of
++               * tlb_flush_batched before the tlb is flushed.
++               */
++              barrier();
++              mm->tlb_flush_batched = false;
++      }
++}
+ #else
+ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+               struct page *page, bool writable)
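
The bookkeeping introduced by the patch has a simple shape: reclaim sets a
per-mm flag when it defers a flush into a batch, and the next operation that
takes the PTL flushes and clears the flag before trusting the page tables. A
standalone sketch (the barrier() calls of the real code are only noted in
comments):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mm with the new bookkeeping bit; not the kernel struct. */
struct mm {
	bool tlb_flush_batched;	/* set by reclaim, cleared by the next flusher */
};

static void flush_tlb_mm(struct mm *mm)
{
	printf("flush TLB for mm %p\n", (void *)mm);
}

/* Reclaim side: a PTE was cleared and its flush deferred into a batch.
 * The real code uses barrier() so this store cannot be reordered before
 * the PTE clear. */
static void set_tlb_ubc_flush_pending(struct mm *mm)
{
	mm->tlb_flush_batched = true;
}

/* mprotect/munmap/madvise side, called under the PTL before the PTEs are
 * examined: flush anything reclaim may have left pending. */
static void flush_tlb_batched_pending(struct mm *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);
		/* real code: barrier() so the clear cannot be hoisted
		 * above the flush */
		mm->tlb_flush_batched = false;
	}
}

int main(void)
{
	struct mm mm = { 0 };

	set_tlb_ubc_flush_pending(&mm);	/* reclaim batched a flush */
	flush_tlb_batched_pending(&mm);	/* mprotect flushes before proceeding */
	flush_tlb_batched_pending(&mm);	/* nothing pending: no extra flush */
	return 0;
}
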
diff --git a/queue-4.9/mmc-core-fix-access-to-hs400-es-devices.patch b/queue-4.9/mmc-core-fix-access-to-hs400-es-devices.patch
new file mode 100644 (file)
index 0000000..97bc9e9
--- /dev/null
@@ -0,0 +1,46 @@
+From 773dc118756b1f38766063e90e582016be868f09 Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Wed, 1 Mar 2017 14:11:47 -0800
+Subject: mmc: core: Fix access to HS400-ES devices
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+commit 773dc118756b1f38766063e90e582016be868f09 upstream.
+
+HS400-ES devices fail to initialize with the following error messages.
+
+mmc1: power class selection to bus width 8 ddr 0 failed
+mmc1: error -110 whilst initialising MMC card
+
+This was seen on Samsung Chromebook Plus. Code analysis points to
+commit 3d4ef329757c ("mmc: core: fix multi-bit bus width without
+high-speed mode"), which attempts to set the bus width for all but
+HS200 devices unconditionally. However, for HS400-ES, the bus width
+is already selected.
+
+Cc: Anssi Hannula <anssi.hannula@bitwise.fi>
+Cc: Douglas Anderson <dianders@chromium.org>
+Cc: Brian Norris <briannorris@chromium.org>
+Fixes: 3d4ef329757c ("mmc: core: fix multi-bit bus width ...")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Shawn Lin <shawn.lin@rock-chip.com>
+Tested-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/mmc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1690,7 +1690,7 @@ static int mmc_init_card(struct mmc_host
+               err = mmc_select_hs400(card);
+               if (err)
+                       goto free_card;
+-      } else {
++      } else if (!mmc_card_hs400es(card)) {
+               /* Select the desired bus width optionally */
+               err = mmc_select_bus_width(card);
+               if (err > 0 && mmc_card_hs(card)) {
diff --git a/queue-4.9/mmc-core-use-device_property_read-instead-of-of_property_read.patch b/queue-4.9/mmc-core-use-device_property_read-instead-of-of_property_read.patch
new file mode 100644 (file)
index 0000000..0e7e9a5
--- /dev/null
@@ -0,0 +1,169 @@
+From 73a47a9bb3e2c4a9c553c72456e63ab991b1a4d9 Mon Sep 17 00:00:00 2001
+From: David Woods <dwoods@mellanox.com>
+Date: Fri, 26 May 2017 17:53:21 -0400
+Subject: mmc: core: Use device_property_read instead of of_property_read
+
+From: David Woods <dwoods@mellanox.com>
+
+commit 73a47a9bb3e2c4a9c553c72456e63ab991b1a4d9 upstream.
+
+Using the device_property interfaces allows mmc drivers to work
+on platforms which run on either device tree or ACPI.
+
+Signed-off-by: David Woods <dwoods@mellanox.com>
+Reviewed-by: Chris Metcalf <cmetcalf@mellanox.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/host.c |   70 +++++++++++++++++++++++-------------------------
+ 1 file changed, 34 insertions(+), 36 deletions(-)
+
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -179,19 +179,17 @@ static void mmc_retune_timer(unsigned lo
+  */
+ int mmc_of_parse(struct mmc_host *host)
+ {
+-      struct device_node *np;
++      struct device *dev = host->parent;
+       u32 bus_width;
+       int ret;
+       bool cd_cap_invert, cd_gpio_invert = false;
+       bool ro_cap_invert, ro_gpio_invert = false;
+-      if (!host->parent || !host->parent->of_node)
++      if (!dev || !dev_fwnode(dev))
+               return 0;
+-      np = host->parent->of_node;
+-
+       /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+-      if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
++      if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
+               dev_dbg(host->parent,
+                       "\"bus-width\" property is missing, assuming 1 bit.\n");
+               bus_width = 1;
+@@ -213,7 +211,7 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+       /* f_max is obtained from the optional "max-frequency" property */
+-      of_property_read_u32(np, "max-frequency", &host->f_max);
++      device_property_read_u32(dev, "max-frequency", &host->f_max);
+       /*
+        * Configure CD and WP pins. They are both by default active low to
+@@ -228,12 +226,12 @@ int mmc_of_parse(struct mmc_host *host)
+        */
+       /* Parse Card Detection */
+-      if (of_property_read_bool(np, "non-removable")) {
++      if (device_property_read_bool(dev, "non-removable")) {
+               host->caps |= MMC_CAP_NONREMOVABLE;
+       } else {
+-              cd_cap_invert = of_property_read_bool(np, "cd-inverted");
++              cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+-              if (of_property_read_bool(np, "broken-cd"))
++              if (device_property_read_bool(dev, "broken-cd"))
+                       host->caps |= MMC_CAP_NEEDS_POLL;
+               ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+@@ -259,7 +257,7 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+       /* Parse Write Protection */
+-      ro_cap_invert = of_property_read_bool(np, "wp-inverted");
++      ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+       ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+       if (!ret)
+@@ -267,62 +265,62 @@ int mmc_of_parse(struct mmc_host *host)
+       else if (ret != -ENOENT && ret != -ENOSYS)
+               return ret;
+-      if (of_property_read_bool(np, "disable-wp"))
++      if (device_property_read_bool(dev, "disable-wp"))
+               host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+       /* See the comment on CD inversion above */
+       if (ro_cap_invert ^ ro_gpio_invert)
+               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+-      if (of_property_read_bool(np, "cap-sd-highspeed"))
++      if (device_property_read_bool(dev, "cap-sd-highspeed"))
+               host->caps |= MMC_CAP_SD_HIGHSPEED;
+-      if (of_property_read_bool(np, "cap-mmc-highspeed"))
++      if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+               host->caps |= MMC_CAP_MMC_HIGHSPEED;
+-      if (of_property_read_bool(np, "sd-uhs-sdr12"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr12"))
+               host->caps |= MMC_CAP_UHS_SDR12;
+-      if (of_property_read_bool(np, "sd-uhs-sdr25"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr25"))
+               host->caps |= MMC_CAP_UHS_SDR25;
+-      if (of_property_read_bool(np, "sd-uhs-sdr50"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr50"))
+               host->caps |= MMC_CAP_UHS_SDR50;
+-      if (of_property_read_bool(np, "sd-uhs-sdr104"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr104"))
+               host->caps |= MMC_CAP_UHS_SDR104;
+-      if (of_property_read_bool(np, "sd-uhs-ddr50"))
++      if (device_property_read_bool(dev, "sd-uhs-ddr50"))
+               host->caps |= MMC_CAP_UHS_DDR50;
+-      if (of_property_read_bool(np, "cap-power-off-card"))
++      if (device_property_read_bool(dev, "cap-power-off-card"))
+               host->caps |= MMC_CAP_POWER_OFF_CARD;
+-      if (of_property_read_bool(np, "cap-mmc-hw-reset"))
++      if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
+               host->caps |= MMC_CAP_HW_RESET;
+-      if (of_property_read_bool(np, "cap-sdio-irq"))
++      if (device_property_read_bool(np, "cap-sdio-irq"))
+               host->caps |= MMC_CAP_SDIO_IRQ;
+-      if (of_property_read_bool(np, "full-pwr-cycle"))
++      if (device_property_read_bool(np, "full-pwr-cycle"))
+               host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+-      if (of_property_read_bool(np, "keep-power-in-suspend"))
++      if (device_property_read_bool(np, "keep-power-in-suspend"))
+               host->pm_caps |= MMC_PM_KEEP_POWER;
+-      if (of_property_read_bool(np, "wakeup-source") ||
+-          of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
++      if (device_property_read_bool(np, "wakeup-source") ||
++          device_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
+               host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+-      if (of_property_read_bool(np, "mmc-ddr-1_8v"))
++      if (device_property_read_bool(np, "mmc-ddr-1_8v"))
+               host->caps |= MMC_CAP_1_8V_DDR;
+-      if (of_property_read_bool(np, "mmc-ddr-1_2v"))
++      if (device_property_read_bool(np, "mmc-ddr-1_2v"))
+               host->caps |= MMC_CAP_1_2V_DDR;
+-      if (of_property_read_bool(np, "mmc-hs200-1_8v"))
++      if (device_property_read_bool(np, "mmc-hs200-1_8v"))
+               host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs200-1_2v"))
++      if (device_property_read_bool(np, "mmc-hs200-1_2v"))
+               host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-1_8v"))
++      if (device_property_read_bool(np, "mmc-hs400-1_8v"))
+               host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-1_2v"))
++      if (device_property_read_bool(np, "mmc-hs400-1_2v"))
+               host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
++      if (device_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+               host->caps2 |= MMC_CAP2_HS400_ES;
+-      if (of_property_read_bool(np, "no-sdio"))
++      if (device_property_read_bool(np, "no-sdio"))
+               host->caps2 |= MMC_CAP2_NO_SDIO;
+-      if (of_property_read_bool(np, "no-sd"))
++      if (device_property_read_bool(np, "no-sd"))
+               host->caps2 |= MMC_CAP2_NO_SD;
+-      if (of_property_read_bool(np, "no-mmc"))
++      if (device_property_read_bool(np, "no-mmc"))
+               host->caps2 |= MMC_CAP2_NO_MMC;
+-      host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
++      host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
+       if (host->dsr_req && (host->dsr & ~0xffff)) {
+               dev_err(host->parent,
+                       "device tree specified broken value for DSR: 0x%x, ignoring\n",
diff --git a/queue-4.9/mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch b/queue-4.9/mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch
new file mode 100644 (file)
index 0000000..c9a61c4
--- /dev/null
@@ -0,0 +1,83 @@
+From 852ff5fea9eb6a9799f1881d6df2cd69a9e6eed5 Mon Sep 17 00:00:00 2001
+From: David Woods <dwoods@mellanox.com>
+Date: Fri, 26 May 2017 17:53:20 -0400
+Subject: mmc: dw_mmc: Use device_property_read instead of of_property_read
+
+From: David Woods <dwoods@mellanox.com>
+
+commit 852ff5fea9eb6a9799f1881d6df2cd69a9e6eed5 upstream.
+
+Using the device_property interfaces allows the dw_mmc driver to work
+on platforms which run on either device tree or ACPI.
+
+Signed-off-by: David Woods <dwoods@mellanox.com>
+Reviewed-by: Chris Metcalf <cmetcalf@mellanox.com>
+Acked-by: Jaehoon Chung <jh80.chung@samsung.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/dw_mmc.c |   19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2610,8 +2610,8 @@ static int dw_mci_init_slot(struct dw_mc
+       host->slot[id] = slot;
+       mmc->ops = &dw_mci_ops;
+-      if (of_property_read_u32_array(host->dev->of_node,
+-                                     "clock-freq-min-max", freq, 2)) {
++      if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
++                                         freq, 2)) {
+               mmc->f_min = DW_MCI_FREQ_MIN;
+               mmc->f_max = DW_MCI_FREQ_MAX;
+       } else {
+@@ -2709,7 +2709,6 @@ static void dw_mci_init_dma(struct dw_mc
+ {
+       int addr_config;
+       struct device *dev = host->dev;
+-      struct device_node *np = dev->of_node;
+       /*
+       * Check tansfer mode from HCON[17:16]
+@@ -2770,8 +2769,9 @@ static void dw_mci_init_dma(struct dw_mc
+               dev_info(host->dev, "Using internal DMA controller.\n");
+       } else {
+               /* TRANS_MODE_EDMAC: check dma bindings again */
+-              if ((of_property_count_strings(np, "dma-names") < 0) ||
+-                  (!of_find_property(np, "dmas", NULL))) {
++              if ((device_property_read_string_array(dev, "dma-names",
++                                                     NULL, 0) < 0) ||
++                  !device_property_present(dev, "dmas")) {
+                       goto no_dma;
+               }
+               host->dma_ops = &dw_mci_edmac_ops;
+@@ -2931,7 +2931,6 @@ static struct dw_mci_board *dw_mci_parse
+ {
+       struct dw_mci_board *pdata;
+       struct device *dev = host->dev;
+-      struct device_node *np = dev->of_node;
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
+       int ret;
+       u32 clock_frequency;
+@@ -2948,15 +2947,15 @@ static struct dw_mci_board *dw_mci_parse
+       }
+       /* find out number of slots supported */
+-      of_property_read_u32(np, "num-slots", &pdata->num_slots);
++      device_property_read_u32(np, "num-slots", &pdata->num_slots);
+-      if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
++      if (device_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
+               dev_info(dev,
+                        "fifo-depth property not found, using value of FIFOTH register as default\n");
+-      of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
++      device_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+-      if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
++      if (!device_property_read_u32(np, "clock-frequency", &clock_frequency))
+               pdata->bus_hz = clock_frequency;
+       if (drv_data && drv_data->parse_dt) {
diff --git a/queue-4.9/mmc-sdhci-of-at91-force-card-detect-value-for-non-removable-devices.patch b/queue-4.9/mmc-sdhci-of-at91-force-card-detect-value-for-non-removable-devices.patch
new file mode 100644 (file)
index 0000000..f96a70a
--- /dev/null
@@ -0,0 +1,92 @@
+From 7a1e3f143176e8ebdb2f5a9b3b47abc18b879d90 Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+Date: Wed, 26 Jul 2017 16:02:46 +0200
+Subject: mmc: sdhci-of-at91: force card detect value for non removable devices
+
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+
+commit 7a1e3f143176e8ebdb2f5a9b3b47abc18b879d90 upstream.
+
+When the device is non-removable, the card detect signal is often used
+for another purpose, i.e. muxed to another SoC peripheral or used as a
+GPIO. This could lead to wrong behavior depending on the default value of
+this signal if it is not muxed to the SDHCI controller.
+
+Fixes: bb5f8ea4d514 ("mmc: sdhci-of-at91: introduce driver for the Atmel SDMMC")
+Signed-off-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-at91.c |   35 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -31,6 +31,7 @@
+ #define SDMMC_MC1R    0x204
+ #define               SDMMC_MC1R_DDR          BIT(3)
++#define               SDMMC_MC1R_FCD          BIT(7)
+ #define SDMMC_CACR    0x230
+ #define               SDMMC_CACR_CAPWREN      BIT(0)
+ #define               SDMMC_CACR_KEY          (0x46 << 8)
+@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
+       struct clk *mainck;
+ };
++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
++{
++      u8 mc1r;
++
++      mc1r = readb(host->ioaddr + SDMMC_MC1R);
++      mc1r |= SDMMC_MC1R_FCD;
++      writeb(mc1r, host->ioaddr + SDMMC_MC1R);
++}
++
+ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+       u16 clk;
+@@ -112,10 +122,18 @@ void sdhci_at91_set_uhs_signaling(struct
+       sdhci_set_uhs_signaling(host, timing);
+ }
++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
++{
++      sdhci_reset(host, mask);
++
++      if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++              sdhci_at91_set_force_card_detect(host);
++}
++
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+       .set_clock              = sdhci_at91_set_clock,
+       .set_bus_width          = sdhci_set_bus_width,
+-      .reset                  = sdhci_reset,
++      .reset                  = sdhci_at91_reset,
+       .set_uhs_signaling      = sdhci_at91_set_uhs_signaling,
+       .set_power              = sdhci_at91_set_power,
+ };
+@@ -322,6 +340,21 @@ static int sdhci_at91_probe(struct platf
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+       }
++      /*
++       * If the device attached to the MMC bus is not removable, it is safer
++       * to set the Force Card Detect bit. People often don't connect the
++       * card detect signal and use this pin for another purpose. If the card
++       * detect pin is not muxed to the SDHCI controller, a default value is
++       * used, and it can differ from one SoC revision to another. Problems
++       * arise when this default value does not indicate card present. To
++       * avoid this, if the device is non-removable, the card detection
++       * procedure using the SDMMC_CD signal is bypassed. This bit is
++       * cleared when a software reset for all commands is performed, so we
++       * need our own reset function to set this bit back.
++       */
++      if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++              sdhci_at91_set_force_card_detect(host);
++
+       pm_runtime_put_autosuspend(&pdev->dev);
+       return 0;
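Editorial note: the Force Card Detect workaround above only kicks in when the core has marked the host as non-removable. On DT platforms that capability is normally derived from the "non-removable" property by mmc_of_parse(); a rough, hedged probe fragment (not the at91 driver verbatim) would look like:

    /* parse host properties; sets MMC_CAP_NONREMOVABLE for "non-removable" nodes */
    ret = mmc_of_parse(host->mmc);
    if (ret)
            return ret;

    if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
            sdhci_at91_set_force_card_detect(host);
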
diff --git a/queue-4.9/nfsv4-fix-exchange_id-corrupt-verifier-issue.patch b/queue-4.9/nfsv4-fix-exchange_id-corrupt-verifier-issue.patch
new file mode 100644 (file)
index 0000000..0e3db29
--- /dev/null
@@ -0,0 +1,88 @@
+From fd40559c8657418385e42f797e0b04bfc0add748 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Tue, 1 Aug 2017 16:02:47 -0400
+Subject: NFSv4: Fix EXCHANGE_ID corrupt verifier issue
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit fd40559c8657418385e42f797e0b04bfc0add748 upstream.
+
+The verifier is allocated on the stack, but the EXCHANGE_ID RPC call was
+changed to be asynchronous by commit 8d89bd70bc939. If we interrupt
+the call to rpc_wait_for_completion_task(), we can therefore end up
+transmitting random stack contents in lieu of the verifier.
+
+Fixes: 8d89bd70bc939 ("NFS setup async exchange_id")
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c       |   11 ++++-------
+ fs/nfs/nfs4xdr.c        |    2 +-
+ include/linux/nfs_xdr.h |    2 +-
+ 3 files changed, 6 insertions(+), 9 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7410,7 +7410,7 @@ static void nfs4_exchange_id_done(struct
+                       cdata->res.server_scope = NULL;
+               }
+               /* Save the EXCHANGE_ID verifier session trunk tests */
+-              memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
++              memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
+                      sizeof(clp->cl_confirm.data));
+       }
+ out:
+@@ -7447,7 +7447,6 @@ static const struct rpc_call_ops nfs4_ex
+ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+                       u32 sp4_how, struct rpc_xprt *xprt)
+ {
+-      nfs4_verifier verifier;
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+               .rpc_cred = cred,
+@@ -7470,8 +7469,7 @@ static int _nfs4_proc_exchange_id(struct
+       if (!calldata)
+               goto out;
+-      if (!xprt)
+-              nfs4_init_boot_verifier(clp, &verifier);
++      nfs4_init_boot_verifier(clp, &calldata->args.verifier);
+       status = nfs4_init_uniform_client_string(clp);
+       if (status)
+@@ -7516,9 +7514,8 @@ static int _nfs4_proc_exchange_id(struct
+               task_setup_data.rpc_xprt = xprt;
+               task_setup_data.flags =
+                               RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
+-              calldata->args.verifier = &clp->cl_confirm;
+-      } else {
+-              calldata->args.verifier = &verifier;
++              memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
++                              sizeof(calldata->args.verifier.data));
+       }
+       calldata->args.client = clp;
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -1761,7 +1761,7 @@ static void encode_exchange_id(struct xd
+       int len = 0;
+       encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
+-      encode_nfs4_verifier(xdr, args->verifier);
++      encode_nfs4_verifier(xdr, &args->verifier);
+       encode_string(xdr, strlen(args->client->cl_owner_id),
+                       args->client->cl_owner_id);
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1199,7 +1199,7 @@ struct nfs41_state_protection {
+ struct nfs41_exchange_id_args {
+       struct nfs_client               *client;
+-      nfs4_verifier                   *verifier;
++      nfs4_verifier                   verifier;
+       u32                             flags;
+       struct nfs41_state_protection   state_protect;
+ };
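Editorial note: the root cause fixed above is a general async-lifetime hazard: an asynchronous operation must not hold a pointer into its caller's stack frame, because an interrupted wait lets the caller return while the operation is still in flight. A simplified, hypothetical sketch of the before/after argument shapes (names invented for illustration, not the NFS code):

    /* buggy shape: the async call keeps a pointer to the caller's stack */
    struct op_args_buggy {
            const u8 *verifier;     /* may dangle once the caller unwinds */
    };

    /* fixed shape, mirroring the patch: the data is embedded by value and
     * copied in before the asynchronous call is queued */
    struct op_args_fixed {
            u8 verifier[8];
    };
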
diff --git a/queue-4.9/parisc-handle-vma-s-whose-context-is-not-current-in-flush_cache_range.patch b/queue-4.9/parisc-handle-vma-s-whose-context-is-not-current-in-flush_cache_range.patch
new file mode 100644 (file)
index 0000000..a71c490
--- /dev/null
@@ -0,0 +1,75 @@
+From 13d57093c141db2036364d6be35e394fc5b64728 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Sun, 30 Jul 2017 16:20:19 -0400
+Subject: parisc: Handle vma's whose context is not current in flush_cache_range
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 13d57093c141db2036364d6be35e394fc5b64728 upstream.
+
+In testing James' patch to drivers/parisc/pdc_stable.c, I hit the BUG
+statement in flush_cache_range() during a system shutdown:
+
+kernel BUG at arch/parisc/kernel/cache.c:595!
+CPU: 2 PID: 6532 Comm: kworker/2:0 Not tainted 4.13.0-rc2+ #1
+Workqueue: events free_ioctx
+
+ IAOQ[0]: flush_cache_range+0x144/0x148
+ IAOQ[1]: flush_cache_page+0x0/0x1a8
+ RP(r2): flush_cache_range+0xec/0x148
+Backtrace:
+ [<00000000402910ac>] unmap_page_range+0x84/0x880
+ [<00000000402918f4>] unmap_single_vma+0x4c/0x60
+ [<0000000040291a18>] zap_page_range_single+0x110/0x160
+ [<0000000040291c34>] unmap_mapping_range+0x174/0x1a8
+ [<000000004026ccd8>] truncate_pagecache+0x50/0xa8
+ [<000000004026cd84>] truncate_setsize+0x54/0x70
+ [<000000004033d534>] put_aio_ring_file+0x44/0xb0
+ [<000000004033d5d8>] aio_free_ring+0x38/0x140
+ [<000000004033d714>] free_ioctx+0x34/0xa8
+ [<00000000401b0028>] process_one_work+0x1b8/0x4d0
+ [<00000000401b04f4>] worker_thread+0x1b4/0x648
+ [<00000000401b9128>] kthread+0x1b0/0x208
+ [<0000000040150020>] end_fault_vector+0x20/0x28
+ [<0000000040639518>] nf_ip_reroute+0x50/0xa8
+ [<0000000040638ed0>] nf_ip_route+0x10/0x78
+ [<0000000040638c90>] xfrm4_mode_tunnel_input+0x180/0x1f8
+
+CPU: 2 PID: 6532 Comm: kworker/2:0 Not tainted 4.13.0-rc2+ #1
+Workqueue: events free_ioctx
+Backtrace:
+ [<0000000040163bf0>] show_stack+0x20/0x38
+ [<0000000040688480>] dump_stack+0xa8/0x120
+ [<0000000040163dc4>] die_if_kernel+0x19c/0x2b0
+ [<0000000040164d0c>] handle_interruption+0xa24/0xa48
+
+This patch modifies flush_cache_range() to handle non-current contexts.
+Since this occurs infrequently, the simplest approach is to flush the
+entire cache when it happens.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/cache.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -604,13 +604,12 @@ void flush_cache_range(struct vm_area_st
+       if (parisc_requires_coherency())
+               flush_tlb_range(vma, start, end);
+-      if ((end - start) >= parisc_cache_flush_threshold) {
++      if ((end - start) >= parisc_cache_flush_threshold
++          || vma->vm_mm->context != mfsp(3)) {
+               flush_cache_all();
+               return;
+       }
+-      BUG_ON(vma->vm_mm->context != mfsp(3));
+-
+       flush_user_dcache_range_asm(start, end);
+       if (vma->vm_flags & VM_EXEC)
+               flush_user_icache_range_asm(start, end);
index 350f1f21e306f74a71e87c41bcc46178bccbcbdc..5b7c32b405d3c835b04e02ee1179bf590a0b998f 100644 (file)
@@ -12,3 +12,8 @@ mmc-core-use-device_property_read-instead-of-of_property_read.patch
 mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch
 mmc-core-fix-access-to-hs400-es-devices.patch
 mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch
+cpuset-fix-a-deadlock-due-to-incomplete-patching-of-cpusets_enabled.patch
+alsa-hda-fix-speaker-output-from-vaio-vpcl14m1r.patch
+drm-amdgpu-fix-undue-fallthroughs-in-golden-registers-initialization.patch
+asoc-do-not-close-shared-backend-dailink.patch
+kvm-async_pf-make-rcu-irq-exit-if-not-triggered-from-idle-task.patch
diff --git a/queue-4.9/workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch b/queue-4.9/workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch
new file mode 100644 (file)
index 0000000..b60c877
--- /dev/null
@@ -0,0 +1,53 @@
+From 5c0338c68706be53b3dc472e4308961c36e4ece1 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 18 Jul 2017 18:41:52 -0400
+Subject: workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 5c0338c68706be53b3dc472e4308961c36e4ece1 upstream.
+
+The combination of WQ_UNBOUND and max_active == 1 used to imply
+ordered execution.  After commit 4c16bd327c74 ("workqueue: implement
+NUMA affinity for unbound workqueues") introduced per-node worker
+pools, this is no longer true.
+
+While the right way to create an ordered workqueue is
+alloc_ordered_workqueue(), the documentation has been misleading for a
+long time, and people do use WQ_UNBOUND and max_active == 1 for ordered
+workqueues, which can lead to subtle bugs that are very difficult to
+trigger.
+
+It's unlikely that we'd see noticeable performance impact by enforcing
+ordering on WQ_UNBOUND / max_active == 1 workqueues.  Let's
+automatically set __WQ_ORDERED for those workqueues.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Christoph Hellwig <hch@infradead.org>
+Reported-by: Alexei Potashnik <alexei@purestorage.com>
+Fixes: 4c16bd327c74 ("workqueue: implement NUMA affinity for unbound workqueues")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3915,6 +3915,16 @@ struct workqueue_struct *__alloc_workque
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
++      /*
++       * Unbound && max_active == 1 used to imply ordered, which is no
++       * longer the case on NUMA machines due to per-node pools.  While
++       * alloc_ordered_workqueue() is the right way to create an ordered
++       * workqueue, keep the previous behavior to avoid subtle breakages
++       * on NUMA.
++       */
++      if ((flags & WQ_UNBOUND) && max_active == 1)
++              flags |= __WQ_ORDERED;
++
+       /* see the comment above the definition of WQ_POWER_EFFICIENT */
+       if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+               flags |= WQ_UNBOUND;
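Editorial note: as the hunk's comment says, alloc_ordered_workqueue() remains the explicit way to request ordering; the change above merely keeps the older, implicit spelling ordered as well. A brief hedged usage sketch (the workqueue names are made up):

    #include <linux/workqueue.h>

    /* preferred: explicitly ordered, at most one work item runs at a time */
    struct workqueue_struct *wq = alloc_ordered_workqueue("foo_ordered", 0);

    /* legacy spelling that this patch restores to ordered behaviour */
    struct workqueue_struct *legacy_wq =
            alloc_workqueue("foo_legacy", WQ_UNBOUND, 1);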