tile-add-asm-word-at-a-time.h-and-enable-support-functions.patch
word-at-a-time.h-support-zero_bytemask-on-alpha-and-tile.patch
make-asm-word-at-a-time.h-available-on-all-architectures.patch
+arch-powerpc-provide-zero_bytemask-for-big-endian.patch
+tile-use-global-strscpy-rather-than-private-copy.patch
+libata-array-underflow-in-ata_find_dev.patch
+workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch
--- /dev/null
+From dd242a080d178c36442a0bb28b6acf6f126d0569 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 6 Jul 2017 18:49:27 +0200
+Subject: ACPI / LPSS: Only call pwm_add_table() for the first PWM controller
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit dd242a080d178c36442a0bb28b6acf6f126d0569 upstream.
+
+At least on the UP board SBC both PWMs are enabled, leading to us trying
+to add the same pwm_lookup twice, which results in the following:
+
+[ 0.902224] list_add double add: new=ffffffffb8efd400,
+ prev=ffffffffb8efd400, next=ffffffffb8eeede0.
+[ 0.912466] ------------[ cut here ]------------
+[ 0.917624] kernel BUG at lib/list_debug.c:31!
+[ 0.922588] invalid opcode: 0000 [#1] SMP
+...
+[ 1.027450] Call Trace:
+[ 1.030185] pwm_add_table+0x4c/0x90
+[ 1.034181] bsw_pwm_setup+0x1a/0x20
+[ 1.038175] acpi_lpss_create_device+0xfe/0x420
+...
+
+This commit fixes this by only calling pwm_add_table() for the first
+PWM controller (which is the one used for the backlight).
+
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=1458599
+Fixes: bf7696a12071 (acpi: lpss: call pwm_add_table() for BSW...)
+Fixes: 04434ab5120a (ACPI / LPSS: Call pwm_add_table() for Bay Trail...)
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
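+A condensed illustration of the failure mode (the two pdata pointers are
+hypothetical names; before this fix the setup hook ran once per enumerated
+PWM ACPI device and re-registered the same static lookup table, whose
+entries are linked into the global pwm lookup list):
+
+        bsw_pwm_setup(pdata_pwm1);  /* pwm_add_table(bsw_pwm_lookup, ...): ok */
+        bsw_pwm_setup(pdata_pwm2);  /* same static entries linked again ->
+                                       list_add double add, BUG in lib/list_debug.c */
+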
+ drivers/acpi/acpi_lpss.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -85,6 +85,7 @@ static const struct lpss_device_desc lps
+ };
+
+ struct lpss_private_data {
++ struct acpi_device *adev;
+ void __iomem *mmio_base;
+ resource_size_t mmio_size;
+ unsigned int fixed_clk_rate;
+@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[
+
+ static void byt_pwm_setup(struct lpss_private_data *pdata)
+ {
++ struct acpi_device *adev = pdata->adev;
++
++ /* Only call pwm_add_table for the first PWM controller */
++ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
++ return;
++
+ if (!acpi_dev_present("INT33FD", NULL, -1))
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ }
+@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[
+
+ static void bsw_pwm_setup(struct lpss_private_data *pdata)
+ {
++ struct acpi_device *adev = pdata->adev;
++
++ /* Only call pwm_add_table for the first PWM controller */
++ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
++ return;
++
+ pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
+ }
+
+@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struc
+ goto err_out;
+ }
+
++ pdata->adev = adev;
+ pdata->dev_desc = dev_desc;
+
+ if (dev_desc->setup)
--- /dev/null
+From 3f3c371421e601fa93b6cb7fb52da9ad59ec90b4 Mon Sep 17 00:00:00 2001
+From: "Sergei A. Trusov" <sergei.a.trusov@ya.ru>
+Date: Wed, 2 Aug 2017 20:23:48 +1000
+Subject: ALSA: hda - Fix speaker output from VAIO VPCL14M1R
+
+From: Sergei A. Trusov <sergei.a.trusov@ya.ru>
+
+commit 3f3c371421e601fa93b6cb7fb52da9ad59ec90b4 upstream.
+
+Sony VAIO VPCL14M1R needs the quirk to make the speaker work properly.
+
+Tested-by: Dmitriy <mexx400@yandex.ru>
+Signed-off-by: Sergei A. Trusov <sergei.a.trusov@ya.ru>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2296,6 +2296,7 @@ static const struct snd_pci_quirk alc882
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+
--- /dev/null
+From b1cd2e34c69a2f3988786af451b6e17967c293a0 Mon Sep 17 00:00:00 2001
+From: Banajit Goswami <bgoswami@codeaurora.org>
+Date: Fri, 14 Jul 2017 23:15:05 -0700
+Subject: ASoC: do not close shared backend dailink
+
+From: Banajit Goswami <bgoswami@codeaurora.org>
+
+commit b1cd2e34c69a2f3988786af451b6e17967c293a0 upstream.
+
+Multiple frontend dailinks may be connected to a backend
+dailink at the same time. When one of the frontend dailinks is
+closed, the associated backend dailink should not be closed
+if it is still connected to other active frontend dailinks. This change
+ensures that the backend dailink is closed only after all
+connected frontend dailinks are closed.
+
+Signed-off-by: Gopikrishnaiah Anandan <agopik@codeaurora.org>
+Signed-off-by: Banajit Goswami <bgoswami@codeaurora.org>
+Signed-off-by: Patrick Lai <plai@codeaurora.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/soc-pcm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_so
+ dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
+ be->dai_link->name, event, dir);
+
++ if ((event == SND_SOC_DAPM_STREAM_STOP) &&
++ (be->dpcm[dir].users >= 1))
++ continue;
++
+ snd_soc_dapm_stream_event(be, dir, event);
+ }
+
--- /dev/null
+From c641e5b207ed7dfaa692820aeb5b6dde3de3e9b0 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Wed, 12 Jul 2017 17:55:29 +0200
+Subject: ASoC: fix pcm-creation regression
+
+From: Johan Hovold <johan@kernel.org>
+
+commit c641e5b207ed7dfaa692820aeb5b6dde3de3e9b0 upstream.
+
+This reverts commit 99b04f4c4051 ("ASoC: add Component level
+pcm_new/pcm_free"), which started calling the pcm_new callback for every
+component in a *card* when creating a new pcm, something which does not
+seem to make any sense.
+
+This specifically led to memory leaks in systems with more than one
+platform component and where DMA memory is allocated in the
+platform-driver callback. For example, when both mcasp devices are being
+used on an am335x board, DMA memory would be allocated twice for every
+DAI link during probe.
+
+When CONFIG_SND_VERBOSE_PROCFS was set this fortunately also led to
+warnings such as:
+
+WARNING: CPU: 0 PID: 565 at ../fs/proc/generic.c:346 proc_register+0x110/0x154
+proc_dir_entry 'sub0/prealloc' already registered
+
+Since there seem to be no users of the new component callbacks, and the
+current implementation introduced a regression, let's revert the
+offending commit for now.
+
+Fixes: 99b04f4c4051 ("ASoC: add Component level pcm_new/pcm_free")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Tested-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/soc.h | 6 ------
+ sound/soc/soc-core.c | 25 -------------------------
+ sound/soc/soc-pcm.c | 32 +++++++++-----------------------
+ 3 files changed, 9 insertions(+), 54 deletions(-)
+
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -795,10 +795,6 @@ struct snd_soc_component_driver {
+ int (*suspend)(struct snd_soc_component *);
+ int (*resume)(struct snd_soc_component *);
+
+- /* pcm creation and destruction */
+- int (*pcm_new)(struct snd_soc_pcm_runtime *);
+- void (*pcm_free)(struct snd_pcm *);
+-
+ /* DT */
+ int (*of_xlate_dai_name)(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+@@ -872,8 +868,6 @@ struct snd_soc_component {
+ void (*remove)(struct snd_soc_component *);
+ int (*suspend)(struct snd_soc_component *);
+ int (*resume)(struct snd_soc_component *);
+- int (*pcm_new)(struct snd_soc_pcm_runtime *);
+- void (*pcm_free)(struct snd_pcm *);
+
+ /* machine specific init */
+ int (*init)(struct snd_soc_component *component);
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -3139,8 +3139,6 @@ static int snd_soc_component_initialize(
+ component->remove = component->driver->remove;
+ component->suspend = component->driver->suspend;
+ component->resume = component->driver->resume;
+- component->pcm_new = component->driver->pcm_new;
+- component->pcm_free = component->driver->pcm_free;
+
+ dapm = &component->dapm;
+ dapm->dev = dev;
+@@ -3328,25 +3326,6 @@ static void snd_soc_platform_drv_remove(
+ platform->driver->remove(platform);
+ }
+
+-static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
+-{
+- struct snd_soc_platform *platform = rtd->platform;
+-
+- if (platform->driver->pcm_new)
+- return platform->driver->pcm_new(rtd);
+- else
+- return 0;
+-}
+-
+-static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
+-{
+- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+- struct snd_soc_platform *platform = rtd->platform;
+-
+- if (platform->driver->pcm_free)
+- platform->driver->pcm_free(pcm);
+-}
+-
+ /**
+ * snd_soc_add_platform - Add a platform to the ASoC core
+ * @dev: The parent device for the platform
+@@ -3370,10 +3349,6 @@ int snd_soc_add_platform(struct device *
+ platform->component.probe = snd_soc_platform_drv_probe;
+ if (platform_drv->remove)
+ platform->component.remove = snd_soc_platform_drv_remove;
+- if (platform_drv->pcm_new)
+- platform->component.pcm_new = snd_soc_platform_drv_pcm_new;
+- if (platform_drv->pcm_free)
+- platform->component.pcm_free = snd_soc_platform_drv_pcm_free;
+
+ #ifdef CONFIG_DEBUG_FS
+ platform->component.debugfs_prefix = "platform";
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -2628,25 +2628,12 @@ static int dpcm_fe_dai_close(struct snd_
+ return ret;
+ }
+
+-static void soc_pcm_free(struct snd_pcm *pcm)
+-{
+- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+- struct snd_soc_component *component;
+-
+- list_for_each_entry(component, &rtd->card->component_dev_list,
+- card_list) {
+- if (component->pcm_free)
+- component->pcm_free(pcm);
+- }
+-}
+-
+ /* create a new pcm */
+ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+ {
+ struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+- struct snd_soc_component *component;
+ struct snd_pcm *pcm;
+ char new_name[64];
+ int ret = 0, playback = 0, capture = 0;
+@@ -2755,18 +2742,17 @@ int soc_new_pcm(struct snd_soc_pcm_runti
+ if (capture)
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
+
+- list_for_each_entry(component, &rtd->card->component_dev_list, card_list) {
+- if (component->pcm_new) {
+- ret = component->pcm_new(rtd);
+- if (ret < 0) {
+- dev_err(component->dev,
+- "ASoC: pcm constructor failed: %d\n",
+- ret);
+- return ret;
+- }
++ if (platform->driver->pcm_new) {
++ ret = platform->driver->pcm_new(rtd);
++ if (ret < 0) {
++ dev_err(platform->dev,
++ "ASoC: pcm constructor failed: %d\n",
++ ret);
++ return ret;
+ }
+ }
+- pcm->private_free = soc_pcm_free;
++
++ pcm->private_free = platform->driver->pcm_free;
+ out:
+ dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
+ (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
--- /dev/null
+From 651e9268fb9b9944e063d731b09c0d2ad339bedb Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Wed, 12 Jul 2017 17:55:30 +0200
+Subject: ASoC: ux500: Restore platform DAI assignments
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 651e9268fb9b9944e063d731b09c0d2ad339bedb upstream.
+
+This reverts commit f1013cdeeeb9 ("ASoC: ux500: drop platform DAI
+assignments"), which seems to have been based on a misunderstanding and
+prevents the platform driver callbacks from being made (e.g. to
+preallocate DMA memory).
+
+The real culprit for the warnings about attempts to create duplicate
+procfs entries was commit 99b04f4c4051 ("ASoC: add Component level
+pcm_new/pcm_free") that broke PCM creation on systems that use more than
+one platform component.
+
+Fixes: f1013cdeeeb9 ("ASoC: ux500: drop platform DAI assignments")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Tested-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/ux500/mop500.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/soc/ux500/mop500.c
++++ b/sound/soc/ux500/mop500.c
+@@ -33,6 +33,7 @@ static struct snd_soc_dai_link mop500_da
+ .stream_name = "ab8500_0",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "ab8500-codec-dai.0",
++ .platform_name = "ux500-msp-i2s.1",
+ .codec_name = "ab8500-codec.0",
+ .init = mop500_ab8500_machine_init,
+ .ops = mop500_ab8500_ops,
+@@ -42,6 +43,7 @@ static struct snd_soc_dai_link mop500_da
+ .stream_name = "ab8500_1",
+ .cpu_dai_name = "ux500-msp-i2s.3",
+ .codec_dai_name = "ab8500-codec-dai.1",
++ .platform_name = "ux500-msp-i2s.3",
+ .codec_name = "ab8500-codec.0",
+ .init = NULL,
+ .ops = mop500_ab8500_ops,
+@@ -85,6 +87,8 @@ static int mop500_of_probe(struct platfo
+ for (i = 0; i < 2; i++) {
+ mop500_dai_links[i].cpu_of_node = msp_np[i];
+ mop500_dai_links[i].cpu_dai_name = NULL;
++ mop500_dai_links[i].platform_of_node = msp_np[i];
++ mop500_dai_links[i].platform_name = NULL;
+ mop500_dai_links[i].codec_of_node = codec_np;
+ mop500_dai_links[i].codec_name = NULL;
+ }
--- /dev/null
+From 5f5d03143de5e0c593da4ab18fc6393c2815e108 Mon Sep 17 00:00:00 2001
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+Date: Wed, 26 Jul 2017 13:09:24 +0100
+Subject: brcmfmac: fix memleak due to calling brcmf_sdiod_sgtable_alloc() twice
+
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+
+commit 5f5d03143de5e0c593da4ab18fc6393c2815e108 upstream.
+
+Due to a bugfix in the wireless tree and the commit mentioned below, a merge
+was needed, which went haywire. The resulting merge caused the function
+brcmf_sdiod_sgtable_alloc() to be called twice during probe, thus leaking
+the memory allocated by the first call.
+
+Fixes: 4d7928959832 ("brcmfmac: switch to new platform data")
+Reported-by: Stefan Wahren <stefan.wahren@i2se.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com>
+Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4168,11 +4168,6 @@ struct brcmf_sdio *brcmf_sdio_probe(stru
+ goto fail;
+ }
+
+- /* allocate scatter-gather table. sg support
+- * will be disabled upon allocation failure.
+- */
+- brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+-
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
--- /dev/null
+From 7af608e4f9530372aec6e940552bf76595f2e265 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 18 Jul 2017 17:57:46 -0400
+Subject: cgroup: create dfl_root files on subsys registration
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 7af608e4f9530372aec6e940552bf76595f2e265 upstream.
+
+On subsystem registration, css_populate_dir() is not called on the new
+root css, so the interface files for the subsystem on cgrp_dfl_root
+aren't created on registration. This is a residue from the days when
+cgrp_dfl_root was used only as the parking spot for unused subsystems,
+which is no longer true as it's used as the root for cgroup2.
+
+This is often fine as later operations tend to create them as a part
+of mount (cgroup1) or subtree_control operations (cgroup2); however,
+it's not difficult to mount cgroup2 with the controller interface
+files missing, as Waiman found out.
+
+Fix it by invoking css_populate_dir() on the root css on subsys
+registration.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup/cgroup.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4578,6 +4578,10 @@ int __init cgroup_init(void)
+
+ if (ss->bind)
+ ss->bind(init_css_set.subsys[ssid]);
++
++ mutex_lock(&cgroup_mutex);
++ css_populate_dir(init_css_set.subsys[ssid]);
++ mutex_unlock(&cgroup_mutex);
+ }
+
+ /* init_css_set.subsys[] has been updated, re-hash */
--- /dev/null
+From 610467270fb368584b74567edd21c8cc5104490f Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sat, 8 Jul 2017 07:17:02 -0400
+Subject: cgroup: don't call migration methods if there are no tasks to migrate
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 610467270fb368584b74567edd21c8cc5104490f upstream.
+
+Subsystem migration methods shouldn't be called for empty migrations.
+cgroup_migrate_execute() implements this guarantee by bailing early if
+there are no source css_sets. This used to be correct before
+a79a908fd2b0 ("cgroup: introduce cgroup namespaces"), but no longer
+since the commit because css_sets can stay pinned without tasks in
+them.
+
+This caused cgroup_migrate_execute() to call into cpuset migration
+methods with an empty cgroup_taskset. cpuset migration methods
+correctly assume that cgroup_taskset_first() never returns NULL;
+however, due to the bug, it can, leading to the following oops.
+
+ Unable to handle kernel paging request for data at address 0x00000960
+ Faulting instruction address: 0xc0000000001d6868
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ ...
+ CPU: 14 PID: 16947 Comm: kworker/14:0 Tainted: G W
+ 4.12.0-rc4-next-20170609 #2
+ Workqueue: events cpuset_hotplug_workfn
+ task: c00000000ca60580 task.stack: c00000000c728000
+ NIP: c0000000001d6868 LR: c0000000001d6858 CTR: c0000000001d6810
+ REGS: c00000000c72b720 TRAP: 0300 Tainted: GW (4.12.0-rc4-next-20170609)
+ MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 44722422 XER: 20000000
+ CFAR: c000000000008710 DAR: 0000000000000960 DSISR: 40000000 SOFTE: 1
+ GPR00: c0000000001d6858 c00000000c72b9a0 c000000001536e00 0000000000000000
+ GPR04: c00000000c72b9c0 0000000000000000 c00000000c72bad0 c000000766367678
+ GPR08: c000000766366d10 c00000000c72b958 c000000001736e00 0000000000000000
+ GPR12: c0000000001d6810 c00000000e749300 c000000000123ef8 c000000775af4180
+ GPR16: 0000000000000000 0000000000000000 c00000075480e9c0 c00000075480e9e0
+ GPR20: c00000075480e8c0 0000000000000001 0000000000000000 c00000000c72ba20
+ GPR24: c00000000c72baa0 c00000000c72bac0 c000000001407248 c00000000c72ba20
+ GPR28: c00000000141fc80 c00000000c72bac0 c00000000c6bc790 0000000000000000
+ NIP [c0000000001d6868] cpuset_can_attach+0x58/0x1b0
+ LR [c0000000001d6858] cpuset_can_attach+0x48/0x1b0
+ Call Trace:
+ [c00000000c72b9a0] [c0000000001d6858] cpuset_can_attach+0x48/0x1b0 (unreliable)
+ [c00000000c72ba00] [c0000000001cbe80] cgroup_migrate_execute+0xb0/0x450
+ [c00000000c72ba80] [c0000000001d3754] cgroup_transfer_tasks+0x1c4/0x360
+ [c00000000c72bba0] [c0000000001d923c] cpuset_hotplug_workfn+0x86c/0xa20
+ [c00000000c72bca0] [c00000000011aa44] process_one_work+0x1e4/0x580
+ [c00000000c72bd30] [c00000000011ae78] worker_thread+0x98/0x5c0
+ [c00000000c72bdc0] [c000000000124058] kthread+0x168/0x1b0
+ [c00000000c72be30] [c00000000000b2e8] ret_from_kernel_thread+0x5c/0x74
+ Instruction dump:
+ f821ffa1 7c7d1b78 60000000 60000000 38810020 7fa3eb78 3f42ffed 4bff4c25
+ 60000000 3b5a0448 3d420020 eb610020 <e9230960> 7f43d378 e9290000 f92af200
+ ---[ end trace dcaaf98fb36d9e64 ]---
+
+This patch fixes the bug by adding an explicit nr_tasks counter to
+cgroup_taskset and skipping calling the migration methods if the
+counter is zero. While at it, remove the now spurious check on no
+source css_sets.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
+Cc: Roman Gushchin <guro@fb.com>
+Fixes: a79a908fd2b0 ("cgroup: introduce cgroup namespaces")
+Link: http://lkml.kernel.org/r/1497266622.15415.39.camel@abdul.in.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
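+For reference, the assumption that breaks lives at the top of
+cpuset_can_attach(); roughly (simplified, details vary by kernel version):
+
+        struct cgroup_subsys_state *css;
+
+        /* assumes the taskset always contains at least one task */
+        cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+        cs = css_cs(css);
+
+With an empty cgroup_taskset, cgroup_taskset_first() returns NULL, so
+task_cs(NULL) dereferences a near-NULL address, producing the oops above.
+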
+ kernel/cgroup/cgroup-internal.h | 3 ++
+ kernel/cgroup/cgroup.c | 58 +++++++++++++++++++++-------------------
+ 2 files changed, 34 insertions(+), 27 deletions(-)
+
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -33,6 +33,9 @@ struct cgroup_taskset {
+ struct list_head src_csets;
+ struct list_head dst_csets;
+
++ /* the number of tasks in the set */
++ int nr_tasks;
++
+ /* the subsys currently being processed */
+ int ssid;
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1948,6 +1948,8 @@ static void cgroup_migrate_add_task(stru
+ if (!cset->mg_src_cgrp)
+ return;
+
++ mgctx->tset.nr_tasks++;
++
+ list_move_tail(&task->cg_list, &cset->mg_tasks);
+ if (list_empty(&cset->mg_node))
+ list_add_tail(&cset->mg_node,
+@@ -2036,21 +2038,19 @@ static int cgroup_migrate_execute(struct
+ struct css_set *cset, *tmp_cset;
+ int ssid, failed_ssid, ret;
+
+- /* methods shouldn't be called if no task is actually migrating */
+- if (list_empty(&tset->src_csets))
+- return 0;
+-
+ /* check that we can legitimately attach to the cgroup */
+- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+- if (ss->can_attach) {
+- tset->ssid = ssid;
+- ret = ss->can_attach(tset);
+- if (ret) {
+- failed_ssid = ssid;
+- goto out_cancel_attach;
++ if (tset->nr_tasks) {
++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++ if (ss->can_attach) {
++ tset->ssid = ssid;
++ ret = ss->can_attach(tset);
++ if (ret) {
++ failed_ssid = ssid;
++ goto out_cancel_attach;
++ }
+ }
+- }
+- } while_each_subsys_mask();
++ } while_each_subsys_mask();
++ }
+
+ /*
+ * Now that we're guaranteed success, proceed to move all tasks to
+@@ -2077,25 +2077,29 @@ static int cgroup_migrate_execute(struct
+ */
+ tset->csets = &tset->dst_csets;
+
+- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+- if (ss->attach) {
+- tset->ssid = ssid;
+- ss->attach(tset);
+- }
+- } while_each_subsys_mask();
++ if (tset->nr_tasks) {
++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++ if (ss->attach) {
++ tset->ssid = ssid;
++ ss->attach(tset);
++ }
++ } while_each_subsys_mask();
++ }
+
+ ret = 0;
+ goto out_release_tset;
+
+ out_cancel_attach:
+- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+- if (ssid == failed_ssid)
+- break;
+- if (ss->cancel_attach) {
+- tset->ssid = ssid;
+- ss->cancel_attach(tset);
+- }
+- } while_each_subsys_mask();
++ if (tset->nr_tasks) {
++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++ if (ssid == failed_ssid)
++ break;
++ if (ss->cancel_attach) {
++ tset->ssid = ssid;
++ ss->cancel_attach(tset);
++ }
++ } while_each_subsys_mask();
++ }
+ out_release_tset:
+ spin_lock_irq(&css_set_lock);
+ list_splice_init(&tset->dst_csets, &tset->src_csets);
--- /dev/null
+From 3c74541777302eec43a0d1327c4d58b8659a776b Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sun, 23 Jul 2017 08:14:15 -0400
+Subject: cgroup: fix error return value from cgroup_subtree_control()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 3c74541777302eec43a0d1327c4d58b8659a776b upstream.
+
+While refactoring, f7b2814bb9b6 ("cgroup: factor out
+cgroup_{apply|finalize}_control() from
+cgroup_subtree_control_write()") broke error return value from the
+function. The return value from the last operation is always
+overridden to zero. Fix it.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup/cgroup.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2921,11 +2921,11 @@ static ssize_t cgroup_subtree_control_wr
+ cgrp->subtree_control &= ~disable;
+
+ ret = cgroup_apply_control(cgrp);
+-
+ cgroup_finalize_control(cgrp, ret);
++ if (ret)
++ goto out_unlock;
+
+ kernfs_activate(cgrp->kn);
+- ret = 0;
+ out_unlock:
+ cgroup_kn_unlock(of->kn);
+ return ret ?: nbytes;
--- /dev/null
+From 89affbf5d9ebb15c6460596822e8857ea2f9e735 Mon Sep 17 00:00:00 2001
+From: Dima Zavin <dmitriyz@waymo.com>
+Date: Wed, 2 Aug 2017 13:32:18 -0700
+Subject: cpuset: fix a deadlock due to incomplete patching of cpusets_enabled()
+
+From: Dima Zavin <dmitriyz@waymo.com>
+
+commit 89affbf5d9ebb15c6460596822e8857ea2f9e735 upstream.
+
+In codepaths that use the begin/retry interface for reading
+mems_allowed_seq with irqs disabled, there exists a race condition that
+stalls the patch process after only modifying a subset of the
+static_branch call sites.
+
+This problem manifested itself as a deadlock in the slub allocator,
+inside get_any_partial. The loop reads mems_allowed_seq value (via
+read_mems_allowed_begin), performs the defrag operation, and then
+verifies the consistency of mems_allowed via the read_mems_allowed_retry
+and the cookie returned by xxx_begin.
+
+The issue here is that both begin and retry first check if cpusets are
+enabled via cpusets_enabled() static branch. This branch can be
+rewritten dynamically (via cpuset_inc) if a new cpuset is created. The
+x86 jump label code fully synchronizes across all CPUs for every entry
+it rewrites. If it rewrites only one of the callsites (specifically the
+one in read_mems_allowed_retry) and then waits for the
+smp_call_function(do_sync_core) to complete while a CPU is inside the
+begin/retry section with IRQs off and the mems_allowed value is changed,
+we can hang.
+
+This is because begin() will always return 0 (since it wasn't patched
+yet) while retry() will test the 0 against the actual value of the seq
+counter.
+
+The fix is to use two different static keys: one for begin
+(pre_enable_key) and one for retry (enable_key). In cpuset_inc(), we
+first bump the pre_enable key to ensure that read_mems_allowed_begin()
+always returns a valid seqcount if we are enabling cpusets. Similarly, when
+disabling cpusets via cpuset_dec(), we first ensure that callers of
+read_mems_allowed_retry() will start ignoring the seqcount value
+before we let read_mems_allowed_begin() return 0.
+
+The relevant stack traces of the two stuck threads:
+
+ CPU: 1 PID: 1415 Comm: mkdir Tainted: G L 4.9.36-00104-g540c51286237 #4
+ Hardware name: Default string Default string/Hardware, BIOS 4.29.1-20170526215256 05/26/2017
+ task: ffff8817f9c28000 task.stack: ffffc9000ffa4000
+ RIP: smp_call_function_many+0x1f9/0x260
+ Call Trace:
+ smp_call_function+0x3b/0x70
+ on_each_cpu+0x2f/0x90
+ text_poke_bp+0x87/0xd0
+ arch_jump_label_transform+0x93/0x100
+ __jump_label_update+0x77/0x90
+ jump_label_update+0xaa/0xc0
+ static_key_slow_inc+0x9e/0xb0
+ cpuset_css_online+0x70/0x2e0
+ online_css+0x2c/0xa0
+ cgroup_apply_control_enable+0x27f/0x3d0
+ cgroup_mkdir+0x2b7/0x420
+ kernfs_iop_mkdir+0x5a/0x80
+ vfs_mkdir+0xf6/0x1a0
+ SyS_mkdir+0xb7/0xe0
+ entry_SYSCALL_64_fastpath+0x18/0xad
+
+ ...
+
+ CPU: 2 PID: 1 Comm: init Tainted: G L 4.9.36-00104-g540c51286237 #4
+ Hardware name: Default string Default string/Hardware, BIOS 4.29.1-20170526215256 05/26/2017
+ task: ffff8818087c0000 task.stack: ffffc90000030000
+ RIP: int3+0x39/0x70
+ Call Trace:
+ <#DB> ? ___slab_alloc+0x28b/0x5a0
+ <EOE> ? copy_process.part.40+0xf7/0x1de0
+ __slab_alloc.isra.80+0x54/0x90
+ copy_process.part.40+0xf7/0x1de0
+ copy_process.part.40+0xf7/0x1de0
+ kmem_cache_alloc_node+0x8a/0x280
+ copy_process.part.40+0xf7/0x1de0
+ _do_fork+0xe7/0x6c0
+ _raw_spin_unlock_irq+0x2d/0x60
+ trace_hardirqs_on_caller+0x136/0x1d0
+ entry_SYSCALL_64_fastpath+0x5/0xad
+ do_syscall_64+0x27/0x350
+ SyS_clone+0x19/0x20
+ do_syscall_64+0x60/0x350
+ entry_SYSCALL64_slow_path+0x25/0x25
+
+Link: http://lkml.kernel.org/r/20170731040113.14197-1-dmitriyz@waymo.com
+Fixes: 46e700abc44c ("mm, page_alloc: remove unnecessary taking of a seqlock when cpusets are disabled")
+Signed-off-by: Dima Zavin <dmitriyz@waymo.com>
+Reported-by: Cliff Spradlin <cspradlin@waymo.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Christopher Lameter <cl@linux.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
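+The caller pattern that the two keys guard looks roughly like this (the
+allocation helper below is a hypothetical stand-in for the real users in the
+page allocator and slub):
+
+        unsigned int cpuset_mems_cookie;
+        struct page *page;
+
+        do {
+                cpuset_mems_cookie = read_mems_allowed_begin();
+                page = try_alloc_from_allowed_nodes();  /* may run with IRQs off */
+        } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
+
+If only retry() has been patched to "enabled" so far, begin() still returns 0
+while retry() compares that 0 against the live seqcount, so the loop can spin
+forever with IRQs off and the jump-label update never completes.
+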
+ include/linux/cpuset.h | 19 +++++++++++++++++--
+ kernel/cgroup/cpuset.c | 1 +
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -18,6 +18,19 @@
+
+ #ifdef CONFIG_CPUSETS
+
++/*
++ * Static branch rewrites can happen in an arbitrary order for a given
++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
++ * to ensure that begin() always gets rewritten before retry() in the
++ * disabled -> enabled transition. If not, then if local irqs are disabled
++ * around the loop, we can deadlock since retry() would always be
++ * comparing the latest value of the mems_allowed seqcount against 0 as
++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
++ * transition should happen in reverse order for the same reasons (want to stop
++ * looking at real value of mems_allowed.sequence in retry() first).
++ */
++extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
+ static inline bool cpusets_enabled(void)
+ {
+@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)
+
+ static inline void cpuset_inc(void)
+ {
++ static_branch_inc(&cpusets_pre_enable_key);
+ static_branch_inc(&cpusets_enabled_key);
+ }
+
+ static inline void cpuset_dec(void)
+ {
+ static_branch_dec(&cpusets_enabled_key);
++ static_branch_dec(&cpusets_pre_enable_key);
+ }
+
+ extern int cpuset_init(void);
+@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_al
+ */
+ static inline unsigned int read_mems_allowed_begin(void)
+ {
+- if (!cpusets_enabled())
++ if (!static_branch_unlikely(&cpusets_pre_enable_key))
+ return 0;
+
+ return read_seqcount_begin(¤t->mems_allowed_seq);
+@@ -129,7 +144,7 @@ static inline unsigned int read_mems_all
+ */
+ static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+- if (!cpusets_enabled())
++ if (!static_branch_unlikely(&cpusets_enabled_key))
+ return false;
+
+ return read_seqcount_retry(¤t->mems_allowed_seq, seq);
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -63,6 +63,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+
++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+
+ /* See "Frequency meter" comments, below. */
--- /dev/null
+From c471e70b187e62efc77bcdf6f58795907f8f4851 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 26 Jul 2017 22:10:53 -0400
+Subject: drm/amdgpu: fix header on gfx9 clear state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit c471e70b187e62efc77bcdf6f58795907f8f4851 upstream.
+
+This got missed when we open sourced this.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h | 41 +++++++++++++--------------
+ 1 file changed, 21 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
++++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+@@ -1,24 +1,25 @@
+-
+ /*
+-***************************************************************************************************
+-*
+-* Trade secret of Advanced Micro Devices, Inc.
+-* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
+-*
+-* All rights reserved. This notice is intended as a precaution against inadvertent publication and
+-* does not imply publication or any waiver of confidentiality. The year included in the foregoing
+-* notice is the year of creation of the work.
+-*
+-***************************************************************************************************
+-*/
+-/**
+-***************************************************************************************************
+-* @brief gfx9 Clearstate Definitions
+-***************************************************************************************************
+-*
+-* Do not edit! This is a machine-generated file!
+-*
+-*/
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
+
+ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
+ {
--- /dev/null
+From 5694785cf09bf0e7bd8e5f62361ea34fa162a4a0 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Sun, 30 Jul 2017 10:18:25 +0200
+Subject: drm/amdgpu: Fix undue fallthroughs in golden registers initialization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jean Delvare <jdelvare@suse.de>
+
+commit 5694785cf09bf0e7bd8e5f62361ea34fa162a4a0 upstream.
+
+As I was staring at the si_init_golden_registers code, I noticed that
+the Pitcairn initialization silently falls through to the Cape Verde
+initialization, and the Oland initialization falls through to the Hainan
+initialization. However there is no comment stating that this is
+intentional, and the radeon driver doesn't have any such fallthrough,
+so I suspect this is not supposed to happen.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Fixes: 62a37553414a ("drm/amdgpu: add si implementation v10")
+Cc: Ken Wang <Qingqing.Wang@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Marek Olšák" <maraeo@gmail.com>
+Cc: "Christian König" <christian.koenig@amd.com>
+Cc: Flora Cui <Flora.Cui@amd.com>
+Reviewed-by: Marek Olšák <marek.olsak@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/si.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1385,6 +1385,7 @@ static void si_init_golden_registers(str
+ amdgpu_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
++ break;
+ case CHIP_VERDE:
+ amdgpu_program_register_sequence(adev,
+ verde_golden_registers,
+@@ -1409,6 +1410,7 @@ static void si_init_golden_registers(str
+ amdgpu_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
++ break;
+ case CHIP_HAINAN:
+ amdgpu_program_register_sequence(adev,
+ hainan_golden_registers,
--- /dev/null
+From 0b0f934e92a8eaed2e6c48a50eae6f84661f74f3 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Thu, 8 Jun 2017 10:55:26 +0300
+Subject: iwlwifi: dvm: prevent an out of bounds access
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit 0b0f934e92a8eaed2e6c48a50eae6f84661f74f3 upstream.
+
+iwlagn_check_ratid_empty takes the tid as a parameter, but
+it doesn't check that it is not IWL_TID_NON_QOS.
+Since IWL_TID_NON_QOS = 8 and iwl_priv::tid_data is an array
+with 8 entries, accessing iwl_priv::tid_data[IWL_TID_NON_QOS]
+is a bad idea.
+This happened in iwlagn_rx_reply_tx. Since
+iwlagn_check_ratid_empty is relevant only to check whether
+we can open A-MPDU, this flow is irrelevant if tid is
+IWL_TID_NON_QOS. Call iwlagn_check_ratid_empty only inside
+the
+ if (tid != IWL_TID_NON_QOS)
+
+block a few lines earlier in the function.
+
+Reported-by: Seraphime Kirkovski <kirkseraph@gmail.com>
+Tested-by: Seraphime Kirkovski <kirkseraph@gmail.com>
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
++ iwlagn_check_ratid_empty(priv, sta_id, tid);
+ }
+
+ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+
+- iwlagn_check_ratid_empty(priv, sta_id, tid);
+ freed = 0;
+
+ /* process frames */
--- /dev/null
+From 7e5a672289c9754d07e1c3b33649786d3d70f5e4 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <Suzuki.Poulose@arm.com>
+Date: Wed, 5 Jul 2017 09:57:00 +0100
+Subject: KVM: arm/arm64: Handle hva aging while destroying the vm
+
+From: Suzuki K Poulose <Suzuki.Poulose@arm.com>
+
+commit 7e5a672289c9754d07e1c3b33649786d3d70f5e4 upstream.
+
+The mmu_notifier_release() callback of KVM triggers cleaning up
+the stage2 page table on kvm-arm. However, there could be other
+notifier callbacks running in parallel with mmu_notifier_release(),
+which could cause those callbacks to end up operating on an empty stage2
+page table. Make sure we check for this in all the notifier callbacks.
+
+Fixes: commit 293f29363 ("kvm-arm: Unmap shadow pagetables properly")
+Reported-by: Alex Graf <agraf@suse.de>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/mmu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1665,12 +1665,16 @@ static int kvm_test_age_hva_handler(stru
+
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+ {
++ if (!kvm->arch.pgd)
++ return 0;
+ trace_kvm_age_hva(start, end);
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+ }
+
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ {
++ if (!kvm->arch.pgd)
++ return 0;
+ trace_kvm_test_age_hva(hva);
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+ }
--- /dev/null
+From 337c017ccdf2653d0040099433fc1a2b1beb5926 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Tue, 1 Aug 2017 05:20:03 -0700
+Subject: KVM: async_pf: make rcu irq exit if not triggered from idle task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit 337c017ccdf2653d0040099433fc1a2b1beb5926 upstream.
+
+ WARNING: CPU: 5 PID: 1242 at kernel/rcu/tree_plugin.h:323 rcu_note_context_switch+0x207/0x6b0
+ CPU: 5 PID: 1242 Comm: unity-settings- Not tainted 4.13.0-rc2+ #1
+ RIP: 0010:rcu_note_context_switch+0x207/0x6b0
+ Call Trace:
+ __schedule+0xda/0xba0
+ ? kvm_async_pf_task_wait+0x1b2/0x270
+ schedule+0x40/0x90
+ kvm_async_pf_task_wait+0x1cc/0x270
+ ? prepare_to_swait+0x22/0x70
+ do_async_page_fault+0x77/0xb0
+ ? do_async_page_fault+0x77/0xb0
+ async_page_fault+0x28/0x30
+ RIP: 0010:__d_lookup_rcu+0x90/0x1e0
+
+I encountered this when trying to stress the async page fault in an L1 guest
+with L2 guests running.
+
+Commit 9b132fbe5419 (Add rcu user eqs exception hooks for async page
+fault) adds rcu_irq_enter/exit() to kvm_async_pf_task_wait() to exit cpu
+idle eqs when needed, to protect the code that needs to use RCU. However,
+we need to call the pair even if the function calls schedule(), as seen
+from the above backtrace.
+
+This patch fixes it by informing the RCU subsystem of the irq exit/enter
+towards/away from idle for both the n.halted and !n.halted cases.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kvm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
+ if (hlist_unhashed(&n.link))
+ break;
+
++ rcu_irq_exit();
++
+ if (!n.halted) {
+ local_irq_enable();
+ schedule();
+@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
+ /*
+ * We cannot reschedule. So halt.
+ */
+- rcu_irq_exit();
+ native_safe_halt();
+ local_irq_disable();
+- rcu_irq_enter();
+ }
++
++ rcu_irq_enter();
+ }
+ if (!n.halted)
+ finish_swait(&n.wq, &wait);
--- /dev/null
+From 59a5e266c3f5c1567508888dd61a45b86daed0fa Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 19 Jul 2017 13:06:41 +0300
+Subject: libata: array underflow in ata_find_dev()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 59a5e266c3f5c1567508888dd61a45b86daed0fa upstream.
+
+My static checker complains that "devno" can be negative, meaning that
+we read before the start of the array. I've looked at the code, and I
+think the warning is right. This comes from /proc so it's root only, or
+it would be quite a serious bug. The call tree looks like this:
+
+proc_scsi_write() <- gets id and channel from simple_strtoul()
+-> scsi_add_single_device() <- calls shost->transportt->user_scan()
+ -> ata_scsi_user_scan()
+ -> ata_find_dev()
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-scsi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3028,10 +3028,12 @@ static unsigned int atapi_xlat(struct at
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+ if (!sata_pmp_attached(ap)) {
+- if (likely(devno < ata_link_max_devices(&ap->link)))
++ if (likely(devno >= 0 &&
++ devno < ata_link_max_devices(&ap->link)))
+ return &ap->link.device[devno];
+ } else {
+- if (likely(devno < ap->nr_pmp_links))
++ if (likely(devno >= 0 &&
++ devno < ap->nr_pmp_links))
+ return &ap->pmp_link[devno].device[0];
+ }
+
--- /dev/null
+From 2be7cfed995e25de1ffaffe14cc065b7ffb528e0 Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Wed, 2 Aug 2017 13:31:47 -0700
+Subject: mm/hugetlb.c: __get_user_pages ignores certain follow_hugetlb_page errors
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit 2be7cfed995e25de1ffaffe14cc065b7ffb528e0 upstream.
+
+Commit 9a291a7c9428 ("mm/hugetlb: report -EHWPOISON not -EFAULT when
+FOLL_HWPOISON is specified") causes __get_user_pages to ignore certain
+errors from follow_hugetlb_page. After such an error, __get_user_pages
+subsequently calls faultin_page on the same VMA and start address that
+follow_hugetlb_page failed on instead of returning the error immediately
+as it should.
+
+In follow_hugetlb_page, when hugetlb_fault returns a value covered under
+VM_FAULT_ERROR, follow_hugetlb_page returns it without setting nr_pages
+to 0 as __get_user_pages expects in this case, which causes the
+following to happen in __get_user_pages: the "while (nr_pages)" check
+succeeds, we skip the "if (!vma..." check because we got a VMA the last
+time around, we find no page with follow_page_mask, and we call
+faultin_page, which calls hugetlb_fault for the second time.
+
+This issue also slightly changes how __get_user_pages works. Before, it
+only returned error if it had made no progress (i = 0). But now,
+follow_hugetlb_page can clobber "i" with an error code since its new
+return path doesn't check for progress. So if "i" is nonzero before a
+failing call to follow_hugetlb_page, that indication of progress is lost
+and __get_user_pages can return error even if some pages were
+successfully pinned.
+
+To fix this, change follow_hugetlb_page so that it updates nr_pages,
+allowing __get_user_pages to fail immediately and restoring the "error
+only if no progress" behavior to __get_user_pages.
+
+Tested that __get_user_pages returns when expected on error from
+hugetlb_fault in follow_hugetlb_page.
+
+Fixes: 9a291a7c9428 ("mm/hugetlb: report -EHWPOISON not -EFAULT when FOLL_HWPOISON is specified")
+Link: http://lkml.kernel.org/r/1500406795-58462-1-git-send-email-daniel.m.jordan@oracle.com
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Acked-by: Punit Agrawal <punit.agrawal@arm.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: zhong jiang <zhongjiang@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
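+A condensed sketch of the __get_user_pages() loop behaviour described above
+(argument lists abbreviated, error handling omitted; not literal code):
+
+        do {
+                if (!vma || start >= vma->vm_end) {     /* skipped on the 2nd pass */
+                        vma = find_extend_vma(mm, start);
+                        /* ... */
+                        if (is_vm_hugetlb_page(vma)) {
+                                i = follow_hugetlb_page(mm, vma, pages, vmas,
+                                                        &start, &nr_pages, i,
+                                                        gup_flags, nonblocking);
+                                continue;
+                        }
+                }
+                page = follow_page_mask(vma, start, foll_flags, &page_mask);
+                if (!page)
+                        faultin_page(tsk, vma, start, &foll_flags, nonblocking);
+                /* ... */
+        } while (nr_pages);
+
+Unless follow_hugetlb_page() zeroes nr_pages on a VM_FAULT_ERROR, the loop
+goes around again, the hugetlb branch is skipped because the cached vma still
+covers start, and faultin_page() calls hugetlb_fault() a second time.
+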
+ mm/hugetlb.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4095,6 +4095,7 @@ long follow_hugetlb_page(struct mm_struc
+ unsigned long vaddr = *position;
+ unsigned long remainder = *nr_pages;
+ struct hstate *h = hstate_vma(vma);
++ int err = -EFAULT;
+
+ while (vaddr < vma->vm_end && remainder) {
+ pte_t *pte;
+@@ -4170,11 +4171,7 @@ long follow_hugetlb_page(struct mm_struc
+ }
+ ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
+ if (ret & VM_FAULT_ERROR) {
+- int err = vm_fault_to_errno(ret, flags);
+-
+- if (err)
+- return err;
+-
++ err = vm_fault_to_errno(ret, flags);
+ remainder = 0;
+ break;
+ }
+@@ -4229,7 +4226,7 @@ same_page:
+ */
+ *position = vaddr;
+
+- return i ? i : -EFAULT;
++ return i ? i : err;
+ }
+
+ #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
--- /dev/null
+From 3ea277194daaeaa84ce75180ec7c7a2075027a68 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 2 Aug 2017 13:31:52 -0700
+Subject: mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 3ea277194daaeaa84ce75180ec7c7a2075027a68 upstream.
+
+Nadav Amit identified a theoretical race between page reclaim and
+mprotect due to TLB flushes being batched outside of the PTL being held.
+
+He described the race as follows:
+
+ CPU0 CPU1
+ ---- ----
+ user accesses memory using RW PTE
+ [PTE now cached in TLB]
+ try_to_unmap_one()
+ ==> ptep_get_and_clear()
+ ==> set_tlb_ubc_flush_pending()
+ mprotect(addr, PROT_READ)
+ ==> change_pte_range()
+ ==> [ PTE non-present - no flush ]
+
+ user writes using cached RW PTE
+ ...
+
+ try_to_unmap_flush()
+
+The same type of race exists for reads when protecting for PROT_NONE and
+also exists for operations that can leave an old TLB entry behind such
+as munmap, mremap and madvise.
+
+For some operations like mprotect, it's not necessarily a data integrity
+issue but it is a correctness issue as there is a window where an
+mprotect that limits access still allows access. For munmap, it's
+potentially a data integrity issue although the race is massive as an
+munmap, mmap and return to userspace must all complete between the
+window when reclaim drops the PTL and flushes the TLB. However, it's
+theoretically possible, so handle this issue by flushing the mm if
+reclaim is potentially currently batching TLB flushes.
+
+Other instances where a flush is required for a present pte should be ok
+as either the page lock is held preventing parallel reclaim or a page
+reference count is elevated preventing a parallel free leading to
+corruption. In the case of page_mkclean there isn't an obvious path
+that userspace could take advantage of without using the operations that
+are guarded by this patch. Other users such as gup, in a race with
+reclaim, look just at PTEs. Huge page variants should be ok as they
+don't race with reclaim. mincore only looks at PTEs. userfault also
+should be ok as if a parallel reclaim takes place, it will either fault
+the page back in or read some of the data before the flush occurs
+triggering a fault.
+
+Note that a variant of this patch was acked by Andy Lutomirski but this
+was for the x86 parts on top of his PCID work which didn't make the 4.13
+merge window as expected. His ack is dropped from this version and
+there will be a follow-on patch on top of PCID that will include his
+ack.
+
+[akpm@linux-foundation.org: tweak comments]
+[akpm@linux-foundation.org: fix spello]
+Link: http://lkml.kernel.org/r/20170717155523.emckq2esjro6hf3z@suse.de
+Reported-by: Nadav Amit <nadav.amit@gmail.com>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mm_types.h | 4 ++++
+ mm/internal.h | 5 ++++-
+ mm/madvise.c | 1 +
+ mm/memory.c | 1 +
+ mm/mprotect.c | 1 +
+ mm/mremap.c | 1 +
+ mm/rmap.c | 36 ++++++++++++++++++++++++++++++++++++
+ 7 files changed, 48 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -495,6 +495,10 @@ struct mm_struct {
+ */
+ bool tlb_flush_pending;
+ #endif
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++ /* See flush_tlb_batched_pending() */
++ bool tlb_flush_batched;
++#endif
+ struct uprobes_state uprobes_state;
+ #ifdef CONFIG_HUGETLB_PAGE
+ atomic_long_t hugetlb_usage;
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -498,6 +498,7 @@ extern struct workqueue_struct *mm_percp
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ void try_to_unmap_flush(void);
+ void try_to_unmap_flush_dirty(void);
++void flush_tlb_batched_pending(struct mm_struct *mm);
+ #else
+ static inline void try_to_unmap_flush(void)
+ {
+@@ -505,7 +506,9 @@ static inline void try_to_unmap_flush(vo
+ static inline void try_to_unmap_flush_dirty(void)
+ {
+ }
+-
++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++}
+ #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
+ extern const struct trace_print_flags pageflag_names[];
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -320,6 +320,7 @@ static int madvise_free_pte_range(pmd_t
+
+ tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+ orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ flush_tlb_batched_pending(mm);
+ arch_enter_lazy_mmu_mode();
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1197,6 +1197,7 @@ again:
+ init_rss_vec(rss);
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = start_pte;
++ flush_tlb_batched_pending(mm);
+ arch_enter_lazy_mmu_mode();
+ do {
+ pte_t ptent = *pte;
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -66,6 +66,7 @@ static unsigned long change_pte_range(st
+ atomic_read(&vma->vm_mm->mm_users) == 1)
+ target_node = numa_node_id();
+
++ flush_tlb_batched_pending(vma->vm_mm);
+ arch_enter_lazy_mmu_mode();
+ do {
+ oldpte = *pte;
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -152,6 +152,7 @@ static void move_ptes(struct vm_area_str
+ new_ptl = pte_lockptr(mm, new_pmd);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
++ flush_tlb_batched_pending(vma->vm_mm);
+ arch_enter_lazy_mmu_mode();
+
+ for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -617,6 +617,13 @@ static void set_tlb_ubc_flush_pending(st
+ tlb_ubc->flush_required = true;
+
+ /*
++ * Ensure compiler does not re-order the setting of tlb_flush_batched
++ * before the PTE is cleared.
++ */
++ barrier();
++ mm->tlb_flush_batched = true;
++
++ /*
+ * If the PTE was dirty then it's best to assume it's writable. The
+ * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+ * before the page is queued for IO.
+@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm
+
+ return should_defer;
+ }
++
++/*
++ * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
++ * operation such as mprotect or munmap to race between reclaim unmapping
++ * the page and flushing the page. If this race occurs, it potentially allows
++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
++ * batching in flight would be expensive during reclaim so instead track
++ * whether TLB batching occurred in the past and if so then do a flush here
++ * if required. This will cost one additional flush per reclaim cycle paid
++ * by the first operation at risk such as mprotect and munmap.
++ *
++ * This must be called under the PTL so that an access to tlb_flush_batched
++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
++ * via the PTL.
++ */
++void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++ if (mm->tlb_flush_batched) {
++ flush_tlb_mm(mm);
++
++ /*
++ * Do not allow the compiler to re-order the clearing of
++ * tlb_flush_batched before the tlb is flushed.
++ */
++ barrier();
++ mm->tlb_flush_batched = false;
++ }
++}
+ #else
+ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+ {
--- /dev/null
+From 73a47a9bb3e2c4a9c553c72456e63ab991b1a4d9 Mon Sep 17 00:00:00 2001
+From: David Woods <dwoods@mellanox.com>
+Date: Fri, 26 May 2017 17:53:21 -0400
+Subject: mmc: core: Use device_property_read instead of of_property_read
+
+From: David Woods <dwoods@mellanox.com>
+
+commit 73a47a9bb3e2c4a9c553c72456e63ab991b1a4d9 upstream.
+
+Using the device_property interfaces allows mmc drivers to work
+on platforms which run on either device tree or ACPI.
+
+Signed-off-by: David Woods <dwoods@mellanox.com>
+Reviewed-by: Chris Metcalf <cmetcalf@mellanox.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/host.c | 72 +++++++++++++++++++++++-------------------------
+ 1 file changed, 35 insertions(+), 37 deletions(-)
+
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -176,19 +176,17 @@ static void mmc_retune_timer(unsigned lo
+ */
+ int mmc_of_parse(struct mmc_host *host)
+ {
+- struct device_node *np;
++ struct device *dev = host->parent;
+ u32 bus_width;
+ int ret;
+ bool cd_cap_invert, cd_gpio_invert = false;
+ bool ro_cap_invert, ro_gpio_invert = false;
+
+- if (!host->parent || !host->parent->of_node)
++ if (!dev || !dev_fwnode(dev))
+ return 0;
+
+- np = host->parent->of_node;
+-
+ /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+- if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
++ if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
+ dev_dbg(host->parent,
+ "\"bus-width\" property is missing, assuming 1 bit.\n");
+ bus_width = 1;
+@@ -210,7 +208,7 @@ int mmc_of_parse(struct mmc_host *host)
+ }
+
+ /* f_max is obtained from the optional "max-frequency" property */
+- of_property_read_u32(np, "max-frequency", &host->f_max);
++ device_property_read_u32(dev, "max-frequency", &host->f_max);
+
+ /*
+ * Configure CD and WP pins. They are both by default active low to
+@@ -225,12 +223,12 @@ int mmc_of_parse(struct mmc_host *host)
+ */
+
+ /* Parse Card Detection */
+- if (of_property_read_bool(np, "non-removable")) {
++ if (device_property_read_bool(dev, "non-removable")) {
+ host->caps |= MMC_CAP_NONREMOVABLE;
+ } else {
+- cd_cap_invert = of_property_read_bool(np, "cd-inverted");
++ cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+
+- if (of_property_read_bool(np, "broken-cd"))
++ if (device_property_read_bool(dev, "broken-cd"))
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+@@ -256,7 +254,7 @@ int mmc_of_parse(struct mmc_host *host)
+ }
+
+ /* Parse Write Protection */
+- ro_cap_invert = of_property_read_bool(np, "wp-inverted");
++ ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+ if (!ret)
+@@ -264,64 +262,64 @@ int mmc_of_parse(struct mmc_host *host)
+ else if (ret != -ENOENT && ret != -ENOSYS)
+ return ret;
+
+- if (of_property_read_bool(np, "disable-wp"))
++ if (device_property_read_bool(dev, "disable-wp"))
+ host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
+ /* See the comment on CD inversion above */
+ if (ro_cap_invert ^ ro_gpio_invert)
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+- if (of_property_read_bool(np, "cap-sd-highspeed"))
++ if (device_property_read_bool(dev, "cap-sd-highspeed"))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+- if (of_property_read_bool(np, "cap-mmc-highspeed"))
++ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+ host->caps |= MMC_CAP_MMC_HIGHSPEED;
+- if (of_property_read_bool(np, "sd-uhs-sdr12"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr12"))
+ host->caps |= MMC_CAP_UHS_SDR12;
+- if (of_property_read_bool(np, "sd-uhs-sdr25"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr25"))
+ host->caps |= MMC_CAP_UHS_SDR25;
+- if (of_property_read_bool(np, "sd-uhs-sdr50"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr50"))
+ host->caps |= MMC_CAP_UHS_SDR50;
+- if (of_property_read_bool(np, "sd-uhs-sdr104"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr104"))
+ host->caps |= MMC_CAP_UHS_SDR104;
+- if (of_property_read_bool(np, "sd-uhs-ddr50"))
++ if (device_property_read_bool(dev, "sd-uhs-ddr50"))
+ host->caps |= MMC_CAP_UHS_DDR50;
+- if (of_property_read_bool(np, "cap-power-off-card"))
++ if (device_property_read_bool(dev, "cap-power-off-card"))
+ host->caps |= MMC_CAP_POWER_OFF_CARD;
+- if (of_property_read_bool(np, "cap-mmc-hw-reset"))
++ if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
+ host->caps |= MMC_CAP_HW_RESET;
+- if (of_property_read_bool(np, "cap-sdio-irq"))
++ if (device_property_read_bool(dev, "cap-sdio-irq"))
+ host->caps |= MMC_CAP_SDIO_IRQ;
+- if (of_property_read_bool(np, "full-pwr-cycle"))
++ if (device_property_read_bool(dev, "full-pwr-cycle"))
+ host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+- if (of_property_read_bool(np, "keep-power-in-suspend"))
++ if (device_property_read_bool(dev, "keep-power-in-suspend"))
+ host->pm_caps |= MMC_PM_KEEP_POWER;
+- if (of_property_read_bool(np, "wakeup-source") ||
+- of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
++ if (device_property_read_bool(dev, "wakeup-source") ||
++ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
+ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+- if (of_property_read_bool(np, "mmc-ddr-3_3v"))
++ if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
+ host->caps |= MMC_CAP_3_3V_DDR;
+- if (of_property_read_bool(np, "mmc-ddr-1_8v"))
++ if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
+ host->caps |= MMC_CAP_1_8V_DDR;
+- if (of_property_read_bool(np, "mmc-ddr-1_2v"))
++ if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
+ host->caps |= MMC_CAP_1_2V_DDR;
+- if (of_property_read_bool(np, "mmc-hs200-1_8v"))
++ if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
+ host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+- if (of_property_read_bool(np, "mmc-hs200-1_2v"))
++ if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
+ host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+- if (of_property_read_bool(np, "mmc-hs400-1_8v"))
++ if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
+ host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+- if (of_property_read_bool(np, "mmc-hs400-1_2v"))
++ if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
+ host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+- if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
++ if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
+ host->caps2 |= MMC_CAP2_HS400_ES;
+- if (of_property_read_bool(np, "no-sdio"))
++ if (device_property_read_bool(dev, "no-sdio"))
+ host->caps2 |= MMC_CAP2_NO_SDIO;
+- if (of_property_read_bool(np, "no-sd"))
++ if (device_property_read_bool(dev, "no-sd"))
+ host->caps2 |= MMC_CAP2_NO_SD;
+- if (of_property_read_bool(np, "no-mmc"))
++ if (device_property_read_bool(dev, "no-mmc"))
+ host->caps2 |= MMC_CAP2_NO_MMC;
+
+- host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
++ host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
+ if (host->dsr_req && (host->dsr & ~0xffff)) {
+ dev_err(host->parent,
+ "device tree specified broken value for DSR: 0x%x, ignoring\n",
--- /dev/null
+From 852ff5fea9eb6a9799f1881d6df2cd69a9e6eed5 Mon Sep 17 00:00:00 2001
+From: David Woods <dwoods@mellanox.com>
+Date: Fri, 26 May 2017 17:53:20 -0400
+Subject: mmc: dw_mmc: Use device_property_read instead of of_property_read
+
+From: David Woods <dwoods@mellanox.com>
+
+commit 852ff5fea9eb6a9799f1881d6df2cd69a9e6eed5 upstream.
+
+Using the device_property interfaces allows the dw_mmc driver to work
+on platforms that use either device tree or ACPI.
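+
+A minimal sketch of the two firmware-agnostic idioms the change below relies
+on, an array read and a "count the strings" query (the example_probe() name
+is made up):
+
+	#include <linux/device.h>
+	#include <linux/property.h>
+
+	static int example_probe(struct device *dev)
+	{
+		u32 freq[2];
+		int nr_dma_names;
+
+		/* Fills freq[0..1] from "clock-freq-min-max" when it exists. */
+		if (device_property_read_u32_array(dev, "clock-freq-min-max",
+						   freq, 2))
+			dev_dbg(dev, "no clock-freq-min-max, using defaults\n");
+		else
+			dev_dbg(dev, "clock range %u..%u Hz\n", freq[0], freq[1]);
+
+		/* A NULL buffer asks only for the number of strings. */
+		nr_dma_names = device_property_read_string_array(dev, "dma-names",
+								 NULL, 0);
+		if (nr_dma_names < 0 || !device_property_present(dev, "dmas"))
+			dev_dbg(dev, "no usable DMA bindings\n");
+
+		return 0;
+	}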
+
+Signed-off-by: David Woods <dwoods@mellanox.com>
+Reviewed-by: Chris Metcalf <cmetcalf@mellanox.com>
+Acked-by: Jaehoon Chung <jh80.chung@samsung.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/dw_mmc.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2707,8 +2707,8 @@ static int dw_mci_init_slot(struct dw_mc
+ host->slot[id] = slot;
+
+ mmc->ops = &dw_mci_ops;
+- if (of_property_read_u32_array(host->dev->of_node,
+- "clock-freq-min-max", freq, 2)) {
++ if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
++ freq, 2)) {
+ mmc->f_min = DW_MCI_FREQ_MIN;
+ mmc->f_max = DW_MCI_FREQ_MAX;
+ } else {
+@@ -2808,7 +2808,6 @@ static void dw_mci_init_dma(struct dw_mc
+ {
+ int addr_config;
+ struct device *dev = host->dev;
+- struct device_node *np = dev->of_node;
+
+ /*
+ * Check tansfer mode from HCON[17:16]
+@@ -2869,8 +2868,9 @@ static void dw_mci_init_dma(struct dw_mc
+ dev_info(host->dev, "Using internal DMA controller.\n");
+ } else {
+ /* TRANS_MODE_EDMAC: check dma bindings again */
+- if ((of_property_count_strings(np, "dma-names") < 0) ||
+- (!of_find_property(np, "dmas", NULL))) {
++ if ((device_property_read_string_array(dev, "dma-names",
++ NULL, 0) < 0) ||
++ !device_property_present(dev, "dmas")) {
+ goto no_dma;
+ }
+ host->dma_ops = &dw_mci_edmac_ops;
+@@ -2937,7 +2937,6 @@ static struct dw_mci_board *dw_mci_parse
+ {
+ struct dw_mci_board *pdata;
+ struct device *dev = host->dev;
+- struct device_node *np = dev->of_node;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ int ret;
+ u32 clock_frequency;
+@@ -2954,20 +2953,21 @@ static struct dw_mci_board *dw_mci_parse
+ }
+
+ /* find out number of slots supported */
+- of_property_read_u32(np, "num-slots", &pdata->num_slots);
++ device_property_read_u32(dev, "num-slots", &pdata->num_slots);
+
+- if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
++ if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
+ dev_info(dev,
+ "fifo-depth property not found, using value of FIFOTH register as default\n");
+
+- of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
++ device_property_read_u32(dev, "card-detect-delay",
++ &pdata->detect_delay_ms);
+
+- of_property_read_u32(np, "data-addr", &host->data_addr_override);
++ device_property_read_u32(dev, "data-addr", &host->data_addr_override);
+
+- if (of_get_property(np, "fifo-watermark-aligned", NULL))
++ if (device_property_present(dev, "fifo-watermark-aligned"))
+ host->wm_aligned = true;
+
+- if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
++ if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
+ pdata->bus_hz = clock_frequency;
+
+ if (drv_data && drv_data->parse_dt) {
--- /dev/null
+From 7a1e3f143176e8ebdb2f5a9b3b47abc18b879d90 Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+Date: Wed, 26 Jul 2017 16:02:46 +0200
+Subject: mmc: sdhci-of-at91: force card detect value for non removable devices
+
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+
+commit 7a1e3f143176e8ebdb2f5a9b3b47abc18b879d90 upstream.
+
+When the device is non-removable, the card detect signal is often used
+for another purpose, i.e. muxed to another SoC peripheral or used as a
+GPIO. This can lead to wrong behavior depending on the default value of
+this signal when it is not muxed to the SDHCI controller.
+
+Fixes: bb5f8ea4d514 ("mmc: sdhci-of-at91: introduce driver for the Atmel SDMMC")
+Signed-off-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-at91.c | 35 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -31,6 +31,7 @@
+
+ #define SDMMC_MC1R 0x204
+ #define SDMMC_MC1R_DDR BIT(3)
++#define SDMMC_MC1R_FCD BIT(7)
+ #define SDMMC_CACR 0x230
+ #define SDMMC_CACR_CAPWREN BIT(0)
+ #define SDMMC_CACR_KEY (0x46 << 8)
+@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
+ struct clk *mainck;
+ };
+
++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
++{
++ u8 mc1r;
++
++ mc1r = readb(host->ioaddr + SDMMC_MC1R);
++ mc1r |= SDMMC_MC1R_FCD;
++ writeb(mc1r, host->ioaddr + SDMMC_MC1R);
++}
++
+ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+ u16 clk;
+@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct
+ sdhci_set_uhs_signaling(host, timing);
+ }
+
++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
++{
++ sdhci_reset(host, mask);
++
++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++ sdhci_at91_set_force_card_detect(host);
++}
++
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+ .set_clock = sdhci_at91_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+- .reset = sdhci_reset,
++ .reset = sdhci_at91_reset,
+ .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
+ .set_power = sdhci_at91_set_power,
+ };
+@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platf
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
++ /*
++ * If the device attached to the MMC bus is not removable, it is safer
++ * to set the Force Card Detect bit. People often don't connect the
++ * card detect signal and use this pin for another purpose. If the card
++ * detect pin is not muxed to SDHCI controller, a default value is
++ * used. This value can be different from a SoC revision to another
++ * one. Problems come when this default value is not card present. To
++ * avoid this case, if the device is non removable then the card
++ * detection procedure using the SDMCC_CD signal is bypassed.
++ * This bit is reset when a software reset for all command is performed
++ * so we need to implement our own reset function to set back this bit.
++ */
++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++ sdhci_at91_set_force_card_detect(host);
++
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
--- /dev/null
+From fd40559c8657418385e42f797e0b04bfc0add748 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Tue, 1 Aug 2017 16:02:47 -0400
+Subject: NFSv4: Fix EXCHANGE_ID corrupt verifier issue
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit fd40559c8657418385e42f797e0b04bfc0add748 upstream.
+
+The verifier is allocated on the stack, but the EXCHANGE_ID RPC call was
+changed to be asynchronous by commit 8d89bd70bc939. If we interrupt
+the call to rpc_wait_for_completion_task(), we can therefore end up
+transmitting random stack contents in lieu of the verifier.
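+
+The lifetime hazard is plain C rather than anything NFS-specific. Below is a
+small userspace sketch of the same pattern (async_send(), start_request() and
+struct args are invented names): an asynchronous worker holds a pointer into
+a stack frame that may be gone by the time it is used, which is why the fix
+stores the verifier in the long-lived call data by value.
+
+	#include <pthread.h>
+	#include <stdio.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	struct args { const char *verifier; };	/* points into a stack frame */
+	static struct args a;
+
+	static void *async_send(void *p)
+	{
+		struct args *arg = p;
+
+		sleep(1);	/* the caller has long since returned */
+		/* May read a dead stack frame: undefined behaviour. */
+		printf("sending verifier: %s\n", arg->verifier);
+		return NULL;
+	}
+
+	static void start_request(void)
+	{
+		char verifier[16] = "BOOT-VERIFIER";
+		pthread_t t;
+
+		a.verifier = verifier;	/* dangling once this function returns */
+		pthread_create(&t, NULL, async_send, &a);
+		pthread_detach(t);
+	}
+
+	int main(void)
+	{
+		start_request();
+		sleep(2);
+		return 0;
+	}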
+
+Fixes: 8d89bd70bc939 ("NFS setup async exchange_id")
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 11 ++++-------
+ fs/nfs/nfs4xdr.c | 2 +-
+ include/linux/nfs_xdr.h | 2 +-
+ 3 files changed, 6 insertions(+), 9 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7407,7 +7407,7 @@ static void nfs4_exchange_id_done(struct
+ cdata->res.server_scope = NULL;
+ }
+ /* Save the EXCHANGE_ID verifier session trunk tests */
+- memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
++ memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
+ sizeof(clp->cl_confirm.data));
+ }
+ out:
+@@ -7444,7 +7444,6 @@ static const struct rpc_call_ops nfs4_ex
+ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ u32 sp4_how, struct rpc_xprt *xprt)
+ {
+- nfs4_verifier verifier;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+ .rpc_cred = cred,
+@@ -7468,8 +7467,7 @@ static int _nfs4_proc_exchange_id(struct
+ return -ENOMEM;
+ }
+
+- if (!xprt)
+- nfs4_init_boot_verifier(clp, &verifier);
++ nfs4_init_boot_verifier(clp, &calldata->args.verifier);
+
+ status = nfs4_init_uniform_client_string(clp);
+ if (status)
+@@ -7510,9 +7508,8 @@ static int _nfs4_proc_exchange_id(struct
+ task_setup_data.rpc_xprt = xprt;
+ task_setup_data.flags =
+ RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
+- calldata->args.verifier = &clp->cl_confirm;
+- } else {
+- calldata->args.verifier = &verifier;
++ memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
++ sizeof(calldata->args.verifier.data));
+ }
+ calldata->args.client = clp;
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -1765,7 +1765,7 @@ static void encode_exchange_id(struct xd
+ int len = 0;
+
+ encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
+- encode_nfs4_verifier(xdr, args->verifier);
++ encode_nfs4_verifier(xdr, &args->verifier);
+
+ encode_string(xdr, strlen(args->client->cl_owner_id),
+ args->client->cl_owner_id);
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1222,7 +1222,7 @@ struct nfs41_state_protection {
+
+ struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+- nfs4_verifier *verifier;
++ nfs4_verifier verifier;
+ u32 flags;
+ struct nfs41_state_protection state_protect;
+ };
--- /dev/null
+From 19ec8e48582670c021e998b9deb88e39a842ff45 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 2 Aug 2017 13:32:30 -0700
+Subject: ocfs2: don't clear SGID when inheriting ACLs
+
+From: Jan Kara <jack@suse.cz>
+
+commit 19ec8e48582670c021e998b9deb88e39a842ff45 upstream.
+
+When a new directory 'DIR1' is created in a directory 'DIR0' with the SGID
+bit set, DIR1 is expected to have the SGID bit set (and owning group equal
+to the owning group of 'DIR0'). However, when 'DIR0' also has some default
+ACLs that 'DIR1' inherits, setting these ACLs will result in the SGID bit
+on 'DIR1' being cleared if the user is not a member of the owning group.
+
+Fix the problem by moving posix_acl_update_mode() out of ocfs2_set_acl()
+into ocfs2_iop_set_acl(). That way the function will not be called when
+inheriting ACLs, which is what we want: it prevents the SGID bit from
+being cleared, and the mode has already been set properly by
+posix_acl_create(). Also, posix_acl_chmod(), which calls ocfs2_set_acl(),
+takes care of updating the mode itself.
+
+Fixes: 073931017b4 ("posix_acl: Clear SGID bit when setting file permissions")
+Link: http://lkml.kernel.org/r/20170801141252.19675-3-jack@suse.cz
+Signed-off-by: Jan Kara <jack@suse.cz>
+Cc: Mark Fasheh <mfasheh@versity.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Joseph Qi <jiangqi903@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/acl.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -240,18 +240,6 @@ int ocfs2_set_acl(handle_t *handle,
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
+- if (acl) {
+- umode_t mode;
+-
+- ret = posix_acl_update_mode(inode, &mode, &acl);
+- if (ret)
+- return ret;
+-
+- ret = ocfs2_acl_set_mode(inode, di_bh,
+- handle, mode);
+- if (ret)
+- return ret;
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+@@ -289,7 +277,19 @@ int ocfs2_iop_set_acl(struct inode *inod
+ had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
+ if (had_lock < 0)
+ return had_lock;
++ if (type == ACL_TYPE_ACCESS && acl) {
++ umode_t mode;
++
++ status = posix_acl_update_mode(inode, &mode, &acl);
++ if (status)
++ goto unlock;
++
++ status = ocfs2_acl_set_mode(inode, bh, NULL, mode);
++ if (status)
++ goto unlock;
++ }
+ status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
++unlock:
+ ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
+ brelse(bh);
+ return status;
--- /dev/null
+From 13d57093c141db2036364d6be35e394fc5b64728 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Sun, 30 Jul 2017 16:20:19 -0400
+Subject: parisc: Handle vma's whose context is not current in flush_cache_range
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 13d57093c141db2036364d6be35e394fc5b64728 upstream.
+
+In testing James' patch to drivers/parisc/pdc_stable.c, I hit the BUG
+statement in flush_cache_range() during a system shutdown:
+
+kernel BUG at arch/parisc/kernel/cache.c:595!
+CPU: 2 PID: 6532 Comm: kworker/2:0 Not tainted 4.13.0-rc2+ #1
+Workqueue: events free_ioctx
+
+ IAOQ[0]: flush_cache_range+0x144/0x148
+ IAOQ[1]: flush_cache_page+0x0/0x1a8
+ RP(r2): flush_cache_range+0xec/0x148
+Backtrace:
+ [<00000000402910ac>] unmap_page_range+0x84/0x880
+ [<00000000402918f4>] unmap_single_vma+0x4c/0x60
+ [<0000000040291a18>] zap_page_range_single+0x110/0x160
+ [<0000000040291c34>] unmap_mapping_range+0x174/0x1a8
+ [<000000004026ccd8>] truncate_pagecache+0x50/0xa8
+ [<000000004026cd84>] truncate_setsize+0x54/0x70
+ [<000000004033d534>] put_aio_ring_file+0x44/0xb0
+ [<000000004033d5d8>] aio_free_ring+0x38/0x140
+ [<000000004033d714>] free_ioctx+0x34/0xa8
+ [<00000000401b0028>] process_one_work+0x1b8/0x4d0
+ [<00000000401b04f4>] worker_thread+0x1b4/0x648
+ [<00000000401b9128>] kthread+0x1b0/0x208
+ [<0000000040150020>] end_fault_vector+0x20/0x28
+ [<0000000040639518>] nf_ip_reroute+0x50/0xa8
+ [<0000000040638ed0>] nf_ip_route+0x10/0x78
+ [<0000000040638c90>] xfrm4_mode_tunnel_input+0x180/0x1f8
+
+CPU: 2 PID: 6532 Comm: kworker/2:0 Not tainted 4.13.0-rc2+ #1
+Workqueue: events free_ioctx
+Backtrace:
+ [<0000000040163bf0>] show_stack+0x20/0x38
+ [<0000000040688480>] dump_stack+0xa8/0x120
+ [<0000000040163dc4>] die_if_kernel+0x19c/0x2b0
+ [<0000000040164d0c>] handle_interruption+0xa24/0xa48
+
+This patch modifies flush_cache_range() to handle non-current contexts.
+Since this occurs infrequently, the simplest approach is to flush the
+entire cache when it happens.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/cache.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -587,13 +587,12 @@ void flush_cache_range(struct vm_area_st
+ if (parisc_requires_coherency())
+ flush_tlb_range(vma, start, end);
+
+- if ((end - start) >= parisc_cache_flush_threshold) {
++ if ((end - start) >= parisc_cache_flush_threshold
++ || vma->vm_mm->context != mfsp(3)) {
+ flush_cache_all();
+ return;
+ }
+
+- BUG_ON(vma->vm_mm->context != mfsp(3));
+-
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
--- /dev/null
+From 8f8201dfed91a43ac38c899c82f81eef3d36afd9 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 31 Jul 2017 08:38:27 +0200
+Subject: parisc: Increase thread and stack size to 32kb
+
+From: Helge Deller <deller@gmx.de>
+
+commit 8f8201dfed91a43ac38c899c82f81eef3d36afd9 upstream.
+
+Since kernel 4.11, the thread and IRQ stacks on parisc randomly overflow
+the default size of 16k. The reason why stack usage suddenly grew is not
+yet known.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/thread_info.h | 2 +-
+ arch/parisc/kernel/irq.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/include/asm/thread_info.h
++++ b/arch/parisc/include/asm/thread_info.h
+@@ -34,7 +34,7 @@ struct thread_info {
+
+ /* thread information allocation */
+
+-#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */
++#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */
+ /* Be sure to hunt all references to this down when you change the size of
+ * the kernel stack */
+ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned l
+ /*
+ * IRQ STACK - used for irq handler
+ */
+-#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
++#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
+
+ union irq_stack_union {
+ unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
--- /dev/null
+From cd069bb9f9ff13b23492fda6ccdb458ac4f641ca Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 21 Jul 2017 22:53:46 +0200
+Subject: scsi: lpfc: fix linking against modular NVMe support
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit cd069bb9f9ff13b23492fda6ccdb458ac4f641ca upstream.
+
+When LPFC is built-in but NVMe is a loadable module, we fail to link the
+kernel:
+
+drivers/scsi/built-in.o: In function `lpfc_nvme_create_localport':
+(.text+0x156a82): undefined reference to `nvme_fc_register_localport'
+drivers/scsi/built-in.o: In function `lpfc_nvme_destroy_localport':
+(.text+0x156eaa): undefined reference to `nvme_fc_unregister_remoteport'
+
+We can avoid this either by forcing lpfc to be a module, or by disabling
+NVMe support in this case. This implements the former.
+
+Fixes: 7d7080335f8d ("scsi: lpfc: Finalize Kconfig options for nvme")
+Link: https://patchwork.kernel.org/patch/9636569/
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1241,6 +1241,8 @@ config SCSI_LPFC
+ tristate "Emulex LightPulse Fibre Channel Support"
+ depends on PCI && SCSI
+ depends on SCSI_FC_ATTRS
++ depends on NVME_TARGET_FC || NVME_TARGET_FC=n
++ depends on NVME_FC || NVME_FC=n
+ select CRC_T10DIF
+ ---help---
+ This lpfc driver supports the Emulex LightPulse
--- /dev/null
+From 68c59fcea1f2c6a54c62aa896cc623c1b5bc9b47 Mon Sep 17 00:00:00 2001
+From: Johannes Thumshirn <jthumshirn@suse.de>
+Date: Fri, 7 Jul 2017 10:56:38 +0200
+Subject: scsi: sg: fix SG_DXFER_FROM_DEV transfers
+
+From: Johannes Thumshirn <jthumshirn@suse.de>
+
+commit 68c59fcea1f2c6a54c62aa896cc623c1b5bc9b47 upstream.
+
+SG_DXFER_FROM_DEV transfers do not necessarily have a dxferp, as we set
+it to NULL for the old sg_io read/write interface, but they must have a
+length greater than 0. This fixes a regression introduced by commit
+28676d869bbb ("scsi: sg: check for valid direction before starting the
+request").
+
+Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
+Fixes: 28676d869bbb ("scsi: sg: check for valid direction before starting the request")
+Reported-by: Chris Clayton <chris2553@googlemail.com>
+Tested-by: Chris Clayton <chris2553@googlemail.com>
+Cc: Douglas Gilbert <dgilbert@interlog.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sg.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -758,8 +758,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_
+ if (hp->dxferp || hp->dxfer_len > 0)
+ return false;
+ return true;
+- case SG_DXFER_TO_DEV:
+ case SG_DXFER_FROM_DEV:
++ if (hp->dxfer_len < 0)
++ return false;
++ return true;
++ case SG_DXFER_TO_DEV:
+ case SG_DXFER_TO_FROM_DEV:
+ if (!hp->dxferp || hp->dxfer_len == 0)
+ return false;
--- /dev/null
+parisc-increase-thread-and-stack-size-to-32kb.patch
+parisc-handle-vma-s-whose-context-is-not-current-in-flush_cache_range.patch
+scsi-lpfc-fix-linking-against-modular-nvme-support.patch
+scsi-sg-fix-sg_dxfer_from_dev-transfers.patch
+acpi-lpss-only-call-pwm_add_table-for-the-first-pwm-controller.patch
+cgroup-don-t-call-migration-methods-if-there-are-no-tasks-to-migrate.patch
+cgroup-create-dfl_root-files-on-subsys-registration.patch
+cgroup-fix-error-return-value-from-cgroup_subtree_control.patch
+libata-array-underflow-in-ata_find_dev.patch
+workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch
+iwlwifi-dvm-prevent-an-out-of-bounds-access.patch
+brcmfmac-fix-memleak-due-to-calling-brcmf_sdiod_sgtable_alloc-twice.patch
+nfsv4-fix-exchange_id-corrupt-verifier-issue.patch
+mmc-sdhci-of-at91-force-card-detect-value-for-non-removable-devices.patch
+mmc-core-use-device_property_read-instead-of-of_property_read.patch
+mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch
+mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch
+mm-hugetlb.c-__get_user_pages-ignores-certain-follow_hugetlb_page-errors.patch
+userfaultfd-non-cooperative-notify-about-unmap-of-destination-during-mremap.patch
+userfaultfd_zeropage-return-enospc-in-case-mm-has-gone.patch
+userfaultfd-non-cooperative-flush-event_wqh-at-release-time.patch
+cpuset-fix-a-deadlock-due-to-incomplete-patching-of-cpusets_enabled.patch
+ocfs2-don-t-clear-sgid-when-inheriting-acls.patch
+alsa-hda-fix-speaker-output-from-vaio-vpcl14m1r.patch
+drm-amdgpu-fix-header-on-gfx9-clear-state.patch
+drm-amdgpu-fix-undue-fallthroughs-in-golden-registers-initialization.patch
+asoc-fix-pcm-creation-regression.patch
+asoc-ux500-restore-platform-dai-assignments.patch
+asoc-do-not-close-shared-backend-dailink.patch
+kvm-arm-arm64-handle-hva-aging-while-destroying-the-vm.patch
+kvm-async_pf-make-rcu-irq-exit-if-not-triggered-from-idle-task.patch
--- /dev/null
+From 5a18b64e3f02125be1c0ef777501ae38aafe2a24 Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Date: Wed, 2 Aug 2017 13:32:24 -0700
+Subject: userfaultfd: non-cooperative: flush event_wqh at release time
+
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+
+commit 5a18b64e3f02125be1c0ef777501ae38aafe2a24 upstream.
+
+There may still be threads waiting on event_wqh at the time the
+userfault file descriptor is closed. Flush the events wait-queue to
+prevent waiting threads from hanging.
+
+Link: http://lkml.kernel.org/r/1501398127-30419-1-git-send-email-rppt@linux.vnet.ibm.com
+Fixes: 9cd75c3cd4c3d ("userfaultfd: non-cooperative: add ability to report non-PF events from uffd descriptor")
+Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
+Cc: Pavel Emelyanov <xemul@virtuozzo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/userfaultfd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -851,6 +851,9 @@ wakeup:
+ __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
+ spin_unlock(&ctx->fault_pending_wqh.lock);
+
++ /* Flush pending events that may still wait on event_wqh */
++ wake_up_all(&ctx->event_wqh);
++
+ wake_up_poll(&ctx->fd_wqh, POLLHUP);
+ userfaultfd_ctx_put(ctx);
+ return 0;
--- /dev/null
+From b22823719302e88d0e2a6bb06433bd97b175a8d8 Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Date: Wed, 2 Aug 2017 13:31:55 -0700
+Subject: userfaultfd: non-cooperative: notify about unmap of destination during mremap
+
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+
+commit b22823719302e88d0e2a6bb06433bd97b175a8d8 upstream.
+
+When mremap is called with MREMAP_FIXED, it unmaps memory at the
+destination address without notifying the userfaultfd monitor.
+
+If the destination were registered with userfaultfd, the monitor has no
+way to distinguish between the old and new ranges and to properly relate
+the page faults that would occur in the destination region.
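+
+A minimal userspace sketch of the operation in question; it does not set up
+userfaultfd itself, and in the scenario above the destination mapping would
+be registered with the monitor before the call:
+
+	#define _GNU_SOURCE
+	#include <stdio.h>
+	#include <string.h>
+	#include <sys/mman.h>
+
+	int main(void)
+	{
+		size_t len = 4 * 4096;
+		char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
+				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+		char *dst = mmap(NULL, len, PROT_READ | PROT_WRITE,
+				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+		if (src == MAP_FAILED || dst == MAP_FAILED)
+			return 1;
+		memset(src, 'a', len);
+
+		/*
+		 * MREMAP_FIXED implicitly unmaps whatever is currently mapped
+		 * at dst before moving src there; that implicit unmap is the
+		 * event the monitor has to be told about.
+		 */
+		if (mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED,
+			   dst) == MAP_FAILED) {
+			perror("mremap");
+			return 1;
+		}
+		printf("mapping moved to %p, first byte '%c'\n",
+		       (void *)dst, dst[0]);
+		return 0;
+	}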
+
+Fixes: 897ab3e0c49e ("userfaultfd: non-cooperative: add event for memory unmaps")
+Link: http://lkml.kernel.org/r/1500276876-3350-1-git-send-email-rppt@linux.vnet.ibm.com
+Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Acked-by: Pavel Emelyanov <xemul@virtuozzo.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mremap.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -429,6 +429,7 @@ static struct vm_area_struct *vma_to_res
+ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+ unsigned long new_addr, unsigned long new_len, bool *locked,
+ struct vm_userfaultfd_ctx *uf,
++ struct list_head *uf_unmap_early,
+ struct list_head *uf_unmap)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -447,7 +448,7 @@ static unsigned long mremap_to(unsigned
+ if (addr + old_len > new_addr && new_addr + new_len > addr)
+ goto out;
+
+- ret = do_munmap(mm, new_addr, new_len, NULL);
++ ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
+ if (ret)
+ goto out;
+
+@@ -515,6 +516,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
+ unsigned long charged = 0;
+ bool locked = false;
+ struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
++ LIST_HEAD(uf_unmap_early);
+ LIST_HEAD(uf_unmap);
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+@@ -542,7 +544,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
+
+ if (flags & MREMAP_FIXED) {
+ ret = mremap_to(addr, old_len, new_addr, new_len,
+- &locked, &uf, &uf_unmap);
++ &locked, &uf, &uf_unmap_early, &uf_unmap);
+ goto out;
+ }
+
+@@ -622,6 +624,7 @@ out:
+ up_write(&current->mm->mmap_sem);
+ if (locked && new_len > old_len)
+ mm_populate(new_addr + old_len, new_len - old_len);
++ userfaultfd_unmap_complete(mm, &uf_unmap_early);
+ mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
+ userfaultfd_unmap_complete(mm, &uf_unmap);
+ return ret;
--- /dev/null
+From 9d95aa4bada24be35bb94827a55e1d6e243d866e Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Date: Wed, 2 Aug 2017 13:32:15 -0700
+Subject: userfaultfd_zeropage: return -ENOSPC in case mm has gone
+
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+
+commit 9d95aa4bada24be35bb94827a55e1d6e243d866e upstream.
+
+In the non-cooperative userfaultfd case, process exit may race with an
+outstanding mcopy_atomic called by the uffd monitor. Returning -ENOSPC
+instead of -EINVAL when the mm is already gone allows the uffd monitor
+to distinguish this case from other error conditions.
+
+Unfortunately I overlooked userfaultfd_zeropage when updating
+userfaultfd_copy().
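+
+From the monitor's side the distinction looks roughly like the sketch below;
+the zeropage_range() helper is an invented name and uffd is assumed to be a
+userfaultfd descriptor registered for the range:
+
+	#include <errno.h>
+	#include <linux/userfaultfd.h>
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+
+	/* Returns 0 on success, 1 if the target mm is gone, -1 otherwise. */
+	static int zeropage_range(int uffd, unsigned long start,
+				  unsigned long len)
+	{
+		struct uffdio_zeropage zp = {
+			.range = { .start = start, .len = len },
+			.mode  = 0,
+		};
+
+		if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == 0)
+			return 0;
+		if (errno == ENOSPC)	/* with this fix: target has exited */
+			return 1;
+		perror("UFFDIO_ZEROPAGE");
+		return -1;
+	}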
+
+Link: http://lkml.kernel.org/r/1501136819-21857-1-git-send-email-rppt@linux.vnet.ibm.com
+Fixes: 96333187ab162 ("userfaultfd_copy: return -ENOSPC in case mm has gone")
+Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
+Cc: Pavel Emelyanov <xemul@virtuozzo.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/userfaultfd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1645,6 +1645,8 @@ static int userfaultfd_zeropage(struct u
+ ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
+ uffdio_zeropage.range.len);
+ mmput(ctx->mm);
++ } else {
++ return -ENOSPC;
+ }
+ if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
+ return -EFAULT;
--- /dev/null
+From 5c0338c68706be53b3dc472e4308961c36e4ece1 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 18 Jul 2017 18:41:52 -0400
+Subject: workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 5c0338c68706be53b3dc472e4308961c36e4ece1 upstream.
+
+The combination of WQ_UNBOUND and max_active == 1 used to imply
+ordered execution. After commit 4c16bd327c74 ("workqueue: implement
+NUMA affinity for unbound workqueues") introduced NUMA affinity, this
+is no longer true due to per-node worker pools.
+
+While the right way to create an ordered workqueue is
+alloc_ordered_workqueue(), the documentation has been misleading for a
+long time and people do use WQ_UNBOUND with max_active == 1 for ordered
+workqueues, which can lead to subtle bugs that are very difficult to
+trigger.
+
+It's unlikely that we'd see noticeable performance impact by enforcing
+ordering on WQ_UNBOUND / max_active == 1 workqueues. Let's
+automatically set __WQ_ORDERED for those workqueues.
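+
+A minimal sketch of the two spellings (example_init() and the queue names
+are made up):
+
+	#include <linux/errno.h>
+	#include <linux/init.h>
+	#include <linux/workqueue.h>
+
+	static struct workqueue_struct *legacy_wq, *ordered_wq;
+
+	static int __init example_init(void)
+	{
+		/*
+		 * Before this change, ordering here silently depended on the
+		 * pre-NUMA behaviour; after it, __WQ_ORDERED is set
+		 * automatically for the WQ_UNBOUND/max_active==1 combination.
+		 */
+		legacy_wq = alloc_workqueue("legacy_wq", WQ_UNBOUND, 1);
+
+		/* The explicit, recommended way to get one-at-a-time execution. */
+		ordered_wq = alloc_ordered_workqueue("ordered_wq", 0);
+
+		if (!legacy_wq || !ordered_wq)
+			return -ENOMEM;
+		return 0;
+	}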
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Christoph Hellwig <hch@infradead.org>
+Reported-by: Alexei Potashnik <alexei@purestorage.com>
+Fixes: 4c16bd327c74 ("workqueue: implement NUMA affinity for unbound workqueues")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3929,6 +3929,16 @@ struct workqueue_struct *__alloc_workque
+ struct workqueue_struct *wq;
+ struct pool_workqueue *pwq;
+
++ /*
++ * Unbound && max_active == 1 used to imply ordered, which is no
++ * longer the case on NUMA machines due to per-node pools. While
++ * alloc_ordered_workqueue() is the right way to create an ordered
++ * workqueue, keep the previous behavior to avoid subtle breakages
++ * on NUMA.
++ */
++ if ((flags & WQ_UNBOUND) && max_active == 1)
++ flags |= __WQ_ORDERED;
++
+ /* see the comment above the definition of WQ_POWER_EFFICIENT */
+ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+ flags |= WQ_UNBOUND;
--- /dev/null
+parisc-increase-thread-and-stack-size-to-32kb.patch
+libata-array-underflow-in-ata_find_dev.patch
+workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch
+mmc-core-fix-access-to-hs400-es-devices.patch
--- /dev/null
+parisc-handle-vma-s-whose-context-is-not-current-in-flush_cache_range.patch
+cgroup-create-dfl_root-files-on-subsys-registration.patch
+cgroup-fix-error-return-value-from-cgroup_subtree_control.patch
+libata-array-underflow-in-ata_find_dev.patch
+workqueue-restore-wq_unbound-max_active-1-to-be-ordered.patch
+iwlwifi-dvm-prevent-an-out-of-bounds-access.patch
+brcmfmac-fix-memleak-due-to-calling-brcmf_sdiod_sgtable_alloc-twice.patch
+nfsv4-fix-exchange_id-corrupt-verifier-issue.patch
+mmc-sdhci-of-at91-force-card-detect-value-for-non-removable-devices.patch
+device-property-make-dev_fwnode-public.patch
+mmc-core-use-device_property_read-instead-of-of_property_read.patch
+mmc-dw_mmc-use-device_property_read-instead-of-of_property_read.patch
+mmc-core-fix-access-to-hs400-es-devices.patch
+mm-mprotect-flush-tlb-if-potentially-racing-with-a-parallel-reclaim-leaving-stale-tlb-entries.patch