--- /dev/null
+From d05ea7da0e8f6df3c62cfee75538f347cb3d89ef Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@fedoraproject.org>
+Date: Fri, 2 Oct 2015 11:09:54 -0700
+Subject: ALSA: hda: Add dock support for ThinkPad T550
+
+From: Laura Abbott <labbott@fedoraproject.org>
+
+commit d05ea7da0e8f6df3c62cfee75538f347cb3d89ef upstream.
+
+Much like all the other Lenovo laptops, add a quirk to make
+sound work with docking.
+
+Reported-and-tested-by: lacknerflo@gmail.com
+Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5306,6 +5306,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
--- /dev/null
+From e8ff581f7ac2bc3b8886094b7ca635dcc4d1b0e9 Mon Sep 17 00:00:00 2001
+From: John Flatness <john@zerocrates.org>
+Date: Fri, 2 Oct 2015 17:07:49 -0400
+Subject: ALSA: hda - Apply SPDIF pin ctl to MacBookPro 12,1
+
+From: John Flatness <john@zerocrates.org>
+
+commit e8ff581f7ac2bc3b8886094b7ca635dcc4d1b0e9 upstream.
+
+The MacBookPro 12,1 has the same setup as the 11 for controlling the
+status of the optical audio light. Simply apply the existing workaround
+to the subsystem ID for the 12,1.
+
+[sorted the fixup entry by tiwai]
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=105401
+Signed-off-by: John Flatness <john@zerocrates.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_cirrus.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208
+ SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
++ SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+ {} /* terminator */
+ };
+
--- /dev/null
+From c7e1008048a97148d3aecae742f66fb2f944644c Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sun, 4 Oct 2015 22:44:12 +0200
+Subject: ALSA: hda - Disable power_save_node for IDT 92HD73xx chips
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit c7e1008048a97148d3aecae742f66fb2f944644c upstream.
+
+The recent widget power saving introduced some unavoidable click
+noises on old IDT 92HD73xx chips while it still seems working on the
+compatible new chips. In the bugzilla, we tried lots of tests and
+workarounds, but they didn't help much. So, let's disable the feature
+for these specific chips as the least (but safest) fix.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=104981
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_sigmatel.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda
+ return err;
+
+ spec = codec->spec;
+- codec->power_save_node = 1;
++ /* enable power_save_node only for new 92HD89xx chips, as it causes
++ * click noises on old 92HD73xx chips.
++ */
++ if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
++ codec->power_save_node = 1;
+ spec->linear_tone_beep = 0;
+ spec->gen.mixer_nid = 0x1d;
+ spec->have_spdif_mux = 1;
--- /dev/null
+From 7f57d803ee03730d570dc59a9e3e4842b58dd5cc Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 24 Sep 2015 17:36:51 +0200
+Subject: ALSA: hda - Disable power_save_node for Thinkpads
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 7f57d803ee03730d570dc59a9e3e4842b58dd5cc upstream.
+
+Lenovo Thinkpads with recent Realtek codecs seem suffering from click
+noises at power transition since the introduction of widget power
+saving in 4.1 kernel. Although this might be solved by some delays in
+appropriate points, as a quick workaround, just disable the
+power_save_node feature for now. The gain it gives is relatively
+small, and this makes the situation back to pre 4.1 time.
+
+This patch ended up with a bit more code changes than usual because
+the existing fixup for Thinkpads is highly chained. Instead of adding
+yet another chain, combine a few of them into a single fixup entry, as
+a gratis cleanup.
+
+Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=943982
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4188,6 +4188,24 @@ static void alc_fixup_disable_aamix(stru
+ }
+ }
+
++/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
++static void alc_fixup_tpt440_dock(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ static const struct hda_pintbl pincfgs[] = {
++ { 0x16, 0x21211010 }, /* dock headphone */
++ { 0x19, 0x21a11010 }, /* dock mic */
++ { }
++ };
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++ codec->power_save_node = 0; /* avoid click noises */
++ snd_hda_apply_pincfgs(codec, pincfgs);
++ }
++}
++
+ static void alc_shutup_dell_xps13(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+@@ -4562,7 +4580,6 @@ enum {
+ ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
+ ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC292_FIXUP_TPT440_DOCK,
+- ALC292_FIXUP_TPT440_DOCK2,
+ ALC283_FIXUP_BXBT2807_MIC,
+ ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+ ALC282_FIXUP_ASPIRE_V5_PINS,
+@@ -5029,17 +5046,7 @@ static const struct hda_fixup alc269_fix
+ },
+ [ALC292_FIXUP_TPT440_DOCK] = {
+ .type = HDA_FIXUP_FUNC,
+- .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+- .chained = true,
+- .chain_id = ALC292_FIXUP_TPT440_DOCK2
+- },
+- [ALC292_FIXUP_TPT440_DOCK2] = {
+- .type = HDA_FIXUP_PINS,
+- .v.pins = (const struct hda_pintbl[]) {
+- { 0x16, 0x21211010 }, /* dock headphone */
+- { 0x19, 0x21a11010 }, /* dock mic */
+- { }
+- },
++ .v.func = alc_fixup_tpt440_dock,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ },
--- /dev/null
+From 83510441bc08bee201c0ded9d81da6dfd008d69a Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 24 Sep 2015 11:00:18 +0200
+Subject: ALSA: hda/tegra - async probe for avoiding module loading deadlock
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 83510441bc08bee201c0ded9d81da6dfd008d69a upstream.
+
+The Tegra HD-audio controller driver causes deadlocks when loaded as a
+module since the driver invokes request_module() at binding with the
+codec driver. This patch works around it by deferring the probe in a
+work like Intel HD-audio controller driver does. Although hovering
+the codec probe stuff into udev would be a better solution, it may
+cause other regressions, so let's try this band-aid fix until the more
+proper solution gets landed.
+
+Reported-by: Thierry Reding <treding@nvidia.com>
+Tested-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_tegra.c | 30 +++++++++++++++++++++++++-----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -73,6 +73,7 @@ struct hda_tegra {
+ struct clk *hda2codec_2x_clk;
+ struct clk *hda2hdmi_clk;
+ void __iomem *regs;
++ struct work_struct probe_work;
+ };
+
+ #ifdef CONFIG_PM
+@@ -294,7 +295,9 @@ static int hda_tegra_dev_disconnect(stru
+ static int hda_tegra_dev_free(struct snd_device *device)
+ {
+ struct azx *chip = device->device_data;
++ struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+
++ cancel_work_sync(&hda->probe_work);
+ if (azx_bus(chip)->chip_init) {
+ azx_stop_all_streams(chip);
+ azx_stop_chip(chip);
+@@ -426,6 +429,9 @@ static int hda_tegra_first_init(struct a
+ /*
+ * constructor
+ */
++
++static void hda_tegra_probe_work(struct work_struct *work);
++
+ static int hda_tegra_create(struct snd_card *card,
+ unsigned int driver_caps,
+ struct hda_tegra *hda)
+@@ -452,6 +458,8 @@ static int hda_tegra_create(struct snd_c
+ chip->single_cmd = false;
+ chip->snoop = true;
+
++ INIT_WORK(&hda->probe_work, hda_tegra_probe_work);
++
+ err = azx_bus_init(chip, NULL, &hda_tegra_io_ops);
+ if (err < 0)
+ return err;
+@@ -499,6 +507,21 @@ static int hda_tegra_probe(struct platfo
+ card->private_data = chip;
+
+ dev_set_drvdata(&pdev->dev, card);
++ schedule_work(&hda->probe_work);
++
++ return 0;
++
++out_free:
++ snd_card_free(card);
++ return err;
++}
++
++static void hda_tegra_probe_work(struct work_struct *work)
++{
++ struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
++ struct azx *chip = &hda->chip;
++ struct platform_device *pdev = to_platform_device(hda->dev);
++ int err;
+
+ err = hda_tegra_first_init(chip, pdev);
+ if (err < 0)
+@@ -520,11 +543,8 @@ static int hda_tegra_probe(struct platfo
+ chip->running = 1;
+ snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+- return 0;
+-
+-out_free:
+- snd_card_free(card);
+- return err;
++ out_free:
++ return; /* no error return from async probe */
+ }
+
+ static int hda_tegra_remove(struct platform_device *pdev)
--- /dev/null
+From 225db5762dc1a35b26850477ffa06e5cd0097243 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 5 Oct 2015 16:55:09 +0200
+Subject: ALSA: synth: Fix conflicting OSS device registration on AWE32
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 225db5762dc1a35b26850477ffa06e5cd0097243 upstream.
+
+When OSS emulation is loaded on ISA SB AWE32 chip, we get now kernel
+warnings like:
+ WARNING: CPU: 0 PID: 2791 at fs/sysfs/dir.c:31 sysfs_warn_dup+0x51/0x80()
+ sysfs: cannot create duplicate filename '/devices/isa/sbawe.0/sound/card0/seq-oss-0-0'
+
+It's because both emux synth and opl3 drivers try to register their
+OSS device object with the same static index number 0. This hasn't
+been a big problem until the recent rewrite of device management code
+(that exposes sysfs at the same time), but it's been an obvious bug.
+
+This patch works around it just by using a different index number of
+emux synth object. There can be a more elegant way to fix, but it's
+enough for now, as this code won't be touched so often, in anyway.
+
+Reported-and-tested-by: Michael Shell <list1@michaelshell.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/synth/emux/emux_oss.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/sound/synth/emux/emux_oss.c
++++ b/sound/synth/emux/emux_oss.c
+@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *e
+ struct snd_seq_oss_reg *arg;
+ struct snd_seq_device *dev;
+
+- if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
++ /* using device#1 here for avoiding conflicts with OPL3 */
++ if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
+ sizeof(struct snd_seq_oss_reg), &dev) < 0)
+ return;
+
--- /dev/null
+From e74679b38c9417c1c524081121cdcdb36f82264d Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Fri, 25 Sep 2015 11:07:04 +0200
+Subject: ASoC: db1200: Fix DAI link format for db1300 and db1550
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit e74679b38c9417c1c524081121cdcdb36f82264d upstream.
+
+Commit b4508d0f95fa ("ASoC: db1200: Use static DAI format setup") switched
+the db1200 driver over to using static DAI format setup instead of a
+callback function. But the commit only added the dai_fmt field to one of
+the three DAI links in the driver. This breaks audio on db1300 and db1550.
+
+Add the two missing dai_fmt settings to fix the issue.
+
+Fixes: b4508d0f95fa ("ASoC: db1200: Use static DAI format setup")
+Reported-by: Manuel Lauss <manuel.lauss@gmail.com>
+Tested-by: Manuel Lauss <manuel.lauss@gmail.com>
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/au1x/db1200.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/soc/au1x/db1200.c
++++ b/sound/soc/au1x/db1200.c
+@@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2
+ .cpu_dai_name = "au1xpsc_i2s.2",
+ .platform_name = "au1xpsc-pcm.2",
+ .codec_name = "wm8731.0-001b",
++ .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBM_CFM,
+ .ops = &db1200_i2s_wm8731_ops,
+ };
+
+@@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2
+ .cpu_dai_name = "au1xpsc_i2s.3",
+ .platform_name = "au1xpsc-pcm.3",
+ .codec_name = "wm8731.0-001b",
++ .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBM_CFM,
+ .ops = &db1200_i2s_wm8731_ops,
+ };
+
--- /dev/null
+From 4873867e5f2bd90faad861dd94865099fc3140f3 Mon Sep 17 00:00:00 2001
+From: Yitian Bu <buyitian@gmail.com>
+Date: Fri, 2 Oct 2015 15:18:41 +0800
+Subject: ASoC: dwc: correct irq clear method
+
+From: Yitian Bu <buyitian@gmail.com>
+
+commit 4873867e5f2bd90faad861dd94865099fc3140f3 upstream.
+
+from Designware I2S datasheet, tx/rx XRUN irq is cleared by
+reading register TOR/ROR, rather than by writing into them.
+
+Signed-off-by: Yitian Bu <yitian.bu@tangramtek.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/dwc/designware_i2s.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/dwc/designware_i2s.c
++++ b/sound/soc/dwc/designware_i2s.c
+@@ -131,10 +131,10 @@ static inline void i2s_clear_irqs(struct
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < 4; i++)
+- i2s_write_reg(dev->i2s_base, TOR(i), 0);
++ i2s_read_reg(dev->i2s_base, TOR(i));
+ } else {
+ for (i = 0; i < 4; i++)
+- i2s_write_reg(dev->i2s_base, ROR(i), 0);
++ i2s_read_reg(dev->i2s_base, ROR(i));
+ }
+ }
+
--- /dev/null
+From 3c8f7710c1c44fb650bc29b6ef78ed8b60cfaa28 Mon Sep 17 00:00:00 2001
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+Date: Tue, 15 Sep 2015 20:51:31 +0200
+Subject: ASoC: fix broken pxa SoC support
+
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+
+commit 3c8f7710c1c44fb650bc29b6ef78ed8b60cfaa28 upstream.
+
+The previous fix of pxa library support, which was introduced to fix the
+library dependency, broke the previous SoC behavior, where a machine
+code binding pxa2xx-ac97 with a codec relied on :
+ - sound/soc/pxa/pxa2xx-ac97.c
+ - sound/soc/codecs/XXX.c
+
+For example, the mioa701_wm9713.c machine code is currently broken. The
+"select ARM" statement wrongly selects the soc/arm/pxa2xx-ac97 for
+compilation, as per an unfortunate fate SND_PXA2XX_AC97 is both declared
+in sound/arm/Kconfig and sound/soc/pxa/Kconfig.
+
+Fix this by ensuring that SND_PXA2XX_SOC correctly triggers the correct
+pxa2xx-ac97 compilation.
+
+Fixes: 846172dfe33c ("ASoC: fix SND_PXA2XX_LIB Kconfig warning")
+Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/arm/Kconfig | 15 ++++++++-------
+ sound/soc/pxa/Kconfig | 2 --
+ 2 files changed, 8 insertions(+), 9 deletions(-)
+
+--- a/sound/arm/Kconfig
++++ b/sound/arm/Kconfig
+@@ -9,6 +9,14 @@ menuconfig SND_ARM
+ Drivers that are implemented on ASoC can be found in
+ "ALSA for SoC audio support" section.
+
++config SND_PXA2XX_LIB
++ tristate
++ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
++ select SND_DMAENGINE_PCM
++
++config SND_PXA2XX_LIB_AC97
++ bool
++
+ if SND_ARM
+
+ config SND_ARMAACI
+@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
+ tristate
+ select SND_PCM
+
+-config SND_PXA2XX_LIB
+- tristate
+- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+-
+-config SND_PXA2XX_LIB_AC97
+- bool
+-
+ config SND_PXA2XX_AC97
+ tristate "AC97 driver for the Intel PXA2xx chip"
+ depends on ARCH_PXA
+--- a/sound/soc/pxa/Kconfig
++++ b/sound/soc/pxa/Kconfig
+@@ -1,7 +1,6 @@
+ config SND_PXA2XX_SOC
+ tristate "SoC Audio for the Intel PXA2xx chip"
+ depends on ARCH_PXA
+- select SND_ARM
+ select SND_PXA2XX_LIB
+ help
+ Say Y or M if you want to add support for codecs attached to
+@@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
+ config SND_PXA2XX_SOC_AC97
+ tristate
+ select AC97_BUS
+- select SND_ARM
+ select SND_PXA2XX_LIB_AC97
+ select SND_SOC_AC97_BUS
+
--- /dev/null
+From 8811191fdf7ed02ee07cb8469428158572d355a2 Mon Sep 17 00:00:00 2001
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+Date: Tue, 22 Sep 2015 21:20:22 +0200
+Subject: ASoC: pxa: pxa2xx-ac97: fix dma requestor lines
+
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+
+commit 8811191fdf7ed02ee07cb8469428158572d355a2 upstream.
+
+PCM receive and transmit DMA requestor lines were reverted, breaking the
+PCM playback interface for PXA platforms using the sound/soc/ variant
+instead of the sound/arm variant.
+
+The commit below shows the inversion in the requestor lines.
+
+Fixes: d65a14587a9b ("ASoC: pxa: use snd_dmaengine_dai_dma_data")
+Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/pxa/pxa2xx-ac97.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/pxa/pxa2xx-ac97.c
++++ b/sound/soc/pxa/pxa2xx-ac97.c
+@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac
+ .reset = pxa2xx_ac97_cold_reset,
+ };
+
+-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
++static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data
+ .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
+ };
+
+-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
++static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
--- /dev/null
+From e256da84a04ea31c3c215997c847609af224e8f4 Mon Sep 17 00:00:00 2001
+From: Gianluca Renzi <gianlucarenzi@eurekelettronica.it>
+Date: Fri, 25 Sep 2015 21:33:41 +0200
+Subject: ASoC: sgtl5000: fix wrong register MIC_BIAS_VOLTAGE setup on probe
+
+From: Gianluca Renzi <gianlucarenzi@eurekelettronica.it>
+
+commit e256da84a04ea31c3c215997c847609af224e8f4 upstream.
+
+Signed-off-by: Gianluca Renzi <gianlucarenzi@eurekelettronica.it>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/sgtl5000.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1377,8 +1377,8 @@ static int sgtl5000_probe(struct snd_soc
+ sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
+- SGTL5000_BIAS_R_MASK,
+- sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
++ SGTL5000_BIAS_VOLT_MASK,
++ sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
+ /*
+ * disable DAP
+ * TODO:
--- /dev/null
+From e2600460bc3aa14ca1df86318a327cbbabedf9a8 Mon Sep 17 00:00:00 2001
+From: Andreas Dannenberg <dannenberg@ti.com>
+Date: Mon, 5 Oct 2015 15:00:14 -0500
+Subject: ASoC: tas2552: fix dBscale-min declaration
+
+From: Andreas Dannenberg <dannenberg@ti.com>
+
+commit e2600460bc3aa14ca1df86318a327cbbabedf9a8 upstream.
+
+The minimum volume level for the TAS2552 (control register value 0x00)
+is -7dB however the driver declares it as -0.07dB.
+
+Running amixer before the patch reports:
+dBscale-min=-0.07dB,step=1.00dB,mute=0
+
+Running amixer with the patch applied reports:
+dBscale-min=-7.00dB,step=1.00dB,mute=0
+
+Signed-off-by: Andreas Dannenberg <dannenberg@ti.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/tas2552.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/tas2552.c
++++ b/sound/soc/codecs/tas2552.c
+@@ -551,7 +551,7 @@ static struct snd_soc_dai_driver tas2552
+ /*
+ * DAC digital volumes. From -7 to 24 dB in 1 dB steps
+ */
+-static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0);
++static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0);
+
+ static const char * const tas2552_din_source_select[] = {
+ "Muted",
--- /dev/null
+From 5e55e3cbd1042cffa6249f22c10585e63f8a29bf Mon Sep 17 00:00:00 2001
+From: Michal Kazior <michal.kazior@tieto.com>
+Date: Wed, 19 Aug 2015 13:10:43 +0200
+Subject: ath10k: fix dma_mapping_error() handling
+
+From: Michal Kazior <michal.kazior@tieto.com>
+
+commit 5e55e3cbd1042cffa6249f22c10585e63f8a29bf upstream.
+
+The function returns 1 when DMA mapping fails. The
+driver would return bogus values and could
+possibly confuse itself if DMA failed.
+
+Fixes: 767d34fc67af ("ath10k: remove DMA mapping wrappers")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
+Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath10k/htc.c | 4 +++-
+ drivers/net/wireless/ath/ath10k/htt_tx.c | 8 ++++++--
+ drivers/net/wireless/ath/ath10k/pci.c | 8 ++++++--
+ drivers/net/wireless/ath/ath10k/wmi.c | 1 +
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -145,8 +145,10 @@ int ath10k_htc_send(struct ath10k_htc *h
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+- if (ret)
++ if (ret) {
++ ret = -EIO;
+ goto err_credits;
++ }
+
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
+@@ -371,8 +371,10 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+- if (res)
++ if (res) {
++ res = -EIO;
+ goto err_free_txdesc;
++ }
+
+ skb_put(txdesc, len);
+ cmd = (struct htt_cmd *)txdesc->data;
+@@ -456,8 +458,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+- if (res)
++ if (res) {
++ res = -EIO;
+ goto err_free_txbuf;
++ }
+
+ switch (skb_cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1546,8 +1546,10 @@ static int ath10k_pci_hif_exchange_bmi_m
+
+ req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, req_paddr);
+- if (ret)
++ if (ret) {
++ ret = -EIO;
+ goto err_dma;
++ }
+
+ if (resp && resp_len) {
+ tresp = kzalloc(*resp_len, GFP_KERNEL);
+@@ -1559,8 +1561,10 @@ static int ath10k_pci_hif_exchange_bmi_m
+ resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(ar->dev, resp_paddr);
+- if (ret)
++ if (ret) {
++		ret = -EIO;
+ goto err_req;
++ }
+
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -2391,6 +2391,7 @@ void ath10k_wmi_event_host_swba(struct a
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
++ ret = -EIO;
+ goto skip;
+ }
+
--- /dev/null
+From 005efedf2c7d0a270ffbe28d8997b03844f3e3e7 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Mon, 14 Sep 2015 09:09:31 +0100
+Subject: Btrfs: fix read corruption of compressed and shared extents
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 005efedf2c7d0a270ffbe28d8997b03844f3e3e7 upstream.
+
+If a file has a range pointing to a compressed extent, followed by
+another range that points to the same compressed extent and a read
+operation attempts to read both ranges (either completely or part of
+them), the pages that correspond to the second range are incorrectly
+filled with zeroes.
+
+Consider the following example:
+
+ File layout
+ [0 - 8K] [8K - 24K]
+ | |
+ | |
+ points to extent X, points to extent X,
+ offset 4K, length of 8K offset 0, length 16K
+
+ [extent X, compressed length = 4K uncompressed length = 16K]
+
+If a readpages() call spans the 2 ranges, a single bio to read the extent
+is submitted - extent_io.c:submit_extent_page() would only create a new
+bio to cover the second range pointing to the extent if the extent it
+points to had a different logical address than the extent associated with
+the first range. This has a consequence of the compressed read end io
+handler (compression.c:end_compressed_bio_read()) finish once the extent
+is decompressed into the pages covering the first range, leaving the
+remaining pages (belonging to the second range) filled with zeroes (done
+by compression.c:btrfs_clear_biovec_end()).
+
+So fix this by submitting the current bio whenever we find a range
+pointing to a compressed extent that was preceded by a range with a
+different extent map. This is the simplest solution for this corner
+case. Making the end io callback populate both ranges (or more, if we
+have multiple pointing to the same extent) is a much more complex
+solution since each bio is tightly coupled with a single extent map and
+the extent maps associated to the ranges pointing to the shared extent
+can have different offsets and lengths.
+
+The following test case for fstests triggers the issue:
+
+ seq=`basename $0`
+ seqres=$RESULT_DIR/$seq
+ echo "QA output created by $seq"
+ tmp=/tmp/$$
+ status=1 # failure is the default!
+ trap "_cleanup; exit \$status" 0 1 2 3 15
+
+ _cleanup()
+ {
+ rm -f $tmp.*
+ }
+
+ # get standard environment, filters and checks
+ . ./common/rc
+ . ./common/filter
+
+ # real QA test starts here
+ _need_to_be_root
+ _supported_fs btrfs
+ _supported_os Linux
+ _require_scratch
+ _require_cloner
+
+ rm -f $seqres.full
+
+ test_clone_and_read_compressed_extent()
+ {
+ local mount_opts=$1
+
+ _scratch_mkfs >>$seqres.full 2>&1
+ _scratch_mount $mount_opts
+
+ # Create a test file with a single extent that is compressed (the
+ # data we write into it is highly compressible no matter which
+ # compression algorithm is used, zlib or lzo).
+ $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 4K" \
+ -c "pwrite -S 0xbb 4K 8K" \
+ -c "pwrite -S 0xcc 12K 4K" \
+ $SCRATCH_MNT/foo | _filter_xfs_io
+
+ # Now clone our extent into an adjacent offset.
+ $CLONER_PROG -s $((4 * 1024)) -d $((16 * 1024)) -l $((8 * 1024)) \
+ $SCRATCH_MNT/foo $SCRATCH_MNT/foo
+
+ # Same as before but for this file we clone the extent into a lower
+ # file offset.
+ $XFS_IO_PROG -f -c "pwrite -S 0xaa 8K 4K" \
+ -c "pwrite -S 0xbb 12K 8K" \
+ -c "pwrite -S 0xcc 20K 4K" \
+ $SCRATCH_MNT/bar | _filter_xfs_io
+
+ $CLONER_PROG -s $((12 * 1024)) -d 0 -l $((8 * 1024)) \
+ $SCRATCH_MNT/bar $SCRATCH_MNT/bar
+
+ echo "File digests before unmounting filesystem:"
+ md5sum $SCRATCH_MNT/foo | _filter_scratch
+ md5sum $SCRATCH_MNT/bar | _filter_scratch
+
+ # Evicting the inode or clearing the page cache before reading
+ # again the file would also trigger the bug - reads were returning
+ # all bytes in the range corresponding to the second reference to
+ # the extent with a value of 0, but the correct data was persisted
+ # (it was a bug exclusively in the read path). The issue happened
+ # only if the same readpages() call targeted pages belonging to the
+ # first and second ranges that point to the same compressed extent.
+ _scratch_remount
+
+ echo "File digests after mounting filesystem again:"
+ # Must match the same digests we got before.
+ md5sum $SCRATCH_MNT/foo | _filter_scratch
+ md5sum $SCRATCH_MNT/bar | _filter_scratch
+ }
+
+ echo -e "\nTesting with zlib compression..."
+ test_clone_and_read_compressed_extent "-o compress=zlib"
+
+ _scratch_unmount
+
+ echo -e "\nTesting with lzo compression..."
+ test_clone_and_read_compressed_extent "-o compress=lzo"
+
+ status=0
+ exit
+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: Qu Wenruo<quwenruo@cn.fujitsu.com>
+Reviewed-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 65 ++++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 57 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2798,7 +2798,8 @@ static int submit_extent_page(int rw, st
+ bio_end_io_t end_io_func,
+ int mirror_num,
+ unsigned long prev_bio_flags,
+- unsigned long bio_flags)
++ unsigned long bio_flags,
++ bool force_bio_submit)
+ {
+ int ret = 0;
+ struct bio *bio;
+@@ -2816,6 +2817,7 @@ static int submit_extent_page(int rw, st
+ contig = bio_end_sector(bio) == sector;
+
+ if (prev_bio_flags != bio_flags || !contig ||
++ force_bio_submit ||
+ merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+ bio_add_page(bio, page, page_size, offset) < page_size) {
+ ret = submit_one_bio(rw, bio, mirror_num,
+@@ -2909,7 +2911,8 @@ static int __do_readpage(struct extent_i
+ get_extent_t *get_extent,
+ struct extent_map **em_cached,
+ struct bio **bio, int mirror_num,
+- unsigned long *bio_flags, int rw)
++ unsigned long *bio_flags, int rw,
++ u64 *prev_em_start)
+ {
+ struct inode *inode = page->mapping->host;
+ u64 start = page_offset(page);
+@@ -2957,6 +2960,7 @@ static int __do_readpage(struct extent_i
+ }
+ while (cur <= end) {
+ unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
++ bool force_bio_submit = false;
+
+ if (cur >= last_byte) {
+ char *userpage;
+@@ -3007,6 +3011,49 @@ static int __do_readpage(struct extent_i
+ block_start = em->block_start;
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ block_start = EXTENT_MAP_HOLE;
++
++ /*
++ * If we have a file range that points to a compressed extent
++ * and it's followed by a consecutive file range that points to
++ * to the same compressed extent (possibly with a different
++ * offset and/or length, so it either points to the whole extent
++ * or only part of it), we must make sure we do not submit a
++ * single bio to populate the pages for the 2 ranges because
++ * this makes the compressed extent read zero out the pages
++ * belonging to the 2nd range. Imagine the following scenario:
++ *
++ * File layout
++ * [0 - 8K] [8K - 24K]
++ * | |
++ * | |
++ * points to extent X, points to extent X,
++ * offset 4K, length of 8K offset 0, length 16K
++ *
++ * [extent X, compressed length = 4K uncompressed length = 16K]
++ *
++ * If the bio to read the compressed extent covers both ranges,
++ * it will decompress extent X into the pages belonging to the
++ * first range and then it will stop, zeroing out the remaining
++ * pages that belong to the other range that points to extent X.
++ * So here we make sure we submit 2 bios, one for the first
++ * range and another one for the third range. Both will target
++ * the same physical extent from disk, but we can't currently
++ * make the compressed bio endio callback populate the pages
++ * for both ranges because each compressed bio is tightly
++ * coupled with a single extent map, and each range can have
++ * an extent map with a different offset value relative to the
++ * uncompressed data of our extent and different lengths. This
++ * is a corner case so we prioritize correctness over
++ * non-optimal behavior (submitting 2 bios for the same extent).
++ */
++ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
++ prev_em_start && *prev_em_start != (u64)-1 &&
++ *prev_em_start != em->orig_start)
++ force_bio_submit = true;
++
++ if (prev_em_start)
++ *prev_em_start = em->orig_start;
++
+ free_extent_map(em);
+ em = NULL;
+
+@@ -3056,7 +3103,8 @@ static int __do_readpage(struct extent_i
+ bdev, bio, pnr,
+ end_bio_extent_readpage, mirror_num,
+ *bio_flags,
+- this_bio_flag);
++ this_bio_flag,
++ force_bio_submit);
+ if (!ret) {
+ nr++;
+ *bio_flags = this_bio_flag;
+@@ -3088,6 +3136,7 @@ static inline void __do_contiguous_readp
+ struct inode *inode;
+ struct btrfs_ordered_extent *ordered;
+ int index;
++ u64 prev_em_start = (u64)-1;
+
+ inode = pages[0]->mapping->host;
+ while (1) {
+@@ -3103,7 +3152,7 @@ static inline void __do_contiguous_readp
+
+ for (index = 0; index < nr_pages; index++) {
+ __do_readpage(tree, pages[index], get_extent, em_cached, bio,
+- mirror_num, bio_flags, rw);
++ mirror_num, bio_flags, rw, &prev_em_start);
+ page_cache_release(pages[index]);
+ }
+ }
+@@ -3171,7 +3220,7 @@ static int __extent_read_full_page(struc
+ }
+
+ ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
+- bio_flags, rw);
++ bio_flags, rw, NULL);
+ return ret;
+ }
+
+@@ -3197,7 +3246,7 @@ int extent_read_full_page_nolock(struct
+ int ret;
+
+ ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
+- &bio_flags, READ);
++ &bio_flags, READ, NULL);
+ if (bio)
+ ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ return ret;
+@@ -3450,7 +3499,7 @@ static noinline_for_stack int __extent_w
+ sector, iosize, pg_offset,
+ bdev, &epd->bio, max_nr,
+ end_bio_extent_writepage,
+- 0, 0, 0);
++ 0, 0, 0, false);
+ if (ret)
+ SetPageError(page);
+ }
+@@ -3752,7 +3801,7 @@ static noinline_for_stack int write_one_
+ ret = submit_extent_page(rw, tree, p, offset >> 9,
+ PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+ -1, end_bio_extent_buffer_writepage,
+- 0, epd->bio_flags, bio_flags);
++ 0, epd->bio_flags, bio_flags, false);
+ epd->bio_flags = bio_flags;
+ if (ret) {
+ set_btree_ioerr(p);
--- /dev/null
+From a30e577c96f59b1e1678ea5462432b09bf7d5cbc Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Fri, 11 Sep 2015 21:44:17 -0400
+Subject: btrfs: skip waiting on ordered range for special files
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit a30e577c96f59b1e1678ea5462432b09bf7d5cbc upstream.
+
+In btrfs_evict_inode, we properly truncate the page cache for evicted
+inodes but then we call btrfs_wait_ordered_range for every inode as well.
+It's the right thing to do for regular files but results in incorrect
+behavior for device inodes for block devices.
+
+filemap_fdatawrite_range gets called with inode->i_mapping which gets
+resolved to the block device inode before getting passed to
+wbc_attach_fdatawrite_inode and ultimately to inode_to_bdi. What happens
+next depends on whether there's an open file handle associated with the
+inode. If there is, we write to the block device, which is unexpected
+behavior. If there isn't, we fall through normally and inode->i_data is used.
+We can also end up racing against open/close which can result in crashes
+when i_mapping points to a block device inode that has been closed.
+
+Since there can't be any page cache associated with special file inodes,
+it's safe to skip the btrfs_wait_ordered_range call entirely and avoid
+the problem.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=100911
+Tested-by: Christoph Biedl <linux-kernel.bfrz@manchmal.in-ulm.de>
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/inode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5051,7 +5051,8 @@ void btrfs_evict_inode(struct inode *ino
+ goto no_delete;
+ }
+ /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
+- btrfs_wait_ordered_range(inode, 0, (u64)-1);
++ if (!special_file(inode->i_mode))
++ btrfs_wait_ordered_range(inode, 0, (u64)-1);
+
+ btrfs_free_io_failure_record(inode, 0, (u64)-1);
+
--- /dev/null
+From 808f80b46790f27e145c72112189d6a3be2bc884 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Mon, 28 Sep 2015 09:56:26 +0100
+Subject: Btrfs: update fix for read corruption of compressed and shared extents
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 808f80b46790f27e145c72112189d6a3be2bc884 upstream.
+
+My previous fix in commit 005efedf2c7d ("Btrfs: fix read corruption of
+compressed and shared extents") was effective only if the compressed
+extents cover a file range with a length that is not a multiple of 16
+pages. That's because the detection of when we reached a different range
+of the file that shares the same compressed extent as the previously
+processed range was done at extent_io.c:__do_contiguous_readpages(),
+which covers subranges with a length up to 16 pages, because
+extent_readpages() groups the pages in clusters no larger than 16 pages.
+So fix this by tracking the start of the previously processed file
+range's extent map at extent_readpages().
+
+The following test case for fstests reproduces the issue:
+
+ seq=`basename $0`
+ seqres=$RESULT_DIR/$seq
+ echo "QA output created by $seq"
+ tmp=/tmp/$$
+ status=1 # failure is the default!
+ trap "_cleanup; exit \$status" 0 1 2 3 15
+
+ _cleanup()
+ {
+ rm -f $tmp.*
+ }
+
+ # get standard environment, filters and checks
+ . ./common/rc
+ . ./common/filter
+
+ # real QA test starts here
+ _need_to_be_root
+ _supported_fs btrfs
+ _supported_os Linux
+ _require_scratch
+ _require_cloner
+
+ rm -f $seqres.full
+
+ test_clone_and_read_compressed_extent()
+ {
+ local mount_opts=$1
+
+ _scratch_mkfs >>$seqres.full 2>&1
+ _scratch_mount $mount_opts
+
+ # Create our test file with a single extent of 64Kb that is going to
+ # be compressed no matter which compression algo is used (zlib/lzo).
+ $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 64K" \
+ $SCRATCH_MNT/foo | _filter_xfs_io
+
+ # Now clone the compressed extent into an adjacent file offset.
+ $CLONER_PROG -s 0 -d $((64 * 1024)) -l $((64 * 1024)) \
+ $SCRATCH_MNT/foo $SCRATCH_MNT/foo
+
+ echo "File digest before unmount:"
+ md5sum $SCRATCH_MNT/foo | _filter_scratch
+
+ # Remount the fs or clear the page cache to trigger the bug in
+ # btrfs. Because the extent has an uncompressed length that is a
+ # multiple of 16 pages, all the pages belonging to the second range
+ # of the file (64K to 128K), which points to the same extent as the
+ # first range (0K to 64K), had their contents full of zeroes instead
+ # of the byte 0xaa. This was a bug exclusively in the read path of
+ # compressed extents, the correct data was stored on disk, btrfs
+ # just failed to fill in the pages correctly.
+ _scratch_remount
+
+ echo "File digest after remount:"
+ # Must match the digest we got before.
+ md5sum $SCRATCH_MNT/foo | _filter_scratch
+ }
+
+ echo -e "\nTesting with zlib compression..."
+ test_clone_and_read_compressed_extent "-o compress=zlib"
+
+ _scratch_unmount
+
+ echo -e "\nTesting with lzo compression..."
+ test_clone_and_read_compressed_extent "-o compress=lzo"
+
+ status=0
+ exit
+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Tested-by: Timofey Titovets <nefelim4ag@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3131,12 +3131,12 @@ static inline void __do_contiguous_readp
+ get_extent_t *get_extent,
+ struct extent_map **em_cached,
+ struct bio **bio, int mirror_num,
+- unsigned long *bio_flags, int rw)
++ unsigned long *bio_flags, int rw,
++ u64 *prev_em_start)
+ {
+ struct inode *inode;
+ struct btrfs_ordered_extent *ordered;
+ int index;
+- u64 prev_em_start = (u64)-1;
+
+ inode = pages[0]->mapping->host;
+ while (1) {
+@@ -3152,7 +3152,7 @@ static inline void __do_contiguous_readp
+
+ for (index = 0; index < nr_pages; index++) {
+ __do_readpage(tree, pages[index], get_extent, em_cached, bio,
+- mirror_num, bio_flags, rw, &prev_em_start);
++ mirror_num, bio_flags, rw, prev_em_start);
+ page_cache_release(pages[index]);
+ }
+ }
+@@ -3162,7 +3162,8 @@ static void __extent_readpages(struct ex
+ int nr_pages, get_extent_t *get_extent,
+ struct extent_map **em_cached,
+ struct bio **bio, int mirror_num,
+- unsigned long *bio_flags, int rw)
++ unsigned long *bio_flags, int rw,
++ u64 *prev_em_start)
+ {
+ u64 start = 0;
+ u64 end = 0;
+@@ -3183,7 +3184,7 @@ static void __extent_readpages(struct ex
+ index - first_index, start,
+ end, get_extent, em_cached,
+ bio, mirror_num, bio_flags,
+- rw);
++ rw, prev_em_start);
+ start = page_start;
+ end = start + PAGE_CACHE_SIZE - 1;
+ first_index = index;
+@@ -3194,7 +3195,8 @@ static void __extent_readpages(struct ex
+ __do_contiguous_readpages(tree, &pages[first_index],
+ index - first_index, start,
+ end, get_extent, em_cached, bio,
+- mirror_num, bio_flags, rw);
++ mirror_num, bio_flags, rw,
++ prev_em_start);
+ }
+
+ static int __extent_read_full_page(struct extent_io_tree *tree,
+@@ -4205,6 +4207,7 @@ int extent_readpages(struct extent_io_tr
+ struct page *page;
+ struct extent_map *em_cached = NULL;
+ int nr = 0;
++ u64 prev_em_start = (u64)-1;
+
+ for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ page = list_entry(pages->prev, struct page, lru);
+@@ -4221,12 +4224,12 @@ int extent_readpages(struct extent_io_tr
+ if (nr < ARRAY_SIZE(pagepool))
+ continue;
+ __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+- &bio, 0, &bio_flags, READ);
++ &bio, 0, &bio_flags, READ, &prev_em_start);
+ nr = 0;
+ }
+ if (nr)
+ __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+- &bio, 0, &bio_flags, READ);
++ &bio, 0, &bio_flags, READ, &prev_em_start);
+
+ if (em_cached)
+ free_extent_map(em_cached);
--- /dev/null
+From 586b286b110e94eb31840ac5afc0c24e0881fe34 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Wed, 9 Sep 2015 21:34:51 -0400
+Subject: dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 586b286b110e94eb31840ac5afc0c24e0881fe34 upstream.
+
+Setting the dm-crypt device's max_segment_size to PAGE_SIZE is an
+unfortunate constraint that is required to avoid the potential for
+exceeding dm-crypt's underlying device's max_segments limits -- due to
+crypt_alloc_buffer() possibly allocating pages for the encryption bio
+that are not as physically contiguous as the original bio.
+
+It is interesting to note that this problem was already fixed back in
+2007 via commit 91e106259 ("dm crypt: use bio_add_page"). But Linux 4.0
+commit cf2f1abfb ("dm crypt: don't allocate pages for a partial
+request") regressed dm-crypt back to _not_ using bio_add_page(). But
+given dm-crypt's cpu parallelization changes all depend on commit
+cf2f1abfb's abandoning of the more complex io fragments processing that
+dm-crypt previously had we cannot easily go back to using
+bio_add_page().
+
+So all said the cleanest way to resolve this issue is to fix dm-crypt to
+properly constrain the original bios entering dm-crypt so the encryption
+bios that dm-crypt generates from the original bios are always
+compatible with the underlying device's max_segments queue limits.
+
+It should be noted that technically Linux 4.3 does _not_ need this fix
+because of the block core's new late bio-splitting capability. But, it
+is reasoned, there is little to be gained by having the block core split
+the encrypted bio that is composed of PAGE_SIZE segments. That said, in
+the future we may revert this change.
+
+Fixes: cf2f1abfb ("dm crypt: don't allocate pages for a partial request")
+Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=104421
+Suggested-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(stru
+
+ /*
+ * Generate a new unfragmented bio with the given size
+- * This should never violate the device limitations
++ * This should never violate the device limitations (but only because
++ * max_segment_size is being constrained to PAGE_SIZE).
+ *
+ * This function may be called concurrently. If we allocate from the mempool
+ * concurrently, there is a possibility of deadlock. For example, if we have
+@@ -2058,9 +2059,20 @@ static int crypt_iterate_devices(struct
+ return fn(ti, cc->dev, cc->start, ti->len, data);
+ }
+
++static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
++{
++ /*
++ * Unfortunate constraint that is required to avoid the potential
++ * for exceeding underlying device's max_segments limits -- due to
++ * crypt_alloc_buffer() possibly allocating pages for the encryption
++ * bio that are not as physically contiguous as the original bio.
++ */
++ limits->max_segment_size = PAGE_SIZE;
++}
++
+ static struct target_type crypt_target = {
+ .name = "crypt",
+- .version = {1, 14, 0},
++ .version = {1, 14, 1},
+ .module = THIS_MODULE,
+ .ctr = crypt_ctr,
+ .dtr = crypt_dtr,
+@@ -2072,6 +2084,7 @@ static struct target_type crypt_target =
+ .message = crypt_message,
+ .merge = crypt_merge,
+ .iterate_devices = crypt_iterate_devices,
++ .io_hints = crypt_io_hints,
+ };
+
+ static int __init dm_crypt_init(void)
--- /dev/null
+From 042745ee53a0a7c1f5aff191a4a24213c6dcfb52 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 2 Oct 2015 11:17:37 -0400
+Subject: dm raid: fix round up of default region size
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 042745ee53a0a7c1f5aff191a4a24213c6dcfb52 upstream.
+
+Commit 3a0f9aaee028 ("dm raid: round region_size to power of two")
+intended to make sure that the default region size is a power of two.
+However, the logic in that commit is incorrect and sets the variable
+region_size to 0 or 1, depending on whether min_region_size is a power
+of two.
+
+Fix this logic, using roundup_pow_of_two(), so that region_size is
+properly rounded up to the next power of two.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Fixes: 3a0f9aaee028 ("dm raid: round region_size to power of two")
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-raid.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -329,8 +329,7 @@ static int validate_region_size(struct r
+ */
+ if (min_region_size > (1 << 13)) {
+ /* If not a power of 2, make it the next power of 2 */
+- if (min_region_size & (min_region_size - 1))
+- region_size = 1 << fls(region_size);
++ region_size = roundup_pow_of_two(min_region_size);
+ DMINFO("Choosing default region size of %lu sectors",
+ region_size);
+ } else {
--- /dev/null
+From 216076705d6ac291d42e0f8dd85e6a0da98c0fa3 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Tue, 8 Sep 2015 08:56:13 -0400
+Subject: dm thin: disable discard support for thin devices if pool's is disabled
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 216076705d6ac291d42e0f8dd85e6a0da98c0fa3 upstream.
+
+If the pool is configured with 'ignore_discard' its discard support is
+disabled. The pool's thin devices should also have queue_limits that
+reflect discards are disabled.
+
+Fixes: 34fbcf62 ("dm thin: range discard support")
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -4333,6 +4333,10 @@ static void thin_io_hints(struct dm_targ
+ {
+ struct thin_c *tc = ti->private;
+ struct pool *pool = tc->pool;
++ struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
++
++ if (!pool_limits->discard_granularity)
++ return; /* pool's discard support is disabled */
+
+ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+ limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
--- /dev/null
+From d046b770c9fc36ccb19c27afdb8322220108cbc7 Mon Sep 17 00:00:00 2001
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Date: Tue, 22 Sep 2015 14:59:20 -0700
+Subject: lib/iommu-common.c: do not try to deref a null iommu->lazy_flush() pointer when n < pool->hint
+
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+
+commit d046b770c9fc36ccb19c27afdb8322220108cbc7 upstream.
+
+The check for invoking iommu->lazy_flush() from iommu_tbl_range_alloc()
+has to be refactored so that we only call ->lazy_flush() if it is
+non-null.
+
+I had a sparc kernel that was crashing when I was trying to process some
+very large perf.data files- the crash happens when the scsi driver calls
+into dma_4v_map_sg and thus the iommu_tbl_range_alloc().
+
+Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/iommu-common.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/lib/iommu-common.c
++++ b/lib/iommu-common.c
+@@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iomm
+
+ static inline bool need_flush(struct iommu_map_table *iommu)
+ {
+- return (iommu->lazy_flush != NULL &&
+- (iommu->flags & IOMMU_NEED_FLUSH) != 0);
++ return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
+ }
+
+ static inline void set_flush(struct iommu_map_table *iommu)
+@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(stru
+ goto bail;
+ }
+ }
+- if (n < pool->hint || need_flush(iommu)) {
++ if (iommu->lazy_flush &&
++ (n < pool->hint || need_flush(iommu))) {
+ clear_flush(iommu);
+ iommu->lazy_flush(iommu);
+ }
--- /dev/null
+From 66eefe5de11db1e0d8f2edc3880d50e7c36a9d43 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Thu, 24 Sep 2015 15:47:47 +1000
+Subject: md/raid0: apply base queue limits *before* disk_stack_limits
+
+From: NeilBrown <neilb@suse.com>
+
+commit 66eefe5de11db1e0d8f2edc3880d50e7c36a9d43 upstream.
+
+Calling e.g. blk_queue_max_hw_sectors() after calls to
+disk_stack_limits() discards the settings determined by
+disk_stack_limits().
+So we need to make those calls first.
+
+Fixes: 199dc6ed5179 ("md/raid0: update queue parameter in a safer location.")
+Reported-by: Jes Sorensen <Jes.Sorensen@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid0.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -431,12 +431,6 @@ static int raid0_run(struct mddev *mddev
+ struct md_rdev *rdev;
+ bool discard_supported = false;
+
+- rdev_for_each(rdev, mddev) {
+- disk_stack_limits(mddev->gendisk, rdev->bdev,
+- rdev->data_offset << 9);
+- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+- discard_supported = true;
+- }
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+@@ -445,6 +439,12 @@ static int raid0_run(struct mddev *mddev
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
++ rdev_for_each(rdev, mddev) {
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->data_offset << 9);
++ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
++ discard_supported = true;
++ }
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
--- /dev/null
+From 199dc6ed5179251fa6158a461499c24bdd99c836 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Mon, 3 Aug 2015 13:11:47 +1000
+Subject: md/raid0: update queue parameter in a safer location.
+
+From: NeilBrown <neilb@suse.com>
+
+commit 199dc6ed5179251fa6158a461499c24bdd99c836 upstream.
+
+When a (e.g.) RAID5 array is reshaped to RAID0, the updating
+of queue parameters (e.g. max number of sectors per bio) is
+done in the wrong place.
+It should be part of ->run, but it is actually part of ->takeover.
+This means it happens before level_store() calls:
+
+ blk_set_stacking_limits(&mddev->queue->limits);
+
+and so it is ineffective. This can lead to errors from underlying
+devices.
+
+So move all the relevant settings out of create_stripe_zones()
+and into raid0_run().
+
+As this can lead to a bug-on it is suitable for any -stable
+kernel which supports reshape to RAID0. So 2.6.35 or later.
+As the bug has been present for five years there is no urgency,
+so no need to rush into -stable.
+
+Fixes: 9af204cf720c ("md: Add support for Raid5->Raid0 and Raid10->Raid0 takeover")
+Reported-by: Yi Zhang <yizhan@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid0.c | 75 +++++++++++++++++++++++++++--------------------------
+ 1 file changed, 39 insertions(+), 36 deletions(-)
+
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -83,7 +83,7 @@ static int create_strip_zones(struct mdd
+ char b[BDEVNAME_SIZE];
+ char b2[BDEVNAME_SIZE];
+ struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+- bool discard_supported = false;
++ unsigned short blksize = 512;
+
+ if (!conf)
+ return -ENOMEM;
+@@ -98,6 +98,9 @@ static int create_strip_zones(struct mdd
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev1->sectors = sectors * mddev->chunk_sectors;
+
++ blksize = max(blksize, queue_logical_block_size(
++ rdev1->bdev->bd_disk->queue));
++
+ rdev_for_each(rdev2, mddev) {
+ pr_debug("md/raid0:%s: comparing %s(%llu)"
+ " with %s(%llu)\n",
+@@ -134,6 +137,18 @@ static int create_strip_zones(struct mdd
+ }
+ pr_debug("md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
++ /*
++ * now since we have the hard sector sizes, we can make sure
++ * chunk size is a multiple of that sector size
++ */
++ if ((mddev->chunk_sectors << 9) % blksize) {
++ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
++ mdname(mddev),
++ mddev->chunk_sectors << 9, blksize);
++ err = -EINVAL;
++ goto abort;
++ }
++
+ err = -ENOMEM;
+ conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
+ conf->nr_strip_zones, GFP_KERNEL);
+@@ -188,19 +203,12 @@ static int create_strip_zones(struct mdd
+ }
+ dev[j] = rdev1;
+
+- if (mddev->queue)
+- disk_stack_limits(mddev->gendisk, rdev1->bdev,
+- rdev1->data_offset << 9);
+-
+ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
+ conf->has_merge_bvec = 1;
+
+ if (!smallest || (rdev1->sectors < smallest->sectors))
+ smallest = rdev1;
+ cnt++;
+-
+- if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+- discard_supported = true;
+ }
+ if (cnt != mddev->raid_disks) {
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+@@ -261,28 +269,6 @@ static int create_strip_zones(struct mdd
+ (unsigned long long)smallest->sectors);
+ }
+
+- /*
+- * now since we have the hard sector sizes, we can make sure
+- * chunk size is a multiple of that sector size
+- */
+- if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
+- mdname(mddev),
+- mddev->chunk_sectors << 9);
+- goto abort;
+- }
+-
+- if (mddev->queue) {
+- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+- blk_queue_io_opt(mddev->queue,
+- (mddev->chunk_sectors << 9) * mddev->raid_disks);
+-
+- if (!discard_supported)
+- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+- else
+- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+- }
+-
+ pr_debug("md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
+
+@@ -433,12 +419,6 @@ static int raid0_run(struct mddev *mddev
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+
+- if (mddev->queue) {
+- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+- }
+-
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+@@ -447,6 +427,29 @@ static int raid0_run(struct mddev *mddev
+ mddev->private = conf;
+ }
+ conf = mddev->private;
++ if (mddev->queue) {
++ struct md_rdev *rdev;
++ bool discard_supported = false;
++
++ rdev_for_each(rdev, mddev) {
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->data_offset << 9);
++ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
++ discard_supported = true;
++ }
++ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
++ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
++ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
++
++ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
++ blk_queue_io_opt(mddev->queue,
++ (mddev->chunk_sectors << 9) * mddev->raid_disks);
++
++ if (!discard_supported)
++ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
++ else
++ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
++ }
+
+ /* calculate array device size */
+ md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
--- /dev/null
+From 0610c25daa3e76e38ad5a8fae683a89ff9f71798 Mon Sep 17 00:00:00 2001
+From: Greg Thelen <gthelen@google.com>
+Date: Thu, 1 Oct 2015 15:37:02 -0700
+Subject: memcg: fix dirty page migration
+
+From: Greg Thelen <gthelen@google.com>
+
+commit 0610c25daa3e76e38ad5a8fae683a89ff9f71798 upstream.
+
+The problem starts with a file backed dirty page which is charged to a
+memcg. Then page migration is used to move oldpage to newpage.
+
+Migration:
+ - copies the oldpage's data to newpage
+ - clears oldpage.PG_dirty
+ - sets newpage.PG_dirty
+ - uncharges oldpage from memcg
+ - charges newpage to memcg
+
+Clearing oldpage.PG_dirty decrements the charged memcg's dirty page
+count.
+
+However, because newpage is not yet charged, setting newpage.PG_dirty
+does not increment the memcg's dirty page count. After migration
+completes newpage.PG_dirty is eventually cleared, often in
+account_page_cleaned(). At this time newpage is charged to a memcg so
+the memcg's dirty page count is decremented which causes underflow
+because the count was not previously incremented by migration. This
+underflow causes balance_dirty_pages() to see a very large unsigned
+number of dirty memcg pages which leads to aggressive throttling of
+buffered writes by processes in non root memcg.
+
+This issue:
+ - can harm performance of non root memcg buffered writes.
+ - can report too small (even negative) values in
+ memory.stat[(total_)dirty] counters of all memcg, including the root.
+
+To avoid polluting migrate.c with #ifdef CONFIG_MEMCG checks, introduce
+page_memcg() and set_page_memcg() helpers.
+
+Test:
+ 0) setup and enter limited memcg
+ mkdir /sys/fs/cgroup/test
+ echo 1G > /sys/fs/cgroup/test/memory.limit_in_bytes
+ echo $$ > /sys/fs/cgroup/test/cgroup.procs
+
+ 1) buffered writes baseline
+ dd if=/dev/zero of=/data/tmp/foo bs=1M count=1k
+ sync
+ grep ^dirty /sys/fs/cgroup/test/memory.stat
+
+ 2) buffered writes with compaction antagonist to induce migration
+ yes 1 > /proc/sys/vm/compact_memory &
+ rm -rf /data/tmp/foo
+ dd if=/dev/zero of=/data/tmp/foo bs=1M count=1k
+ kill %
+ sync
+ grep ^dirty /sys/fs/cgroup/test/memory.stat
+
+ 3) buffered writes without antagonist, should match baseline
+ rm -rf /data/tmp/foo
+ dd if=/dev/zero of=/data/tmp/foo bs=1M count=1k
+ sync
+ grep ^dirty /sys/fs/cgroup/test/memory.stat
+
+ (speed, dirty residue)
+ unpatched patched
+ 1) 841 MB/s 0 dirty pages 886 MB/s 0 dirty pages
+ 2) 611 MB/s -33427456 dirty pages 793 MB/s 0 dirty pages
+ 3) 114 MB/s -33427456 dirty pages 891 MB/s 0 dirty pages
+
+ Notice that unpatched baseline performance (1) fell after
+ migration (3): 841 -> 114 MB/s. In the patched kernel, post
+ migration performance matches baseline.
+
+Fixes: c4843a7593a9 ("memcg: add per cgroup dirty page accounting")
+Signed-off-by: Greg Thelen <gthelen@google.com>
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mm.h | 21 +++++++++++++++++++++
+ mm/migrate.c | 12 +++++++++++-
+ 2 files changed, 32 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -916,6 +916,27 @@ static inline void set_page_links(struct
+ #endif
+ }
+
++#ifdef CONFIG_MEMCG
++static inline struct mem_cgroup *page_memcg(struct page *page)
++{
++ return page->mem_cgroup;
++}
++
++static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
++{
++ page->mem_cgroup = memcg;
++}
++#else
++static inline struct mem_cgroup *page_memcg(struct page *page)
++{
++ return NULL;
++}
++
++static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
++{
++}
++#endif
++
+ /*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -734,6 +734,15 @@ static int move_to_new_page(struct page
+ if (PageSwapBacked(page))
+ SetPageSwapBacked(newpage);
+
++ /*
++ * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
++ * needs newpage's memcg set to transfer memcg dirty page accounting.
++ * So perform memcg migration in two steps:
++ * 1. set newpage->mem_cgroup (here)
++ * 2. clear page->mem_cgroup (below)
++ */
++ set_page_memcg(newpage, page_memcg(page));
++
+ mapping = page_mapping(page);
+ if (!mapping)
+ rc = migrate_page(mapping, newpage, page, mode);
+@@ -750,9 +759,10 @@ static int move_to_new_page(struct page
+ rc = fallback_migrate_page(mapping, newpage, page, mode);
+
+ if (rc != MIGRATEPAGE_SUCCESS) {
++ set_page_memcg(newpage, NULL);
+ newpage->mapping = NULL;
+ } else {
+- mem_cgroup_migrate(page, newpage, false);
++ set_page_memcg(page, NULL);
+ if (page_was_mapped)
+ remove_migration_ptes(page, newpage);
+ page->mapping = NULL;
--- /dev/null
+From 2f84a8990ebbe235c59716896e017c6b2ca1200f Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Thu, 1 Oct 2015 15:36:57 -0700
+Subject: mm: hugetlbfs: skip shared VMAs when unmapping private pages to satisfy a fault
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit 2f84a8990ebbe235c59716896e017c6b2ca1200f upstream.
+
+SunDong reported the following on
+
+ https://bugzilla.kernel.org/show_bug.cgi?id=103841
+
+ I think I find a linux bug, I have the test cases is constructed. I
+ can stable recurring problems in fedora22(4.0.4) kernel version,
+ arch for x86_64. I construct transparent huge page, when the parent
+ and child process with MAP_SHARE, MAP_PRIVATE way to access the same
+ huge page area, it has the opportunity to lead to huge page copy on
+ write failure, and then it will munmap the child corresponding mmap
+ area, but then the child mmap area with VM_MAYSHARE attributes, child
+ process munmap this area can trigger VM_BUG_ON in set_vma_resv_flags
+ functions (vma - > vm_flags & VM_MAYSHARE).
+
+There were a number of problems with the report (e.g. it's hugetlbfs that
+triggers this, not transparent huge pages) but it was fundamentally
+correct in that a VM_BUG_ON in set_vma_resv_flags() can be triggered that
+looks like this
+
+ vma ffff8804651fd0d0 start 00007fc474e00000 end 00007fc475e00000
+ next ffff8804651fd018 prev ffff8804651fd188 mm ffff88046b1b1800
+ prot 8000000000000027 anon_vma (null) vm_ops ffffffff8182a7a0
+ pgoff 0 file ffff88106bdb9800 private_data (null)
+ flags: 0x84400fb(read|write|shared|mayread|maywrite|mayexec|mayshare|dontexpand|hugetlb)
+ ------------
+ kernel BUG at mm/hugetlb.c:462!
+ SMP
+ Modules linked in: xt_pkttype xt_LOG xt_limit [..]
+ CPU: 38 PID: 26839 Comm: map Not tainted 4.0.4-default #1
+ Hardware name: Dell Inc. PowerEdge R810/0TT6JF, BIOS 2.7.4 04/26/2012
+ set_vma_resv_flags+0x2d/0x30
+
+The VM_BUG_ON is correct because private and shared mappings have
+different reservation accounting but the warning clearly shows that the
+VMA is shared.
+
+When a private COW fails to allocate a new page then only the process
+that created the VMA gets the page -- all the children unmap the page.
+If the children access that data in the future then they get killed.
+
+The problem is that the same file is mapped shared and private. During
+the COW, the allocation fails, the VMAs are traversed to unmap the other
+private pages but a shared VMA is found and the bug is triggered. This
+patch identifies such VMAs and skips them.
+
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Reported-by: SunDong <sund_sky@126.com>
+Reviewed-by: Michal Hocko <mhocko@suse.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: David Rientjes <rientjes@google.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2974,6 +2974,14 @@ static void unmap_ref_private(struct mm_
+ continue;
+
+ /*
++ * Shared VMAs have their own reserves and do not affect
++ * MAP_PRIVATE accounting but it is possible that a shared
++ * VMA is using the same page so check and skip such VMAs.
++ */
++ if (iter_vma->vm_flags & VM_MAYSHARE)
++ continue;
++
++ /*
+ * Unmap the page from other VMAs without their own reserves.
+ * They get marked to be SIGKILLed if they fault in these
+ * areas. This is because a future no-page fault on this VMA
--- /dev/null
+From 3aaa76e125c1dd58c9b599baa8c6021896874c12 Mon Sep 17 00:00:00 2001
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Date: Tue, 22 Sep 2015 14:59:14 -0700
+Subject: mm: migrate: hugetlb: putback destination hugepage to active list
+
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+
+commit 3aaa76e125c1dd58c9b599baa8c6021896874c12 upstream.
+
+Since commit bcc54222309c ("mm: hugetlb: introduce page_huge_active")
+each hugetlb page maintains its active flag to avoid a race condition
+between multiple calls of isolate_huge_page(), but the current kernel
+doesn't set the flag on a hugepage allocated by migration because the
+proper putback routine isn't called.  This means that users could
+still encounter the race referred to by bcc54222309c in this special
+case, so this patch fixes it.
+
+Fixes: bcc54222309c ("mm: hugetlb: introduce page_huge_active")
+Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/migrate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1068,7 +1068,7 @@ out:
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+ put_new_page(new_hpage, private);
+ else
+- put_page(new_hpage);
++ putback_active_hugepage(new_hpage);
+
+ if (result) {
+ if (rc)
--- /dev/null
+From 012572d4fc2e4ddd5c8ec8614d51414ec6cae02a Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@huawei.com>
+Date: Tue, 22 Sep 2015 14:59:20 -0700
+Subject: ocfs2/dlm: fix deadlock when dispatch assert master
+
+From: Joseph Qi <joseph.qi@huawei.com>
+
+commit 012572d4fc2e4ddd5c8ec8614d51414ec6cae02a upstream.
+
+The order of the following three spinlocks should be:
+dlm_domain_lock < dlm_ctxt->spinlock < dlm_lock_resource->spinlock
+
+But dlm_dispatch_assert_master() is called while holding
+dlm_ctxt->spinlock and dlm_lock_resource->spinlock, and then it calls
+dlm_grab() which will take dlm_domain_lock.
+
+Once another thread (for example, dlm_query_join_handler) has already
+taken dlm_domain_lock, and tries to take dlm_ctxt->spinlock deadlock
+happens.
+
+Signed-off-by: Joseph Qi <joseph.qi@huawei.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Mark Fasheh <mfasheh@suse.com>
+Cc: "Junxiao Bi" <junxiao.bi@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/dlm/dlmmaster.c | 9 ++++++---
+ fs/ocfs2/dlm/dlmrecovery.c | 8 ++++++--
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2
+ int found, ret;
+ int set_maybe;
+ int dispatch_assert = 0;
++ int dispatched = 0;
+
+ if (!dlm_grab(dlm))
+ return DLM_MASTER_RESP_NO;
+@@ -1658,15 +1659,18 @@ send_response:
+ mlog(ML_ERROR, "failed to dispatch assert master work\n");
+ response = DLM_MASTER_RESP_ERROR;
+ dlm_lockres_put(res);
+- } else
++ } else {
++ dispatched = 1;
+ __dlm_lockres_grab_inflight_worker(dlm, res);
++ }
+ spin_unlock(&res->spinlock);
+ } else {
+ if (res)
+ dlm_lockres_put(res);
+ }
+
+- dlm_put(dlm);
++ if (!dispatched)
++ dlm_put(dlm);
+ return response;
+ }
+
+@@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dl
+
+
+ /* queue up work for dlm_assert_master_worker */
+- dlm_grab(dlm); /* get an extra ref for the work item */
+ dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
+ item->u.am.lockres = res; /* already have a ref */
+ /* can optionally ignore node numbers higher than this node */
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2
+ unsigned int hash;
+ int master = DLM_LOCK_RES_OWNER_UNKNOWN;
+ u32 flags = DLM_ASSERT_MASTER_REQUERY;
++ int dispatched = 0;
+
+ if (!dlm_grab(dlm)) {
+ /* since the domain has gone away on this
+@@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2
+ dlm_put(dlm);
+ /* sender will take care of this and retry */
+ return ret;
+- } else
++ } else {
++ dispatched = 1;
+ __dlm_lockres_grab_inflight_worker(dlm, res);
++ }
+ spin_unlock(&res->spinlock);
+ } else {
+ /* put.. incase we are not the master */
+@@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2
+ }
+ spin_unlock(&dlm->spinlock);
+
+- dlm_put(dlm);
++ if (!dispatched)
++ dlm_put(dlm);
+ return master;
+ }
+
--- /dev/null
+From b838b39e930aa1cfd099ea82ac40ed6d6413af26 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Tue, 22 Sep 2015 17:03:54 -0500
+Subject: PCI: Clear IORESOURCE_UNSET when clipping a bridge window
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+commit b838b39e930aa1cfd099ea82ac40ed6d6413af26 upstream.
+
+c770cb4cb505 ("PCI: Mark invalid BARs as unassigned") sets IORESOURCE_UNSET
+if we fail to claim a resource. If we tried to claim a bridge window,
+failed, clipped the window, and tried to claim the clipped window, we
+failed again because of IORESOURCE_UNSET:
+
+ pci_bus 0000:00: root bus resource [mem 0xc0000000-0xffffffff window]
+ pci 0000:00:01.0: can't claim BAR 15 [mem 0xbdf00000-0xddefffff 64bit pref]: no compatible bridge window
+ pci 0000:00:01.0: [mem size 0x20000000 64bit pref] clipped to [mem size 0x1df00000 64bit pref]
+ pci 0000:00:01.0: bridge window [mem size 0x1df00000 64bit pref]
+ pci 0000:00:01.0: can't claim BAR 15 [mem size 0x1df00000 64bit pref]: no address assigned
+
+The 00:01.0 window started as [mem 0xbdf00000-0xddefffff 64bit pref]. That
+starts before the host bridge window [mem 0xc0000000-0xffffffff window], so
+we clipped the 00:01.0 window to [mem 0xc0000000-0xddefffff 64bit pref].
+But we left it marked IORESOURCE_UNSET, so the second claim failed when it
+should have succeeded.
+
+This means downstream devices will also fail for lack of resources, e.g.,
+in the bugzilla below,
+
+ radeon 0000:01:00.0: Fatal error during GPU init
+
+Clear IORESOURCE_UNSET when we clip a bridge window. Also clear
+IORESOURCE_UNSET in our copy of the unclipped window so we can see exactly
+what the original window was and how it now fits inside the upstream
+window.
+
+Fixes: c770cb4cb505 ("PCI: Mark invalid BARs as unassigned")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=85491#c47
+Based-on-patch-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Based-on-patch-by: Yinghai Lu <yinghai@kernel.org>
+Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Acked-by: Yinghai Lu <yinghai@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/bus.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_de
+
+ res->start = start;
+ res->end = end;
++ res->flags &= ~IORESOURCE_UNSET;
++ orig_res.flags &= ~IORESOURCE_UNSET;
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ &orig_res, res);
+
--- /dev/null
+From 9d9240756e63dd87d6cbf5da8b98ceb8f8192b55 Mon Sep 17 00:00:00 2001
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Tue, 15 Sep 2015 11:17:21 -0600
+Subject: PCI: Fix devfn for VPD access through function 0
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit 9d9240756e63dd87d6cbf5da8b98ceb8f8192b55 upstream.
+
+Commit 932c435caba8 ("PCI: Add dev_flags bit to access VPD through function
+0") passes PCI_SLOT(devfn) for the devfn parameter of pci_get_slot().
+Generally this works because we're fairly well guaranteed that a PCIe
+device is at slot address 0, but for the general case, including
+conventional PCI, it's incorrect. We need to get the slot and then convert
+it back into a devfn.
+
+Fixes: 932c435caba8 ("PCI: Add dev_flags bit to access VPD through function 0")
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Bjorn Helgaas <helgaas@kernel.org>
+Acked-by: Myron Stowe <myron.stowe@redhat.com>
+Acked-by: Mark Rustad <mark.d.rustad@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/access.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_
+ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
+ {
+- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++ struct pci_dev *tdev = pci_get_slot(dev->bus,
++ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ssize_t ret;
+
+ if (!tdev)
+@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pc
+ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
+ {
+- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++ struct pci_dev *tdev = pci_get_slot(dev->bus,
++ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ssize_t ret;
+
+ if (!tdev)
+@@ -475,7 +477,8 @@ static const struct pci_vpd_ops pci_vpd_
+
+ static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+ {
+- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++ struct pci_dev *tdev = pci_get_slot(dev->bus,
++ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ int ret = 0;
+
+ if (!tdev)
--- /dev/null
+From da2d03ea27f6ed9d2005a67b20dd021ddacf1e4d Mon Sep 17 00:00:00 2001
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Tue, 15 Sep 2015 22:24:46 -0600
+Subject: PCI: Use function 0 VPD for identical functions, regular VPD for others
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit da2d03ea27f6ed9d2005a67b20dd021ddacf1e4d upstream.
+
+932c435caba8 ("PCI: Add dev_flags bit to access VPD through function 0")
+added PCI_DEV_FLAGS_VPD_REF_F0. Previously, we set the flag on every
+non-zero function of quirked devices. If a function turned out to be
+different from function 0, i.e., it had a different class, vendor ID, or
+device ID, the flag remained set but we didn't make VPD accessible at all.
+
+Flip this around so we only set PCI_DEV_FLAGS_VPD_REF_F0 for functions that
+are identical to function 0, and allow regular VPD access for any other
+functions.
+
+[bhelgaas: changelog, stable tag]
+Fixes: 932c435caba8 ("PCI: Add dev_flags bit to access VPD through function 0")
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Bjorn Helgaas <helgaas@kernel.org>
+Acked-by: Myron Stowe <myron.stowe@redhat.com>
+Acked-by: Mark Rustad <mark.d.rustad@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/access.c | 22 ----------------------
+ drivers/pci/quirks.c | 20 ++++++++++++++++++--
+ 2 files changed, 18 insertions(+), 24 deletions(-)
+
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -475,23 +475,6 @@ static const struct pci_vpd_ops pci_vpd_
+ .release = pci_vpd_pci22_release,
+ };
+
+-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+-{
+- struct pci_dev *tdev = pci_get_slot(dev->bus,
+- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+- int ret = 0;
+-
+- if (!tdev)
+- return -ENODEV;
+- if (!tdev->vpd || !tdev->multifunction ||
+- dev->class != tdev->class || dev->vendor != tdev->vendor ||
+- dev->device != tdev->device)
+- ret = -ENODEV;
+-
+- pci_dev_put(tdev);
+- return ret;
+-}
+-
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+ struct pci_vpd_pci22 *vpd;
+@@ -500,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *d
+ cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ if (!cap)
+ return -ENODEV;
+- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+- int ret = pci_vpd_f0_dev_check(dev);
+
+- if (ret)
+- return ret;
+- }
+ vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ if (!vpd)
+ return -ENOMEM;
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1906,11 +1906,27 @@ static void quirk_netmos(struct pci_dev
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+
++/*
++ * Quirk non-zero PCI functions to route VPD access through function 0 for
++ * devices that share VPD resources between functions. The functions are
++ * expected to be identical devices.
++ */
+ static void quirk_f0_vpd_link(struct pci_dev *dev)
+ {
+- if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++ struct pci_dev *f0;
++
++ if (!PCI_FUNC(dev->devfn))
++ return;
++
++ f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
++ if (!f0)
+ return;
+- dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++ if (f0->vpd && dev->class == f0->class &&
++ dev->vendor == f0->vendor && dev->device == f0->device)
++ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++ pci_dev_put(f0);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
revert-cgroup-simplify-threadgroup-locking.patch
revert-sched-cgroup-replace-signal_struct-group_rwsem-with-a-global-percpu_rwsem.patch
memcg-make-mem_cgroup_read_stat-unsigned.patch
+spi-fix-documentation-of-spi_alloc_master.patch
+spi-xtensa-xtfpga-fix-register-endianness.patch
+spi-bcm2835-bug-fix-wrong-use-of-page_mask.patch
+spi-spi-pxa2xx-check-status-register-to-determine-if-sssr_tint-is-disabled.patch
+spi-spidev-fix-possible-null-dereference.patch
+mm-migrate-hugetlb-putback-destination-hugepage-to-active-list.patch
+lib-iommu-common.c-do-not-try-to-deref-a-null-iommu-lazy_flush-pointer-when-n-pool-hint.patch
+ocfs2-dlm-fix-deadlock-when-dispatch-assert-master.patch
+mm-hugetlbfs-skip-shared-vmas-when-unmapping-private-pages-to-satisfy-a-fault.patch
+memcg-fix-dirty-page-migration.patch
+alsa-hda-tegra-async-probe-for-avoiding-module-loading-deadlock.patch
+alsa-hda-disable-power_save_node-for-thinkpads.patch
+alsa-synth-fix-conflicting-oss-device-registration-on-awe32.patch
+alsa-hda-add-dock-support-for-thinkpad-t550.patch
+alsa-hda-apply-spdif-pin-ctl-to-macbookpro-12-1.patch
+alsa-hda-disable-power_save_node-for-idt-92hd73xx-chips.patch
+asoc-pxa-pxa2xx-ac97-fix-dma-requestor-lines.patch
+asoc-fix-broken-pxa-soc-support.patch
+asoc-dwc-correct-irq-clear-method.patch
+asoc-db1200-fix-dai-link-format-for-db1300-and-db1550.patch
+asoc-sgtl5000-fix-wrong-register-mic_bias_voltage-setup-on-probe.patch
+asoc-tas2552-fix-dbscale-min-declaration.patch
+btrfs-skip-waiting-on-ordered-range-for-special-files.patch
+btrfs-fix-read-corruption-of-compressed-and-shared-extents.patch
+btrfs-update-fix-for-read-corruption-of-compressed-and-shared-extents.patch
+tools-lguest-fix-redefinition-of-struct-virtio_pci_cfg_cap.patch
+pci-fix-devfn-for-vpd-access-through-function-0.patch
+pci-use-function-0-vpd-for-identical-functions-regular-vpd-for-others.patch
+pci-clear-ioresource_unset-when-clipping-a-bridge-window.patch
+dm-thin-disable-discard-support-for-thin-devices-if-pool-s-is-disabled.patch
+dm-crypt-constrain-crypt-device-s-max_segment_size-to-page_size.patch
+ath10k-fix-dma_mapping_error-handling.patch
+svcrdma-fix-send_reply-scatter-gather-set-up.patch
+staging-ion-fix-corruption-of-ion_import_dma_buf.patch
+usb-option-add-zte-pids.patch
+md-raid0-update-queue-parameter-in-a-safer-location.patch
+md-raid0-apply-base-queue-limits-before-disk_stack_limits.patch
+dm-raid-fix-round-up-of-default-region-size.patch
--- /dev/null
+From 2a3fffd45822070309bcf0b1e1dae624d633824a Mon Sep 17 00:00:00 2001
+From: Martin Sperl <kernel@martin.sperl.org>
+Date: Thu, 10 Sep 2015 09:32:14 +0000
+Subject: spi: bcm2835: BUG: fix wrong use of PAGE_MASK
+
+From: Martin Sperl <kernel@martin.sperl.org>
+
+commit 2a3fffd45822070309bcf0b1e1dae624d633824a upstream.
+
+There is a bug in the alignment checking of transfers,
+that results in DMA not being used for un-aligned
+transfers that do not cross page boundaries, which is valid.
+
+This is due to a misconception of the meaning of PAGE_MASK
+when implementing that check originally - (PAGE_SIZE - 1)
+should have been used instead.
+
+Also fixes a copy/paste error.
+
+Reported-by: <robert@axium.co.nz>
+Signed-off-by: Martin Sperl <kernel@martin.sperl.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-bcm2835.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct s
+ /* otherwise we only allow transfers within the same page
+ * to avoid wasting time on dma_mapping when it is not practical
+ */
+- if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
++ if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
+ dev_warn_once(&spi->dev,
+ "Unaligned spi tx-transfer bridging page\n");
+ return false;
+ }
+- if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
++ if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
+ dev_warn_once(&spi->dev,
+- "Unaligned spi tx-transfer bridging page\n");
++ "Unaligned spi rx-transfer bridging page\n");
+ return false;
+ }
+
--- /dev/null
+From a394d635193b641f2c86ead5ada5b115d57c51f8 Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Sun, 6 Sep 2015 01:46:54 +0300
+Subject: spi: Fix documentation of spi_alloc_master()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+commit a394d635193b641f2c86ead5ada5b115d57c51f8 upstream.
+
+Actually, spi_master_put() after spi_alloc_master() must _not_ be followed
+by kfree(). The memory is already freed with the call to spi_master_put()
+through spi_master_class, which registers a release function. Calling both
+spi_master_put() and kfree() results in often nasty (and delayed) crashes
+elsewhere in the kernel, often in the networking stack.
+
+This reverts commit eb4af0f5349235df2e4a5057a72fc8962d00308a.
+
+Link to patch and concerns: https://lkml.org/lkml/2012/9/3/269
+or
+http://lkml.iu.edu/hypermail/linux/kernel/1209.0/00790.html
+
+Alexey Klimov: This revert becomes valid after
+94c69f765f1b4a658d96905ec59928e3e3e07e6a when spi-imx.c
+has been fixed and there is no need to call kfree() so comment
+for spi_alloc_master() should be fixed.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Alexey Klimov <alexey.klimov@linaro.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1437,8 +1437,7 @@ static struct class spi_master_class = {
+ *
+ * The caller is responsible for assigning the bus number and initializing
+ * the master's methods before calling spi_register_master(); and (after errors
+- * adding the device) calling spi_master_put() and kfree() to prevent a memory
+- * leak.
++ * adding the device) calling spi_master_put() to prevent a memory leak.
+ */
+ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+ {
--- /dev/null
+From 02bc933ebb59208f42c2e6305b2c17fd306f695d Mon Sep 17 00:00:00 2001
+From: "Tan, Jui Nee" <jui.nee.tan@intel.com>
+Date: Tue, 1 Sep 2015 10:22:51 +0800
+Subject: spi: spi-pxa2xx: Check status register to determine if SSSR_TINT is disabled
+
+From: "Tan, Jui Nee" <jui.nee.tan@intel.com>
+
+commit 02bc933ebb59208f42c2e6305b2c17fd306f695d upstream.
+
+On Intel Baytrail, there is a case where the interrupt handler gets
+called but no SPI message is captured. The RX FIFO is indeed empty when
+the RX timeout pending interrupt (SSSR_TINT) happens.
+
+Use the BIOS version where both HSUART and SPI are on the same IRQ. Both
+drivers are using IRQF_SHARED when calling the request_irq function. When
+running two separate and independent SPI and HSUART application that
+generate data traffic on both components, user will see messages like
+below on the console:
+
+ pxa2xx-spi pxa2xx-spi.0: bad message state in interrupt handler
+
+This commit will fix this by first checking Receiver Time-out Interrupt,
+if it is disabled, ignore the request and return without servicing.
+
+Signed-off-by: Tan, Jui Nee <jui.nee.tan@intel.com>
+Acked-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-pxa2xx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -643,6 +643,10 @@ static irqreturn_t ssp_int(int irq, void
+ if (!(sccr1_reg & SSCR1_TIE))
+ mask &= ~SSSR_TFS;
+
++ /* Ignore RX timeout interrupt if it is disabled */
++ if (!(sccr1_reg & SSCR1_TINTE))
++ mask &= ~SSSR_TINT;
++
+ if (!(status & mask))
+ return IRQ_NONE;
+
--- /dev/null
+From dd85ebf681ef0ee1fc985c353dd45e8b53b5dc1e Mon Sep 17 00:00:00 2001
+From: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Date: Thu, 10 Sep 2015 16:48:13 +0530
+Subject: spi: spidev: fix possible NULL dereference
+
+From: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+
+commit dd85ebf681ef0ee1fc985c353dd45e8b53b5dc1e upstream.
+
+During the last close we are freeing spidev if spidev->spi is NULL, but
+just before checking if spidev->spi is NULL we are dereferencing it.
+Lets add a check there to avoid the NULL dereference.
+
+Fixes: 9169051617df ("spi: spidev: Don't mangle max_speed_hz in underlying spi device")
+Signed-off-by: Sudip Mukherjee <sudip@vectorindia.org>
+Reviewed-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Tested-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spidev.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -651,7 +651,8 @@ static int spidev_release(struct inode *
+ kfree(spidev->rx_buffer);
+ spidev->rx_buffer = NULL;
+
+- spidev->speed_hz = spidev->spi->max_speed_hz;
++ if (spidev->spi)
++ spidev->speed_hz = spidev->spi->max_speed_hz;
+
+ /* ... after we unbound from the underlying device? */
+ spin_lock_irq(&spidev->spi_lock);
--- /dev/null
+From b0b4855099e301c8603ea37da9a0103a96c2e0b1 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Tue, 22 Sep 2015 14:32:03 +0300
+Subject: spi: xtensa-xtfpga: fix register endianness
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit b0b4855099e301c8603ea37da9a0103a96c2e0b1 upstream.
+
+XTFPGA SPI controller has native endian registers.
+Fix register acessors so that they work in big-endian configurations.
+
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-xtensa-xtfpga.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/spi-xtensa-xtfpga.c
++++ b/drivers/spi/spi-xtensa-xtfpga.c
+@@ -34,13 +34,13 @@ struct xtfpga_spi {
+ static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ unsigned addr, u32 val)
+ {
+- iowrite32(val, spi->regs + addr);
++ __raw_writel(val, spi->regs + addr);
+ }
+
+ static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ unsigned addr)
+ {
+- return ioread32(spi->regs + addr);
++ return __raw_readl(spi->regs + addr);
+ }
+
+ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
--- /dev/null
+From 6fa92e2bcf6390e64895b12761e851c452d87bd8 Mon Sep 17 00:00:00 2001
+From: Shawn Lin <shawn.lin@rock-chips.com>
+Date: Wed, 9 Sep 2015 15:41:52 +0800
+Subject: staging: ion: fix corruption of ion_import_dma_buf
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+commit 6fa92e2bcf6390e64895b12761e851c452d87bd8 upstream.
+
+We found this issue and it still exists in the latest kernel. Simply
+keep ion_handle_create under mutex_lock to avoid this race.
+
+WARNING: CPU: 2 PID: 2648 at drivers/staging/android/ion/ion.c:512 ion_handle_add+0xb4/0xc0()
+ion_handle_add: buffer already found.
+Modules linked in: iwlmvm iwlwifi mac80211 cfg80211 compat
+CPU: 2 PID: 2648 Comm: TimedEventQueue Tainted: G W 3.14.0 #7
+ 00000000 00000000 9a3efd2c 80faf273 9a3efd6c 9a3efd5c 80935dc9 811d7fd3
+ 9a3efd88 00000a58 812208a0 00000200 80e128d4 80e128d4 8d4ae00c a8cd8600
+ a8cd8094 9a3efd74 80935e0e 00000009 9a3efd6c 811d7fd3 9a3efd88 9a3efd9c
+Call Trace:
+ [<80faf273>] dump_stack+0x48/0x69
+ [<80935dc9>] warn_slowpath_common+0x79/0x90
+ [<80e128d4>] ? ion_handle_add+0xb4/0xc0
+ [<80e128d4>] ? ion_handle_add+0xb4/0xc0
+ [<80935e0e>] warn_slowpath_fmt+0x2e/0x30
+ [<80e128d4>] ion_handle_add+0xb4/0xc0
+ [<80e144cc>] ion_import_dma_buf+0x8c/0x110
+ [<80c517c4>] reg_init+0x364/0x7d0
+ [<80993363>] ? futex_wait+0x123/0x210
+ [<80992e0e>] ? get_futex_key+0x16e/0x1e0
+ [<8099308f>] ? futex_wake+0x5f/0x120
+ [<80c51e19>] vpu_service_ioctl+0x1e9/0x500
+ [<80994aec>] ? do_futex+0xec/0x8e0
+ [<80971080>] ? prepare_to_wait_event+0xc0/0xc0
+ [<80c51c30>] ? reg_init+0x7d0/0x7d0
+ [<80a22562>] do_vfs_ioctl+0x2d2/0x4c0
+ [<80b198ad>] ? inode_has_perm.isra.41+0x2d/0x40
+ [<80b199cf>] ? file_has_perm+0x7f/0x90
+ [<80b1a5f7>] ? selinux_file_ioctl+0x47/0xf0
+ [<80a227a8>] SyS_ioctl+0x58/0x80
+ [<80fb45e8>] syscall_call+0x7/0x7
+ [<80fb0000>] ? mmc_do_calc_max_discard+0xab/0xe4
+
+Fixes: 83271f626 ("ion: hold reference to handle...")
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Reviewed-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ion/ion.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(st
+ mutex_unlock(&client->lock);
+ goto end;
+ }
+- mutex_unlock(&client->lock);
+
+ handle = ion_handle_create(client, buffer);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
++ mutex_unlock(&client->lock);
+ goto end;
++ }
+
+- mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
--- /dev/null
+From 9d11b51ce7c150a69e761e30518f294fc73d55ff Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 9 Jul 2015 16:45:18 -0400
+Subject: svcrdma: Fix send_reply() scatter/gather set-up
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 9d11b51ce7c150a69e761e30518f294fc73d55ff upstream.
+
+The Linux NFS server returns garbage in the data payload of inline
+NFS/RDMA READ replies. These are READs of under 1000 bytes or so
+where the client has not provided either a reply chunk or a write
+list.
+
+The NFS server delivers the data payload for an NFS READ reply to
+the transport in an xdr_buf page list. If the NFS client did not
+provide a reply chunk or a write list, send_reply() is supposed to
+set up a separate sge for the page containing the READ data, and
+another sge for XDR padding if needed, then post all of the sges via
+a single SEND Work Request.
+
+The problem is send_reply() does not advance through the xdr_buf
+when setting up scatter/gather entries for SEND WR. It always calls
+dma_map_xdr with xdr_off set to zero. When there's more than one
+sge, dma_map_xdr() sets up the SEND sge's so they all point to the
+xdr_buf's head.
+
+The current Linux NFS/RDMA client always provides a reply chunk or
+a write list when performing an NFS READ over RDMA. Therefore, it
+does not exercise this particular case. The Linux server has never
+had to use more than one extra sge for building RPC/RDMA replies
+with a Linux client.
+
+However, an NFS/RDMA client _is_ allowed to send small NFS READs
+without setting up a write list or reply chunk. The NFS READ reply
+fits entirely within the inline reply buffer in this case. This is
+perhaps a more efficient way of performing NFS READs that the Linux
+NFS/RDMA client may some day adopt.
+
+Fixes: b432e6b3d9c1 ('svcrdma: Change DMA mapping logic to . . .')
+BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=285
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -384,6 +384,7 @@ static int send_reply(struct svcxprt_rdm
+ int byte_count)
+ {
+ struct ib_send_wr send_wr;
++ u32 xdr_off;
+ int sge_no;
+ int sge_bytes;
+ int page_no;
+@@ -418,8 +419,8 @@ static int send_reply(struct svcxprt_rdm
+ ctxt->direction = DMA_TO_DEVICE;
+
+ /* Map the payload indicated by 'byte_count' */
++ xdr_off = 0;
+ for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+- int xdr_off = 0;
+ sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
+ byte_count -= sge_bytes;
+ ctxt->sge[sge_no].addr =
+@@ -457,6 +458,13 @@ static int send_reply(struct svcxprt_rdm
+ }
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
++ /* The loop above bumps sc_dma_used for each sge. The
++ * xdr_buf.tail gets a separate sge, but resides in the
++ * same page as xdr_buf.head. Don't count it twice.
++ */
++ if (sge_no > ctxt->count)
++ atomic_dec(&rdma->sc_dma_used);
++
+ if (sge_no > rdma->sc_max_sge) {
+ pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ goto err;
--- /dev/null
+From e523caa601f4a7c2fa1ecd040db921baf7453798 Mon Sep 17 00:00:00 2001
+From: Rusty Russell <rusty@rustcorp.com.au>
+Date: Wed, 26 Aug 2015 10:42:26 +0930
+Subject: tools/lguest: Fix redefinition of struct virtio_pci_cfg_cap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rusty Russell <rusty@rustcorp.com.au>
+
+commit e523caa601f4a7c2fa1ecd040db921baf7453798 upstream.
+
+Ours uses a u32 for the data, since we ensure it's always
+aligned and it's x86 so it doesn't matter anyway.
+
+ lguest.c:128:8: error: redefinition of ‘struct virtio_pci_cfg_cap’
+
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Fixes: 3121bb023e2db ("virtio: define virtio_pci_cfg_cap in header.")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/lguest/lguest.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/tools/lguest/lguest.c
++++ b/tools/lguest/lguest.c
+@@ -125,7 +125,11 @@ struct device_list {
+ /* The list of Guest devices, based on command line arguments. */
+ static struct device_list devices;
+
+-struct virtio_pci_cfg_cap {
++/*
++ * Just like struct virtio_pci_cfg_cap in uapi/linux/virtio_pci.h,
++ * but uses a u32 explicitly for the data.
++ */
++struct virtio_pci_cfg_cap_u32 {
+ struct virtio_pci_cap cap;
+ u32 pci_cfg_data; /* Data for BAR access. */
+ };
+@@ -157,7 +161,7 @@ struct pci_config {
+ struct virtio_pci_notify_cap notify;
+ struct virtio_pci_cap isr;
+ struct virtio_pci_cap device;
+- struct virtio_pci_cfg_cap cfg_access;
++ struct virtio_pci_cfg_cap_u32 cfg_access;
+ };
+
+ /* The device structure describes a single device. */
+@@ -1291,7 +1295,7 @@ static struct device *dev_and_reg(u32 *r
+ * only fault if they try to write with some invalid bar/offset/length.
+ */
+ static bool valid_bar_access(struct device *d,
+- struct virtio_pci_cfg_cap *cfg_access)
++ struct virtio_pci_cfg_cap_u32 *cfg_access)
+ {
+ /* We only have 1 bar (BAR0) */
+ if (cfg_access->cap.bar != 0)
--- /dev/null
+From 19ab6bc5674a30fdb6a2436b068d19a3c17dc73e Mon Sep 17 00:00:00 2001
+From: "Liu.Zhao" <lzsos369@163.com>
+Date: Mon, 24 Aug 2015 08:36:12 -0700
+Subject: USB: option: add ZTE PIDs
+
+From: "Liu.Zhao" <lzsos369@163.com>
+
+commit 19ab6bc5674a30fdb6a2436b068d19a3c17dc73e upstream.
+
+This is intended to add ZTE device PIDs on kernel.
+
+Signed-off-by: Liu.Zhao <lzsos369@163.com>
+[johan: sort the new entries ]
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/serial/option.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -278,6 +278,10 @@ static void option_instat_callback(struc
+ #define ZTE_PRODUCT_MF622 0x0001
+ #define ZTE_PRODUCT_MF628 0x0015
+ #define ZTE_PRODUCT_MF626 0x0031
++#define ZTE_PRODUCT_ZM8620_X 0x0396
++#define ZTE_PRODUCT_ME3620_MBIM 0x0426
++#define ZTE_PRODUCT_ME3620_X 0x1432
++#define ZTE_PRODUCT_ME3620_L 0x1433
+ #define ZTE_PRODUCT_AC2726 0xfff1
+ #define ZTE_PRODUCT_MG880 0xfffd
+ #define ZTE_PRODUCT_CDMA_TECH 0xfffe
+@@ -544,6 +548,18 @@ static const struct option_blacklist_inf
+ .sendsetup = BIT(1) | BIT(2) | BIT(3),
+ };
+
++static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
++ .reserved = BIT(2) | BIT(3) | BIT(4),
++};
++
++static const struct option_blacklist_info zte_me3620_xl_blacklist = {
++ .reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
++static const struct option_blacklist_info zte_zm8620_x_blacklist = {
++ .reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
+ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ .reserved = BIT(1) | BIT(2),
+ };
+@@ -1591,6 +1607,14 @@ static const struct usb_device_id option
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
++ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
++ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },