--- /dev/null
+From ca2b497253ad01c80061a1f3ee9eb91b5d54a849 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 5 Oct 2018 13:24:36 +0100
+Subject: arm64: perf: Reject stand-alone CHAIN events for PMUv3
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit ca2b497253ad01c80061a1f3ee9eb91b5d54a849 upstream.
+
+It doesn't make sense for a perf event to be configured as a CHAIN event
+in isolation, so extend the arm_pmu structure with a ->filter_match()
+function to allow the backend PMU implementation to reject CHAIN events
+early.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/perf_event.c | 7 +++++++
+ drivers/perf/arm_pmu.c | 8 +++++++-
+ include/linux/perf/arm_pmu.h | 1 +
+ 3 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -824,6 +824,12 @@ static int armv8pmu_set_event_filter(str
+ return 0;
+ }
+
++static int armv8pmu_filter_match(struct perf_event *event)
++{
++ unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
++ return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
++}
++
+ static void armv8pmu_reset(void *info)
+ {
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+@@ -970,6 +976,7 @@ static int armv8_pmu_init(struct arm_pmu
+ cpu_pmu->reset = armv8pmu_reset,
+ cpu_pmu->max_period = (1LLU << 32) - 1,
+ cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
++ cpu_pmu->filter_match = armv8pmu_filter_match;
+
+ return 0;
+ }
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -483,7 +483,13 @@ static int armpmu_filter_match(struct pe
+ {
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ unsigned int cpu = smp_processor_id();
+- return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
++ int ret;
++
++ ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
++ if (ret && armpmu->filter_match)
++ return armpmu->filter_match(event);
++
++ return ret;
+ }
+
+ static ssize_t armpmu_cpumask_show(struct device *dev,
+--- a/include/linux/perf/arm_pmu.h
++++ b/include/linux/perf/arm_pmu.h
+@@ -110,6 +110,7 @@ struct arm_pmu {
+ void (*stop)(struct arm_pmu *);
+ void (*reset)(void *);
+ int (*map_event)(struct perf_event *event);
++ int (*filter_match)(struct perf_event *event);
+ int num_events;
+ u64 max_period;
+ bool secure_access; /* 32-bit ARM only */
--- /dev/null
+From 479adb89a97b0a33e5a9d702119872cc82ca21aa Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 4 Oct 2018 13:28:08 -0700
+Subject: cgroup: Fix dom_cgrp propagation when enabling threaded mode
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 479adb89a97b0a33e5a9d702119872cc82ca21aa upstream.
+
+A cgroup which is already a threaded domain may be converted into a
+threaded cgroup if the prerequisite conditions are met. When this
+happens, all threaded descendant should also have their ->dom_cgrp
+updated to the new threaded domain cgroup. Unfortunately, this
+propagation was missing, leading to the following failure.
+
+ # cd /sys/fs/cgroup/unified
+ # cat cgroup.subtree_control # show that no controllers are enabled
+
+ # mkdir -p mycgrp/a/b/c
+ # echo threaded > mycgrp/a/b/cgroup.type
+
+ At this point, the hierarchy looks as follows:
+
+ mycgrp [d]
+ a [dt]
+ b [t]
+ c [inv]
+
+ Now let's make node "a" threaded (and thus "mycgrp" is made "domain threaded"):
+
+ # echo threaded > mycgrp/a/cgroup.type
+
+ By this point, we now have a hierarchy that looks as follows:
+
+ mycgrp [dt]
+ a [t]
+ b [t]
+ c [inv]
+
+ But, when we try to convert the node "c" from "domain invalid" to
+ "threaded", we get ENOTSUP on the write():
+
+ # echo threaded > mycgrp/a/b/c/cgroup.type
+ sh: echo: write error: Operation not supported
+
+This patch fixes the problem by
+
+* Moving the opencoded ->dom_cgrp save and restoration in
+ cgroup_enable_threaded() into cgroup_{save|restore}_control() so
+ that mulitple cgroups can be handled.
+
+* Updating all threaded descendants' ->dom_cgrp to point to the new
+ dom_cgrp when enabling threaded mode.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: "Michael Kerrisk (man-pages)" <mtk.manpages@gmail.com>
+Reported-by: Amin Jamali <ajamali@pivotal.io>
+Reported-by: Joao De Almeida Pereira <jpereira@pivotal.io>
+Link: https://lore.kernel.org/r/CAKgNAkhHYCMn74TCNiMJ=ccLd7DcmXSbvw3CbZ1YREeG7iJM5g@mail.gmail.com
+Fixes: 454000adaa2a ("cgroup: introduce cgroup->dom_cgrp and threaded css_set handling")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cgroup-defs.h | 1 +
+ kernel/cgroup/cgroup.c | 25 ++++++++++++++++---------
+ 2 files changed, 17 insertions(+), 9 deletions(-)
+
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -353,6 +353,7 @@ struct cgroup {
+ * specific task are charged to the dom_cgrp.
+ */
+ struct cgroup *dom_cgrp;
++ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
+
+ /*
+ * list of pidlists, up to two for each namespace (one for procs, one
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2780,11 +2780,12 @@ restart:
+ }
+
+ /**
+- * cgroup_save_control - save control masks of a subtree
++ * cgroup_save_control - save control masks and dom_cgrp of a subtree
+ * @cgrp: root of the target subtree
+ *
+- * Save ->subtree_control and ->subtree_ss_mask to the respective old_
+- * prefixed fields for @cgrp's subtree including @cgrp itself.
++ * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
++ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
++ * itself.
+ */
+ static void cgroup_save_control(struct cgroup *cgrp)
+ {
+@@ -2794,6 +2795,7 @@ static void cgroup_save_control(struct c
+ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
+ dsct->old_subtree_control = dsct->subtree_control;
+ dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
++ dsct->old_dom_cgrp = dsct->dom_cgrp;
+ }
+ }
+
+@@ -2819,11 +2821,12 @@ static void cgroup_propagate_control(str
+ }
+
+ /**
+- * cgroup_restore_control - restore control masks of a subtree
++ * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
+ * @cgrp: root of the target subtree
+ *
+- * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
+- * prefixed fields for @cgrp's subtree including @cgrp itself.
++ * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
++ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
++ * itself.
+ */
+ static void cgroup_restore_control(struct cgroup *cgrp)
+ {
+@@ -2833,6 +2836,7 @@ static void cgroup_restore_control(struc
+ cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
+ dsct->subtree_control = dsct->old_subtree_control;
+ dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
++ dsct->dom_cgrp = dsct->old_dom_cgrp;
+ }
+ }
+
+@@ -3140,6 +3144,8 @@ static int cgroup_enable_threaded(struct
+ {
+ struct cgroup *parent = cgroup_parent(cgrp);
+ struct cgroup *dom_cgrp = parent->dom_cgrp;
++ struct cgroup *dsct;
++ struct cgroup_subsys_state *d_css;
+ int ret;
+
+ lockdep_assert_held(&cgroup_mutex);
+@@ -3169,12 +3175,13 @@ static int cgroup_enable_threaded(struct
+ */
+ cgroup_save_control(cgrp);
+
+- cgrp->dom_cgrp = dom_cgrp;
++ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
++ if (dsct == cgrp || cgroup_is_threaded(dsct))
++ dsct->dom_cgrp = dom_cgrp;
++
+ ret = cgroup_apply_control(cgrp);
+ if (!ret)
+ parent->nr_threaded_children++;
+- else
+- cgrp->dom_cgrp = cgrp;
+
+ cgroup_finalize_control(cgrp, ret);
+ return ret;
--- /dev/null
+From c7cd55504a5b0fc826a2cd9540845979d24ae542 Mon Sep 17 00:00:00 2001
+From: Shenghui Wang <shhuiw@foxmail.com>
+Date: Sun, 7 Oct 2018 14:45:41 +0800
+Subject: dm cache: destroy migration_cache if cache target registration failed
+
+From: Shenghui Wang <shhuiw@foxmail.com>
+
+commit c7cd55504a5b0fc826a2cd9540845979d24ae542 upstream.
+
+Commit 7e6358d244e47 ("dm: fix various targets to dm_register_target
+after module __init resources created") inadvertently introduced this
+bug when it moved dm_register_target() after the call to KMEM_CACHE().
+
+Fixes: 7e6358d244e47 ("dm: fix various targets to dm_register_target after module __init resources created")
+Cc: stable@vger.kernel.org
+Signed-off-by: Shenghui Wang <shhuiw@foxmail.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -3571,14 +3571,13 @@ static int __init dm_cache_init(void)
+ int r;
+
+ migration_cache = KMEM_CACHE(dm_cache_migration, 0);
+- if (!migration_cache) {
+- dm_unregister_target(&cache_target);
++ if (!migration_cache)
+ return -ENOMEM;
+- }
+
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
++ kmem_cache_destroy(migration_cache);
+ return r;
+ }
+
--- /dev/null
+From 9864cd5dc54cade89fd4b0954c2e522841aa247c Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Tue, 9 Oct 2018 14:24:31 +0900
+Subject: dm: fix report zone remapping to account for partition offset
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 9864cd5dc54cade89fd4b0954c2e522841aa247c upstream.
+
+If dm-linear or dm-flakey are layered on top of a partition of a zoned
+block device, remapping of the start sector and write pointer position
+of the zones reported by a report zones BIO must be modified to account
+for the target table entry mapping (start offset within the device and
+entry mapping with the dm device). If the target's backing device is a
+partition of a whole disk, the start sector on the physical device of
+the partition must also be accounted for when modifying the zone
+information. However, dm_remap_zone_report() was not considering this
+last case, resulting in incorrect zone information remapping with
+targets using disk partitions.
+
+Fix this by calculating the target backing device start sector using
+the position of the completed report zones BIO and the unchanged
+position and size of the original report zone BIO. With this value
+calculated, the start sector and write pointer position of the target
+zones can be correctly remapped.
+
+Fixes: 10999307c14e ("dm: introduce dm_remap_zone_report()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1034,12 +1034,14 @@ void dm_accept_partial_bio(struct bio *b
+ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
+ /*
+- * The zone descriptors obtained with a zone report indicate
+- * zone positions within the target device. The zone descriptors
+- * must be remapped to match their position within the dm device.
+- * A target may call dm_remap_zone_report after completion of a
+- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
+- * from the target device mapping to the dm device.
++ * The zone descriptors obtained with a zone report indicate zone positions
++ * within the target backing device, regardless of that device is a partition
++ * and regardless of the target mapping start sector on the device or partition.
++ * The zone descriptors start sector and write pointer position must be adjusted
++ * to match their relative position within the dm device.
++ * A target may call dm_remap_zone_report() after completion of a
++ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
++ * backing device.
+ */
+ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ {
+@@ -1050,6 +1052,7 @@ void dm_remap_zone_report(struct dm_targ
+ struct blk_zone *zone;
+ unsigned int nr_rep = 0;
+ unsigned int ofst;
++ sector_t part_offset;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ void *addr;
+@@ -1058,6 +1061,15 @@ void dm_remap_zone_report(struct dm_targ
+ return;
+
+ /*
++ * bio sector was incremented by the request size on completion. Taking
++ * into account the original request sector, the target start offset on
++ * the backing device and the target mapping offset (ti->begin), the
++ * start sector of the backing device. The partition offset is always 0
++ * if the target uses a whole device.
++ */
++ part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
++
++ /*
+ * Remap the start sector of the reported zones. For sequential zones,
+ * also remap the write pointer position.
+ */
+@@ -1074,6 +1086,7 @@ void dm_remap_zone_report(struct dm_targ
+ /* Set zones start sector */
+ while (hdr->nr_zones && ofst < bvec.bv_len) {
+ zone = addr + ofst;
++ zone->start -= part_offset;
+ if (zone->start >= start + ti->len) {
+ hdr->nr_zones = 0;
+ break;
+@@ -1085,7 +1098,7 @@ void dm_remap_zone_report(struct dm_targ
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+- zone->wp = zone->wp + ti->begin - start;
++ zone->wp = zone->wp + ti->begin - start - part_offset;
+ }
+ ofst += sizeof(struct blk_zone);
+ hdr->nr_zones--;
--- /dev/null
+From beb9caac211c1be1bc118bb62d5cf09c4107e6a5 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Wed, 10 Oct 2018 12:01:55 -0400
+Subject: dm linear: eliminate linear_end_io call if CONFIG_DM_ZONED disabled
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit beb9caac211c1be1bc118bb62d5cf09c4107e6a5 upstream.
+
+It is best to avoid any extra overhead associated with bio completion.
+DM core will indirectly call a DM target's .end_io if it is defined.
+In the case of DM linear, there is no need to do so (for every bio that
+completes) if CONFIG_DM_ZONED is not enabled.
+
+Avoiding an extra indirect call for every bio completion is very
+important for ensuring DM linear doesn't incur more overhead that
+further widens the performance gap between dm-linear and raw block
+devices.
+
+Fixes: 0be12c1c7fce7 ("dm linear: add support for zoned block devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-linear.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -101,6 +101,7 @@ static int linear_map(struct dm_target *
+ return DM_MAPIO_REMAPPED;
+ }
+
++#ifdef CONFIG_DM_ZONED
+ static int linear_end_io(struct dm_target *ti, struct bio *bio,
+ blk_status_t *error)
+ {
+@@ -111,6 +112,7 @@ static int linear_end_io(struct dm_targe
+
+ return DM_ENDIO_DONE;
+ }
++#endif
+
+ static void linear_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+@@ -187,12 +189,16 @@ static size_t linear_dax_copy_from_iter(
+ static struct target_type linear_target = {
+ .name = "linear",
+ .version = {1, 4, 0},
++#ifdef CONFIG_DM_ZONED
++ .end_io = linear_end_io,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
++#else
++ .features = DM_TARGET_PASSES_INTEGRITY,
++#endif
+ .module = THIS_MODULE,
+ .ctr = linear_ctr,
+ .dtr = linear_dtr,
+ .map = linear_map,
+- .end_io = linear_end_io,
+ .status = linear_status,
+ .prepare_ioctl = linear_prepare_ioctl,
+ .iterate_devices = linear_iterate_devices,
--- /dev/null
+From 118aa47c7072bce05fc39bd40a1c0a90caed72ab Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Thu, 11 Oct 2018 11:45:30 +0900
+Subject: dm linear: fix linear_end_io conditional definition
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 118aa47c7072bce05fc39bd40a1c0a90caed72ab upstream.
+
+The dm-linear target is independent of the dm-zoned target. For code
+requiring support for zoned block devices, use CONFIG_BLK_DEV_ZONED
+instead of CONFIG_DM_ZONED.
+
+While at it, similarly to dm linear, also enable the DM_TARGET_ZONED_HM
+feature in dm-flakey only if CONFIG_BLK_DEV_ZONED is defined.
+
+Fixes: beb9caac211c1 ("dm linear: eliminate linear_end_io call if CONFIG_DM_ZONED disabled")
+Fixes: 0be12c1c7fce7 ("dm linear: add support for zoned block devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-flakey.c | 2 ++
+ drivers/md/dm-linear.c | 4 ++--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -463,7 +463,9 @@ static int flakey_iterate_devices(struct
+ static struct target_type flakey_target = {
+ .name = "flakey",
+ .version = {1, 5, 0},
++#ifdef CONFIG_BLK_DEV_ZONED
+ .features = DM_TARGET_ZONED_HM,
++#endif
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -101,7 +101,7 @@ static int linear_map(struct dm_target *
+ return DM_MAPIO_REMAPPED;
+ }
+
+-#ifdef CONFIG_DM_ZONED
++#ifdef CONFIG_BLK_DEV_ZONED
+ static int linear_end_io(struct dm_target *ti, struct bio *bio,
+ blk_status_t *error)
+ {
+@@ -189,7 +189,7 @@ static size_t linear_dax_copy_from_iter(
+ static struct target_type linear_target = {
+ .name = "linear",
+ .version = {1, 4, 0},
+-#ifdef CONFIG_DM_ZONED
++#ifdef CONFIG_BLK_DEV_ZONED
+ .end_io = linear_end_io,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+ #else
--- /dev/null
+From 76ebebd2464c5c8a4453c98b6dbf9c95a599e810 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 17 Aug 2018 15:19:37 -0400
+Subject: mach64: detect the dot clock divider correctly on sparc
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 76ebebd2464c5c8a4453c98b6dbf9c95a599e810 upstream.
+
+On Sun Ultra 5, it happens that the dot clock is not set up properly for
+some videomodes. For example, if we set the videomode "r1024x768x60" in
+the firmware, Linux would incorrectly set a videomode with refresh rate
+180Hz when booting (surprisingly, my LCD monitor can display it, although
+display quality is very low).
+
+The reason is this: Older mach64 cards set the divider in the register
+VCLK_POST_DIV. The register has four 2-bit fields (the field that is
+actually used is specified in the lowest two bits of the register
+CLOCK_CNTL). The 2 bits select divider "1, 2, 4, 8". On newer mach64 cards,
+there's another bit added - the top four bits of PLL_EXT_CNTL extend the
+divider selection, so we have possible dividers "1, 2, 4, 8, 3, 5, 6, 12".
+The Linux driver clears the top four bits of PLL_EXT_CNTL and never sets
+them, so it can work regardless if the card supports them. However, the
+sparc64 firmware may set these extended dividers during boot - and the
+mach64 driver detects incorrect dot clock in this case.
+
+This patch makes the driver read the additional divider bit from
+PLL_EXT_CNTL and calculate the initial refresh rate properly.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Acked-by: David S. Miller <davem@davemloft.net>
+Reviewed-by: Ville Syrjälä <syrjala@sci.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/aty/atyfb.h | 3 ++-
+ drivers/video/fbdev/aty/atyfb_base.c | 7 ++++---
+ drivers/video/fbdev/aty/mach64_ct.c | 10 +++++-----
+ 3 files changed, 11 insertions(+), 9 deletions(-)
+
+--- a/drivers/video/fbdev/aty/atyfb.h
++++ b/drivers/video/fbdev/aty/atyfb.h
+@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_
+ extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
+ extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+
++extern const u8 aty_postdividers[8];
++
+
+ /*
+ * Hardware cursor support
+@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct
+
+ extern void aty_reset_engine(const struct atyfb_par *par);
+ extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
+-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+
+ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_
+ /*
+ * PLL Reference Divider M:
+ */
+- M = pll_regs[2];
++ M = pll_regs[PLL_REF_DIV];
+
+ /*
+ * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
+ */
+- N = pll_regs[7 + (clock_cntl & 3)];
++ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
+
+ /*
+ * PLL Post Divider P (Dependent on CLOCK_CNTL):
+ */
+- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
++ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
++ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
+
+ /*
+ * PLL Divider Q:
+--- a/drivers/video/fbdev/aty/mach64_ct.c
++++ b/drivers/video/fbdev/aty/mach64_ct.c
+@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8
+ */
+
+ #define Maximum_DSP_PRECISION 7
+-static u8 postdividers[] = {1,2,4,8,3};
++const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
+
+ static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
+ {
+@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct
+ pll->vclk_post_div += (q < 64*8);
+ pll->vclk_post_div += (q < 32*8);
+ }
+- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
++ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
+ // pll->vclk_post_div <<= 6;
+ pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
+ pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
+@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct
+ u8 mclk_fb_div, pll_ext_cntl;
+ pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
+ pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
+- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
++ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
+ mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
+ if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
+ mclk_fb_div <<= 1;
+@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct
+ xpost_div += (q < 64*8);
+ xpost_div += (q < 32*8);
+ }
+- pll->ct.xclk_post_div_real = postdividers[xpost_div];
++ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
+ pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
+
+ #ifdef CONFIG_PPC
+@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct
+ mpost_div += (q < 64*8);
+ mpost_div += (q < 32*8);
+ }
+- sclk_post_div_real = postdividers[mpost_div];
++ sclk_post_div_real = aty_postdividers[mpost_div];
+ pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
+ pll->ct.spll_cntl2 = mpost_div << 4;
+ #ifdef DEBUG
--- /dev/null
+From ea7e0480a4b695d0aa6b3fa99bd658a003122113 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 25 Sep 2018 15:51:26 -0700
+Subject: MIPS: VDSO: Always map near top of user memory
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit ea7e0480a4b695d0aa6b3fa99bd658a003122113 upstream.
+
+When using the legacy mmap layout, for example triggered using ulimit -s
+unlimited, get_unmapped_area() fills memory from bottom to top starting
+from a fairly low address near TASK_UNMAPPED_BASE.
+
+This placement is suboptimal if the user application wishes to allocate
+large amounts of heap memory using the brk syscall. With the VDSO being
+located low in the user's virtual address space, the amount of space
+available for access using brk is limited much more than it was prior to
+the introduction of the VDSO.
+
+For example:
+
+ # ulimit -s unlimited; cat /proc/self/maps
+ 00400000-004ec000 r-xp 00000000 08:00 71436 /usr/bin/coreutils
+ 004fc000-004fd000 rwxp 000ec000 08:00 71436 /usr/bin/coreutils
+ 004fd000-0050f000 rwxp 00000000 00:00 0
+ 00cc3000-00ce4000 rwxp 00000000 00:00 0 [heap]
+ 2ab96000-2ab98000 r--p 00000000 00:00 0 [vvar]
+ 2ab98000-2ab99000 r-xp 00000000 00:00 0 [vdso]
+ 2ab99000-2ab9d000 rwxp 00000000 00:00 0
+ ...
+
+Resolve this by adjusting STACK_TOP to reserve space for the VDSO &
+providing an address hint to get_unmapped_area() causing it to use this
+space even when using the legacy mmap layout.
+
+We reserve enough space for the VDSO, plus 1MB or 256MB for 32 bit & 64
+bit systems respectively within which we randomize the VDSO base
+address. Previously this randomization was taken care of by the mmap
+base address randomization performed by arch_mmap_rnd(). The 1MB & 256MB
+sizes are somewhat arbitrary but chosen such that we have some
+randomization without taking up too much of the user's virtual address
+space, which is often in short supply for 32 bit systems.
+
+With this the VDSO is always mapped at a high address, leaving lots of
+space for statically linked programs to make use of brk:
+
+ # ulimit -s unlimited; cat /proc/self/maps
+ 00400000-004ec000 r-xp 00000000 08:00 71436 /usr/bin/coreutils
+ 004fc000-004fd000 rwxp 000ec000 08:00 71436 /usr/bin/coreutils
+ 004fd000-0050f000 rwxp 00000000 00:00 0
+ 00c28000-00c49000 rwxp 00000000 00:00 0 [heap]
+ ...
+ 7f67c000-7f69d000 rwxp 00000000 00:00 0 [stack]
+ 7f7fc000-7f7fd000 rwxp 00000000 00:00 0
+ 7fcf1000-7fcf3000 r--p 00000000 00:00 0 [vvar]
+ 7fcf3000-7fcf4000 r-xp 00000000 00:00 0 [vdso]
+
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Reported-by: Huacai Chen <chenhc@lemote.com>
+Fixes: ebb5e78cc634 ("MIPS: Initial implementation of a VDSO")
+Cc: Huacai Chen <chenhc@lemote.com>
+Cc: linux-mips@linux-mips.org
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/processor.h | 10 +++++-----
+ arch/mips/kernel/process.c | 25 +++++++++++++++++++++++++
+ arch/mips/kernel/vdso.c | 18 +++++++++++++++++-
+ 3 files changed, 47 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -13,6 +13,7 @@
+
+ #include <linux/atomic.h>
+ #include <linux/cpumask.h>
++#include <linux/sizes.h>
+ #include <linux/threads.h>
+
+ #include <asm/cachectl.h>
+@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_cou
+
+ #endif
+
+-/*
+- * One page above the stack is used for branch delay slot "emulation".
+- * See dsemul.c for details.
+- */
+-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
++#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
++
++extern unsigned long mips_stack_top(void);
++#define STACK_TOP mips_stack_top()
+
+ /*
+ * This decides where the kernel will search for a free chunk of vm
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -31,6 +31,7 @@
+ #include <linux/prctl.h>
+ #include <linux/nmi.h>
+
++#include <asm/abi.h>
+ #include <asm/asm.h>
+ #include <asm/bootinfo.h>
+ #include <asm/cpu.h>
+@@ -38,6 +39,7 @@
+ #include <asm/dsp.h>
+ #include <asm/fpu.h>
+ #include <asm/irq.h>
++#include <asm/mips-cps.h>
+ #include <asm/msa.h>
+ #include <asm/pgtable.h>
+ #include <asm/mipsregs.h>
+@@ -644,6 +646,29 @@ out:
+ return pc;
+ }
+
++unsigned long mips_stack_top(void)
++{
++ unsigned long top = TASK_SIZE & PAGE_MASK;
++
++ /* One page for branch delay slot "emulation" */
++ top -= PAGE_SIZE;
++
++ /* Space for the VDSO, data page & GIC user page */
++ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
++ top -= PAGE_SIZE;
++ top -= mips_gic_present() ? PAGE_SIZE : 0;
++
++ /* Space for cache colour alignment */
++ if (cpu_has_dc_aliases)
++ top -= shm_align_mask + 1;
++
++ /* Space to randomize the VDSO base */
++ if (current->flags & PF_RANDOMIZE)
++ top -= VDSO_RANDOMIZE_SIZE;
++
++ return top;
++}
++
+ /*
+ * Don't forget that the stack pointer must be aligned on a 8 bytes
+ * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+--- a/arch/mips/kernel/vdso.c
++++ b/arch/mips/kernel/vdso.c
+@@ -15,6 +15,7 @@
+ #include <linux/ioport.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/timekeeper_internal.h>
+@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
+ }
+ }
+
++static unsigned long vdso_base(void)
++{
++ unsigned long base;
++
++ /* Skip the delay slot emulation page */
++ base = STACK_TOP + PAGE_SIZE;
++
++ if (current->flags & PF_RANDOMIZE) {
++ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
++ base = PAGE_ALIGN(base);
++ }
++
++ return base;
++}
++
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mips_vdso_image *image = current->thread.abi->vdso;
+@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct l
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
+- base = get_unmapped_area(NULL, 0, size, 0, 0);
++ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
--- /dev/null
+From 4628a64591e6cee181237060961e98c615c33966 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 9 Oct 2018 12:19:17 +0200
+Subject: mm: Preserve _PAGE_DEVMAP across mprotect() calls
+
+From: Jan Kara <jack@suse.cz>
+
+commit 4628a64591e6cee181237060961e98c615c33966 upstream.
+
+Currently _PAGE_DEVMAP bit is not preserved in mprotect(2) calls. As a
+result we will see warnings such as:
+
+BUG: Bad page map in process JobWrk0013 pte:800001803875ea25 pmd:7624381067
+addr:00007f0930720000 vm_flags:280000f9 anon_vma: (null) mapping:ffff97f2384056f0 index:0
+file:457-000000fe00000030-00000009-000000ca-00000001_2001.fileblock fault:xfs_filemap_fault [xfs] mmap:xfs_file_mmap [xfs] readpage: (null)
+CPU: 3 PID: 15848 Comm: JobWrk0013 Tainted: G W 4.12.14-2.g7573215-default #1 SLE12-SP4 (unreleased)
+Hardware name: Intel Corporation S2600WFD/S2600WFD, BIOS SE5C620.86B.01.00.0833.051120182255 05/11/2018
+Call Trace:
+ dump_stack+0x5a/0x75
+ print_bad_pte+0x217/0x2c0
+ ? enqueue_task_fair+0x76/0x9f0
+ _vm_normal_page+0xe5/0x100
+ zap_pte_range+0x148/0x740
+ unmap_page_range+0x39a/0x4b0
+ unmap_vmas+0x42/0x90
+ unmap_region+0x99/0xf0
+ ? vma_gap_callbacks_rotate+0x1a/0x20
+ do_munmap+0x255/0x3a0
+ vm_munmap+0x54/0x80
+ SyS_munmap+0x1d/0x30
+ do_syscall_64+0x74/0x150
+ entry_SYSCALL_64_after_hwframe+0x3d/0xa2
+...
+
+when mprotect(2) gets used on DAX mappings. Also there is a wide variety
+of other failures that can result from the missing _PAGE_DEVMAP flag
+when the area gets used by get_user_pages() later.
+
+Fix the problem by including _PAGE_DEVMAP in a set of flags that get
+preserved by mprotect(2).
+
+Fixes: 69660fd797c3 ("x86, mm: introduce _PAGE_DEVMAP")
+Fixes: ebd31197931d ("powerpc/mm: Add devmap support for ppc64")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/64/pgtable.h | 4 ++--
+ arch/x86/include/asm/pgtable_types.h | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -102,7 +102,7 @@
+ */
+ #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
+- _PAGE_SOFT_DIRTY)
++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ /*
+ * user access blocked by key
+ */
+@@ -120,7 +120,7 @@
+ */
+ #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
+- _PAGE_SOFT_DIRTY)
++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ /*
+ * Mask of bits returned by pte_pgprot()
+ */
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -124,7 +124,7 @@
+ */
+ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
+- _PAGE_SOFT_DIRTY)
++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+
+ /*
--- /dev/null
+From bfba8e5cf28f413aa05571af493871d74438979f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+Date: Fri, 12 Oct 2018 21:34:36 -0700
+Subject: mm/thp: fix call to mmu_notifier in set_pmd_migration_entry() v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jérôme Glisse <jglisse@redhat.com>
+
+commit bfba8e5cf28f413aa05571af493871d74438979f upstream.
+
+Inside set_pmd_migration_entry() we are holding page table locks and thus
+we can not sleep so we can not call invalidate_range_start/end()
+
+So remove call to mmu_notifier_invalidate_range_start/end() because they
+are called inside the function calling set_pmd_migration_entry() (see
+try_to_unmap_one()).
+
+Link: http://lkml.kernel.org/r/20181012181056.7864-1-jglisse@redhat.com
+Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
+Reported-by: Andrea Arcangeli <aarcange@redhat.com>
+Reviewed-by: Zi Yan <zi.yan@cs.rutgers.edu>
+Acked-by: Michal Hocko <mhocko@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: David Nellans <dnellans@nvidia.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2843,9 +2843,6 @@ void set_pmd_migration_entry(struct page
+ if (!(pvmw->pmd && !pvmw->pte))
+ return;
+
+- mmu_notifier_invalidate_range_start(mm, address,
+- address + HPAGE_PMD_SIZE);
+-
+ flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
+ pmdval = *pvmw->pmd;
+ pmdp_invalidate(vma, address, pvmw->pmd);
+@@ -2858,9 +2855,6 @@ void set_pmd_migration_entry(struct page
+ set_pmd_at(mm, address, pvmw->pmd, pmdswp);
+ page_remove_rmap(page, true);
+ put_page(page);
+-
+- mmu_notifier_invalidate_range_end(mm, address,
+- address + HPAGE_PMD_SIZE);
+ }
+
+ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
--- /dev/null
+From 28e2c4bb99aa40f9d5f07ac130cbc4da0ea93079 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Fri, 5 Oct 2018 15:52:03 -0700
+Subject: mm/vmstat.c: fix outdated vmstat_text
+
+From: Jann Horn <jannh@google.com>
+
+commit 28e2c4bb99aa40f9d5f07ac130cbc4da0ea93079 upstream.
+
+7a9cdebdcc17 ("mm: get rid of vmacache_flush_all() entirely") removed the
+VMACACHE_FULL_FLUSHES statistics, but didn't remove the corresponding
+entry in vmstat_text. This causes an out-of-bounds access in
+vmstat_show().
+
+Luckily this only affects kernels with CONFIG_DEBUG_VM_VMACACHE=y, which
+is probably very rare.
+
+Link: http://lkml.kernel.org/r/20181001143138.95119-1-jannh@google.com
+Fixes: 7a9cdebdcc17 ("mm: get rid of vmacache_flush_all() entirely")
+Signed-off-by: Jann Horn <jannh@google.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Roman Gushchin <guro@fb.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Christoph Lameter <clameter@sgi.com>
+Cc: Kemi Wang <kemi.wang@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmstat.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1214,7 +1214,6 @@ const char * const vmstat_text[] = {
+ #ifdef CONFIG_DEBUG_VM_VMACACHE
+ "vmacache_find_calls",
+ "vmacache_find_hits",
+- "vmacache_full_flushes",
+ #endif
+ #ifdef CONFIG_SWAP
+ "swap_ra",
--- /dev/null
+From 41591b38f5f8f78344954b68582b5f00e56ffe61 Mon Sep 17 00:00:00 2001
+From: Chris Boot <bootc@bootc.net>
+Date: Mon, 8 Oct 2018 17:07:30 +0200
+Subject: mmc: block: avoid multiblock reads for the last sector in SPI mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Boot <bootc@bootc.net>
+
+commit 41591b38f5f8f78344954b68582b5f00e56ffe61 upstream.
+
+On some SD cards over SPI, reading the last sector with a multiblock read
+command will leave the card in a bad state.
+
+Remove the last sector from the multiblock read command.
+
+Signed-off-by: Chris Boot <bootc@bootc.net>
+Signed-off-by: Clément Péron <peron.clem@gmail.com>
+Cc: stable@vger.kernel.org # v4.10+
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/block.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1614,6 +1614,16 @@ static void mmc_blk_data_prep(struct mmc
+
+ if (brq->data.blocks > 1) {
+ /*
++ * Some SD cards in SPI mode return a CRC error or even lock up
++ * completely when trying to read the last block using a
++ * multiblock read command.
++ */
++ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
++ (blk_rq_pos(req) + blk_rq_sectors(req) ==
++ get_capacity(md->disk)))
++ brq->data.blocks--;
++
++ /*
+ * After a read error, we redo the request one sector
+ * at a time in order to accurately determine which
+ * sectors can be read successfully.
--- /dev/null
+From 6685b357363bfe295e3ae73665014db4aed62c58 Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Date: Sun, 7 Oct 2018 11:31:51 +0300
+Subject: percpu: stop leaking bitmap metadata blocks
+
+From: Mike Rapoport <rppt@linux.vnet.ibm.com>
+
+commit 6685b357363bfe295e3ae73665014db4aed62c58 upstream.
+
+The commit ca460b3c9627 ("percpu: introduce bitmap metadata blocks")
+introduced bitmap metadata blocks. These metadata blocks are allocated
+whenever a new chunk is created, but they are never freed. Fix it.
+
+Fixes: ca460b3c9627 ("percpu: introduce bitmap metadata blocks")
+Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dennis Zhou <dennis@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/percpu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1208,6 +1208,7 @@ static void pcpu_free_chunk(struct pcpu_
+ {
+ if (!chunk)
+ return;
++ pcpu_mem_free(chunk->md_blocks);
+ pcpu_mem_free(chunk->bound_map);
+ pcpu_mem_free(chunk->alloc_map);
+ pcpu_mem_free(chunk);
--- /dev/null
+From 25e11700b54c7b6b5ebfc4361981dae12299557b Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Tue, 11 Sep 2018 14:45:03 +0300
+Subject: perf script python: Fix export-to-postgresql.py occasional failure
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 25e11700b54c7b6b5ebfc4361981dae12299557b upstream.
+
+Occasional export failures were found to be caused by truncating 64-bit
+pointers to 32-bits. Fix by explicitly setting types for all ctype
+arguments and results.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20180911114504.28516-2-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/scripts/python/export-to-postgresql.py | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/tools/perf/scripts/python/export-to-postgresql.py
++++ b/tools/perf/scripts/python/export-to-postgresql.py
+@@ -204,14 +204,23 @@ from ctypes import *
+ libpq = CDLL("libpq.so.5")
+ PQconnectdb = libpq.PQconnectdb
+ PQconnectdb.restype = c_void_p
++PQconnectdb.argtypes = [ c_char_p ]
+ PQfinish = libpq.PQfinish
++PQfinish.argtypes = [ c_void_p ]
+ PQstatus = libpq.PQstatus
++PQstatus.restype = c_int
++PQstatus.argtypes = [ c_void_p ]
+ PQexec = libpq.PQexec
+ PQexec.restype = c_void_p
++PQexec.argtypes = [ c_void_p, c_char_p ]
+ PQresultStatus = libpq.PQresultStatus
++PQresultStatus.restype = c_int
++PQresultStatus.argtypes = [ c_void_p ]
+ PQputCopyData = libpq.PQputCopyData
++PQputCopyData.restype = c_int
+ PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
+ PQputCopyEnd = libpq.PQputCopyEnd
++PQputCopyEnd.restype = c_int
+ PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
+
+ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
--- /dev/null
+From d005efe18db0b4a123dd92ea8e77e27aee8f99fd Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Tue, 11 Sep 2018 14:45:04 +0300
+Subject: perf script python: Fix export-to-sqlite.py sample columns
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit d005efe18db0b4a123dd92ea8e77e27aee8f99fd upstream.
+
+With the "branches" export option, not all sample columns are exported.
+However the unwanted columns are not at the end of the tuple, as assumed
+by the code. Fix by taking the first 15 and last 3 values, instead of
+the first 18.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20180911114504.28516-3-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/scripts/python/export-to-sqlite.py | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/tools/perf/scripts/python/export-to-sqlite.py
++++ b/tools/perf/scripts/python/export-to-sqlite.py
+@@ -440,7 +440,11 @@ def branch_type_table(*x):
+
+ def sample_table(*x):
+ if branches:
+- bind_exec(sample_query, 18, x)
++ for xx in x[0:15]:
++ sample_query.addBindValue(str(xx))
++ for xx in x[19:22]:
++ sample_query.addBindValue(str(xx))
++ do_query_(sample_query)
+ else:
+ bind_exec(sample_query, 22, x)
+
--- /dev/null
+From f259f896f2348f0302f6f88d4382378cf9d23a7e Mon Sep 17 00:00:00 2001
+From: Marco Felsch <m.felsch@pengutronix.de>
+Date: Tue, 2 Oct 2018 10:06:46 +0200
+Subject: pinctrl: mcp23s08: fix irq and irqchip setup order
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marco Felsch <m.felsch@pengutronix.de>
+
+commit f259f896f2348f0302f6f88d4382378cf9d23a7e upstream.
+
+Since 'commit 02e389e63e35 ("pinctrl: mcp23s08: fix irq setup order")' the
+irq request isn't the last devm_* allocation. Without a deeper look at
+the irq and testing this isn't a good solution. Since this driver relies
+on the devm mechanism, requesting an interrupt should be the last thing
+to avoid memory corruption during unbinding.
+
+'Commit 02e389e63e35 ("pinctrl: mcp23s08: fix irq setup order")' fixed the
+order for the interrupt-controller use case only. The
+mcp23s08_irq_setup() must be split into two to fix it for the
+!interrupt-controller use case and to register the irq last. So the
+irq will be freed first during unbind.
+
+Cc: stable@vger.kernel.org
+Cc: Jan Kundrát <jan.kundrat@cesnet.cz>
+Cc: Dmitry Mastykin <mastichi@gmail.com>
+Cc: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
+Fixes: 82039d244f87 ("pinctrl: mcp23s08: add pinconf support")
+Fixes: 02e389e63e35 ("pinctrl: mcp23s08: fix irq setup order")
+Signed-off-by: Marco Felsch <m.felsch@pengutronix.de>
+Tested-by: Phil Reid <preid@electromag.com.au>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/pinctrl-mcp23s08.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -643,6 +643,14 @@ static int mcp23s08_irq_setup(struct mcp
+ return err;
+ }
+
++ return 0;
++}
++
++static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
++{
++ struct gpio_chip *chip = &mcp->chip;
++ int err;
++
+ err = gpiochip_irqchip_add_nested(chip,
+ &mcp23s08_irq_chip,
+ 0,
+@@ -907,7 +915,7 @@ static int mcp23s08_probe_one(struct mcp
+ }
+
+ if (mcp->irq && mcp->irq_controller) {
+- ret = mcp23s08_irq_setup(mcp);
++ ret = mcp23s08_irqchip_setup(mcp);
+ if (ret)
+ goto fail;
+ }
+@@ -932,6 +940,9 @@ static int mcp23s08_probe_one(struct mcp
+ goto fail;
+ }
+
++ if (mcp->irq)
++ ret = mcp23s08_irq_setup(mcp);
++
+ fail:
+ if (ret < 0)
+ dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
--- /dev/null
+From 24abf2901b18bf941b9f21ea2ce5791f61097ae4 Mon Sep 17 00:00:00 2001
+From: Eric Farman <farman@linux.ibm.com>
+Date: Tue, 2 Oct 2018 03:02:35 +0200
+Subject: s390/cio: Fix how vfio-ccw checks pinned pages
+
+From: Eric Farman <farman@linux.ibm.com>
+
+commit 24abf2901b18bf941b9f21ea2ce5791f61097ae4 upstream.
+
+We have two nested loops to check the entries within the pfn_array_table
+arrays. But we mistakenly use the outer array as an index in our check,
+and completely ignore the indexing performed by the inner loop.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Farman <farman@linux.ibm.com>
+Message-Id: <20181002010235.42483-1-farman@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/cio/vfio_ccw_cp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -172,7 +172,7 @@ static bool pfn_array_table_iova_pinned(
+
+ for (i = 0; i < pat->pat_nr; i++, pa++)
+ for (j = 0; j < pa->pa_nr; j++)
+- if (pa->pa_iova_pfn[i] == iova_pfn)
++ if (pa->pa_iova_pfn[j] == iova_pfn)
+ return true;
+
+ return false;
clk-x86-stop-marking-clocks-as-clk_is_critical.patch
x86-kvm-lapic-always-disable-mmio-interface-in-x2apic-mode.patch
drm-amdgpu-fix-sdma-hqd-destroy-error-on-gfx_v7.patch
+mm-vmstat.c-fix-outdated-vmstat_text.patch
+mips-vdso-always-map-near-top-of-user-memory.patch
+mach64-detect-the-dot-clock-divider-correctly-on-sparc.patch
+percpu-stop-leaking-bitmap-metadata-blocks.patch
+perf-script-python-fix-export-to-postgresql.py-occasional-failure.patch
+perf-script-python-fix-export-to-sqlite.py-sample-columns.patch
+s390-cio-fix-how-vfio-ccw-checks-pinned-pages.patch
+dm-cache-destroy-migration_cache-if-cache-target-registration-failed.patch
+dm-fix-report-zone-remapping-to-account-for-partition-offset.patch
+dm-linear-eliminate-linear_end_io-call-if-config_dm_zoned-disabled.patch
+dm-linear-fix-linear_end_io-conditional-definition.patch
+cgroup-fix-dom_cgrp-propagation-when-enabling-threaded-mode.patch
+mmc-block-avoid-multiblock-reads-for-the-last-sector-in-spi-mode.patch
+pinctrl-mcp23s08-fix-irq-and-irqchip-setup-order.patch
+arm64-perf-reject-stand-alone-chain-events-for-pmuv3.patch
+mm-thp-fix-call-to-mmu_notifier-in-set_pmd_migration_entry-v2.patch
+mm-preserve-_page_devmap-across-mprotect-calls.patch