--- /dev/null
+From a06393ed03167771246c4c43192d9c264bc48412 Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Wed, 18 Jan 2017 21:30:51 +0100
+Subject: can: bcm: fix hrtimer/tasklet termination in bcm op removal
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit a06393ed03167771246c4c43192d9c264bc48412 upstream.
+
+When removing a bcm tx operation, either an hrtimer or a tasklet might
+still be running. As the hrtimer triggers its associated tasklet and vice
+versa, we need to take care to mutually terminate both handlers.
+
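+A minimal sketch of the race with the previous one-shot cancellation
+(simplified, not the patch itself; only the two fields from bcm_op that
+matter here are shown):
+
+	/* old bcm_remove_op(), simplified */
+	hrtimer_cancel(&op->timer);   /* handler may already have scheduled op->tsklet */
+	tasklet_kill(&op->tsklet);    /* tasklet may re-arm op->timer before it dies   */
+	/* -> op->timer can still be armed here and fire after op is freed */
+
+Hence the change below keeps cancelling both until neither the tasklet is
+scheduled or running nor the hrtimer is active.
+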
+Reported-by: Michael Josenhans <michael.josenhans@web.de>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Tested-by: Michael Josenhans <michael.josenhans@web.de>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/can/bcm.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct
+
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+- hrtimer_cancel(&op->timer);
+- hrtimer_cancel(&op->thrtimer);
++ if (op->tsklet.func) {
++ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
++ hrtimer_active(&op->timer)) {
++ hrtimer_cancel(&op->timer);
++ tasklet_kill(&op->tsklet);
++ }
++ }
+
+- if (op->tsklet.func)
+- tasklet_kill(&op->tsklet);
+-
+- if (op->thrtsklet.func)
+- tasklet_kill(&op->thrtsklet);
++ if (op->thrtsklet.func) {
++ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
++ hrtimer_active(&op->thrtimer)) {
++ hrtimer_cancel(&op->thrtimer);
++ tasklet_kill(&op->thrtsklet);
++ }
++ }
+
+ if ((op->frames) && (op->frames != &op->sframe))
+ kfree(op->frames);
--- /dev/null
+From 07cd12945551b63ecb1a349d50a6d69d1d6feb4a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 26 Jan 2017 16:47:28 -0500
+Subject: cgroup: don't online subsystems before cgroup_name/path() are operational
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 07cd12945551b63ecb1a349d50a6d69d1d6feb4a upstream.
+
+While refactoring cgroup creation, a5bca2152036 ("cgroup: factor out
+cgroup_create() out of cgroup_mkdir()") incorrectly onlined subsystems
+before the new cgroup was associated with its kernfs_node. This is fine
+for cgroup proper, but cgroup_name/path() depend on the associated
+kernfs_node, and if a subsystem makes the new cgroup_subsys_state
+visible, which it is allowed to do after onlining, this can lead to a
+NULL pointer dereference.
+
+The current code performs cgroup creation and subsystem onlining in
+cgroup_create(), and cgroup_mkdir() makes the cgroup and subsystems
+visible afterwards. There's no reason to online the subsystems early,
+so we can simply drop the cgroup_apply_control_enable() call from
+cgroup_create() so that the subsystems are onlined and made visible at
+the same time.
+
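+A rough sketch of the resulting order in cgroup_mkdir() (heavily
+simplified; intermediate steps and error handling omitted):
+
+	cgrp = cgroup_create(parent);     /* fully set up, csses not onlined yet  */
+	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
+	cgrp->kn = kn;                    /* cgroup_name/path() work from here on */
+	ret = cgroup_apply_control_enable(cgrp);  /* online subsystems only now   */
+	kernfs_activate(kn);              /* finally make everything visible      */
+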
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Fixes: a5bca2152036 ("cgroup: factor out cgroup_create() out of cgroup_mkdir()")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5219,6 +5219,11 @@ err_free_css:
+ return ERR_PTR(err);
+ }
+
++/*
++ * The returned cgroup is fully initialized including its control mask, but
++ * it isn't associated with its kernfs_node and doesn't have the control
++ * mask applied.
++ */
+ static struct cgroup *cgroup_create(struct cgroup *parent)
+ {
+ struct cgroup_root *root = parent->root;
+@@ -5283,11 +5288,6 @@ static struct cgroup *cgroup_create(stru
+
+ cgroup_propagate_control(cgrp);
+
+- /* @cgrp doesn't have dir yet so the following will only create csses */
+- ret = cgroup_apply_control_enable(cgrp);
+- if (ret)
+- goto out_destroy;
+-
+ return cgrp;
+
+ out_cancel_ref:
+@@ -5295,9 +5295,6 @@ out_cancel_ref:
+ out_free_cgrp:
+ kfree(cgrp);
+ return ERR_PTR(ret);
+-out_destroy:
+- cgroup_destroy_locked(cgrp);
+- return ERR_PTR(ret);
+ }
+
+ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
--- /dev/null
+From 161e6d44a5e2d3f85365cb717d60e363171b39e6 Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
+Date: Mon, 16 Jan 2017 12:23:42 -0200
+Subject: mmc: sdhci: Ignore unexpected CARD_INT interrupts
+
+From: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
+
+commit 161e6d44a5e2d3f85365cb717d60e363171b39e6 upstream.
+
+One of our kernelCI boxes hung at boot because a faulty eSDHC device
+was triggering spurious CARD_INT interrupts for SD cards, causing CMD52
+reads, which are not allowed for SD devices. This adds a sanity check
+to the interrupt path, preventing that illegal command from being sent
+when the CARD_INT interrupt is supposed to be disabled.
+
+This check allows that particular machine to resume booting despite the
+faulty hardware, instead of hanging while dealing with thousands of
+mishandled interrupts.
+
+Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2719,7 +2719,8 @@ static irqreturn_t sdhci_irq(int irq, vo
+ if (intmask & SDHCI_INT_RETUNE)
+ mmc_retune_needed(host->mmc);
+
+- if (intmask & SDHCI_INT_CARD_INT) {
++ if ((intmask & SDHCI_INT_CARD_INT) &&
++ (host->ier & SDHCI_INT_CARD_INT)) {
+ sdhci_enable_sdio_irq_nolock(host, false);
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
--- /dev/null
+From 966d2b04e070bc040319aaebfec09e0144dc3341 Mon Sep 17 00:00:00 2001
+From: Douglas Miller <dougmill@linux.vnet.ibm.com>
+Date: Sat, 28 Jan 2017 06:42:20 -0600
+Subject: percpu-refcount: fix reference leak during percpu-atomic transition
+
+From: Douglas Miller <dougmill@linux.vnet.ibm.com>
+
+commit 966d2b04e070bc040319aaebfec09e0144dc3341 upstream.
+
+percpu_ref_tryget() and percpu_ref_tryget_live() should return
+"true" if and only if they acquire a reference. But the return value
+from atomic_long_inc_not_zero() is a long and may have only high bits
+set, e.g. PERCPU_COUNT_BIAS, while the return value of the tryget
+routines is bool. The reference may therefore actually be acquired
+while the routines return "false", which results in a reference leak
+since the caller assumes it does not need to do a corresponding
+percpu_ref_put().
+
+This was seen when performing CPU hotplug during I/O, as hangs in
+blk_mq_freeze_queue_wait where percpu_ref_kill (blk_mq_freeze_queue_start)
+raced with percpu_ref_tryget (blk_mq_timeout_work).
+Sample stack trace:
+
+__switch_to+0x2c0/0x450
+__schedule+0x2f8/0x970
+schedule+0x48/0xc0
+blk_mq_freeze_queue_wait+0x94/0x120
+blk_mq_queue_reinit_work+0xb8/0x180
+blk_mq_queue_reinit_prepare+0x84/0xa0
+cpuhp_invoke_callback+0x17c/0x600
+cpuhp_up_callbacks+0x58/0x150
+_cpu_up+0xf0/0x1c0
+do_cpu_up+0x120/0x150
+cpu_subsys_online+0x64/0xe0
+device_online+0xb4/0x120
+online_store+0xb4/0xc0
+dev_attr_store+0x68/0xa0
+sysfs_kf_write+0x80/0xb0
+kernfs_fop_write+0x17c/0x250
+__vfs_write+0x6c/0x1e0
+vfs_write+0xd0/0x270
+SyS_write+0x6c/0x110
+system_call+0x38/0xe0
+
+Examination of the queue showed a single reference (no PERCPU_COUNT_BIAS,
+and __PERCPU_REF_DEAD, __PERCPU_REF_ATOMIC set) and no requests.
+However, at the time of the race the count was PERCPU_COUNT_BIAS + 0,
+with __PERCPU_REF_DEAD and __PERCPU_REF_ATOMIC set.
+
+The fix is to make the tryget routines use an actual boolean internally
+instead of the atomic long result truncated to an int.
+
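+To illustrate the truncation, here is a standalone user-space sketch (an
+illustration only, not kernel code), using 1ULL << 63 to stand in for a
+64-bit PERCPU_COUNT_BIAS:
+
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long long raw = 1ULL << 63; /* only the top bit set */
+
+		int  as_int  = (int)raw; /* low 32 bits are zero -> 0 -> "false" -> leak */
+		bool as_bool = raw;      /* any non-zero value -> true                   */
+
+		printf("as_int=%d as_bool=%d\n", as_int, as_bool);
+		return 0;
+	}
+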
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=190751
+Signed-off-by: Douglas Miller <dougmill@linux.vnet.ibm.com>
+Reviewed-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Fixes: e625305b3907 ("percpu-refcount: make percpu_ref based on longs instead of ints")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/percpu-refcount.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct
+ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ {
+ unsigned long __percpu *percpu_count;
+- int ret;
++ bool ret;
+
+ rcu_read_lock_sched();
+
+@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(str
+ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+ {
+ unsigned long __percpu *percpu_count;
+- int ret = false;
++ bool ret = false;
+
+ rcu_read_lock_sched();
+
--- /dev/null
+From d0e287a401d9acf67b75180b26e2d62b7d482652 Mon Sep 17 00:00:00 2001
+From: Rask Ingemann Lambertsen <rask@formelder.dk>
+Date: Sat, 21 Jan 2017 17:11:43 +0100
+Subject: regulator: axp20x: AXP806: Fix dcdcb being set instead of dcdce
+
+From: Rask Ingemann Lambertsen <rask@formelder.dk>
+
+commit d0e287a401d9acf67b75180b26e2d62b7d482652 upstream.
+
+A typo or copy-paste bug means that the register access intended for
+regulator dcdce goes to dcdcb instead. This patch corrects it.
+
+Fixes: 2ca342d391e3 ("regulator: axp20x: Support AXP806 variant")
+Signed-off-by: Rask Ingemann Lambertsen <rask@formelder.dk>
+Acked-by: Chen-Yu Tsai <wens@csie.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/axp20x-regulator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -272,7 +272,7 @@ static const struct regulator_desc axp80
+ 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
+- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
++ AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
+ AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
--- /dev/null
+From 7195439d1d71bc4a6c33cfb57bc669a7cd041041 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Fri, 13 Jan 2017 12:23:35 +0100
+Subject: Revert "bcma: init serial console directly from ChipCommon code"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafał Miłecki <rafal@milecki.pl>
+
+commit 7195439d1d71bc4a6c33cfb57bc669a7cd041041 upstream.
+
+This reverts commit 4c81acab3816 ("bcma: init serial console directly
+from ChipCommon code") as it broke IRQ assignment. Getting an IRQ with
+the bcma_core_irq helper on SoC requires the MIPS core to be set up,
+which happens *after* ChipCommon initialization, so we can't do this
+that early.
+
+This fixes a user-reported regression. It wasn't critical, as serial
+was still somehow working, but the lack of IRQs was making it
+unreliable.
+
+Fixes: 4c81acab3816 ("bcma: init serial console directly from ChipCommon code")
+Reported-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bcma/bcma_private.h | 3 +++
+ drivers/bcma/driver_chipcommon.c | 11 +++--------
+ drivers/bcma/driver_mips.c | 3 +++
+ 3 files changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus)
+ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
++#ifdef CONFIG_BCMA_DRIVER_MIPS
++void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
++#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+ /* driver_chipcommon_b.c */
+ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
+--- a/drivers/bcma/driver_chipcommon.c
++++ b/drivers/bcma/driver_chipcommon.c
+@@ -15,8 +15,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/bcma/bcma.h>
+
+-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+-
+ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
+ u32 mask, u32 value)
+ {
+@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(str
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ bcma_pmu_early_init(cc);
+
+- if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+- bcma_chipco_serial_init(cc);
+-
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_core_chipcommon_flash_detect(cc);
+
+@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcm
+ return res;
+ }
+
+-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
++#ifdef CONFIG_BCMA_DRIVER_MIPS
++void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+ {
+-#if IS_BUILTIN(CONFIG_BCM47XX)
+ unsigned int irq;
+ u32 baud_base;
+ u32 i;
+@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(stru
+ ports[i].baud_base = baud_base;
+ ports[i].reg_shift = 0;
+ }
+-#endif /* CONFIG_BCM47XX */
+ }
++#endif /* CONFIG_BCMA_DRIVER_MIPS */
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(st
+
+ void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+ {
++ struct bcma_bus *bus = mcore->core->bus;
++
+ if (mcore->early_setup_done)
+ return;
+
++ bcma_chipco_serial_init(&bus->drv_cc);
+ bcma_core_mips_nvram_init(mcore);
+
+ mcore->early_setup_done = true;
--- /dev/null
+From 0d5415b489f68b58e1983a53793d25d53098ed4b Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Fri, 3 Feb 2017 05:43:52 +0200
+Subject: Revert "vring: Force use of DMA API for ARM-based systems with legacy devices"
+
+From: Michael S. Tsirkin <mst@redhat.com>
+
+commit 0d5415b489f68b58e1983a53793d25d53098ed4b upstream.
+
+This reverts commit c7070619f3408d9a0dffbed9149e6f00479cf43b.
+
+This has been shown to regress on some ARM systems:
+
+by forcing on DMA API usage for ARM systems, we have inadvertently
+kicked open a hornets' nest in terms of cache-coherency. Namely that
+unless the virtio device is explicitly described as capable of coherent
+DMA by firmware, the DMA APIs on ARM and other DT-based platforms will
+assume it is non-coherent. This turns out to cause a big problem for the
+likes of QEMU and kvmtool, which generate virtio-mmio devices in their
+guest DTs but neglect to add the often-overlooked "dma-coherent"
+property; as a result, we end up with the guest making non-cacheable
+accesses to the vring, the host doing so cacheably, both talking past
+each other and things going horribly wrong.
+
+We are working on a safer work-around.
+
+Fixes: c7070619f340 ("vring: Force use of DMA API for ARM-based systems with legacy devices")
+Reported-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_ring.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct vir
+ if (xen_domain())
+ return true;
+
+- /*
+- * On ARM-based machines, the DMA ops will do the right thing,
+- * so always use them with legacy devices.
+- */
+- if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+- return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+-
+ return false;
+ }
+
mm-memory_hotplug.c-check-start_pfn-in-test_pages_in_a_zone.patch
base-memory-hotplug-fix-a-kernel-oops-in-show_valid_zones.patch
mm-fs-check-for-fatal-signals-in-do_generic_file_read.patch
+tracing-fix-hwlat-kthread-migration.patch
+can-bcm-fix-hrtimer-tasklet-termination-in-bcm-op-removal.patch
+cgroup-don-t-online-subsystems-before-cgroup_name-path-are-operational.patch
+mmc-sdhci-ignore-unexpected-card_int-interrupts.patch
+vhost-fix-initialization-for-vq-is_le.patch
+regulator-axp20x-axp806-fix-dcdcb-being-set-instead-of-dcdce.patch
+percpu-refcount-fix-reference-leak-during-percpu-atomic-transition.patch
+revert-bcma-init-serial-console-directly-from-chipcommon-code.patch
+revert-vring-force-use-of-dma-api-for-arm-based-systems-with-legacy-devices.patch
--- /dev/null
+From 79c6f448c8b79c321e4a1f31f98194e4f6b6cae7 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 30 Jan 2017 19:27:10 -0500
+Subject: tracing: Fix hwlat kthread migration
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 79c6f448c8b79c321e4a1f31f98194e4f6b6cae7 upstream.
+
+The hwlat tracer creates a kernel thread at the start of the tracer. It is
+pinned to a single CPU and will move to the next CPU after each period of
+running. If the user modifies the thread's affinity, it will no longer
+migrate after that happens.
+
+The original code created the thread the first time the tracer was called,
+but it was later changed to destroy the thread when the tracer finished,
+and not to create it again until the next instance of the tracer was
+established. The code that initialized the affinity was only called on the
+initial instantiation of the tracer. After that, the affinity was not
+reinitialized, and the saved mask no longer matched the newly created
+thread's mask, making it appear that the user had modified the thread's
+affinity when they had not, and the thread failed to migrate again.
+
+Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_hwlat.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -266,7 +266,7 @@ out:
+ static struct cpumask save_cpumask;
+ static bool disable_migrate;
+
+-static void move_to_next_cpu(void)
++static void move_to_next_cpu(bool initmask)
+ {
+ static struct cpumask *current_mask;
+ int next_cpu;
+@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
+ return;
+
+ /* Just pick the first CPU on first iteration */
+- if (!current_mask) {
++ if (initmask) {
+ current_mask = &save_cpumask;
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
+ static int kthread_fn(void *data)
+ {
+ u64 interval;
++ bool initmask = true;
+
+ while (!kthread_should_stop()) {
+
+- move_to_next_cpu();
++ move_to_next_cpu(initmask);
++ initmask = false;
+
+ local_irq_disable();
+ get_sample();
--- /dev/null
+From cda8bba0f99d25d2061c531113c14fa41effc3ae Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.vnet.ibm.com>
+Date: Mon, 30 Jan 2017 11:09:36 +0100
+Subject: vhost: fix initialization for vq->is_le
+
+From: Halil Pasic <pasic@linux.vnet.ibm.com>
+
+commit cda8bba0f99d25d2061c531113c14fa41effc3ae upstream.
+
+Currently, under certain circumstances vhost_init_is_le does just a part
+of the initialization job and depends on vhost_reset_is_le being called
+too. For this reason vhost_vq_init_access used to call vhost_reset_is_le
+when vq->private_data is NULL. This is not only counter-intuitive, but
+also a real problem because it breaks vhost_net. The bug was introduced to
+vhost_net with commit 2751c9882b94 ("vhost: cross-endian support for
+legacy devices"). The symptom is corruption of the vq's used.idx field
+(virtio) after VHOST_NET_SET_BACKEND was issued as part of the vhost
+shutdown on a vq with pending descriptors.
+
+Let us make sure the outcome of vhost_init_is_le never depends on the
+state it is actually supposed to initialize, and fix vhost_net by removing
+the reset from vhost_vq_init_access.
+
+With the above, there is no reason for vhost_reset_is_le to do just half
+of the job. Let us make vhost_reset_is_le reinitialize is_le.
+
+Signed-off-by: Halil Pasic <pasic@linux.vnet.ibm.com>
+Reported-by: Michael A. Tebolt <miket@us.ibm.com>
+Reported-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+Fixes: 2751c9882b94 ("vhost: cross-endian support for legacy devices")
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Tested-by: Michael A. Tebolt <miket@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vhost/vhost.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struc
+
+ static void vhost_init_is_le(struct vhost_virtqueue *vq)
+ {
+- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+- vq->is_le = true;
++ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
++ || virtio_legacy_is_little_endian();
+ }
+ #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+
+ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+ {
+- vq->is_le = virtio_legacy_is_little_endian();
++ vhost_init_is_le(vq);
+ }
+
+ struct vhost_flush_struct {
+@@ -1713,10 +1713,8 @@ int vhost_vq_init_access(struct vhost_vi
+ int r;
+ bool is_le = vq->is_le;
+
+- if (!vq->private_data) {
+- vhost_reset_is_le(vq);
++ if (!vq->private_data)
+ return 0;
+- }
+
+ vhost_init_is_le(vq);
+