--- /dev/null
+From 8bfd174312629866efa535193d9e563768ff4307 Mon Sep 17 00:00:00 2001
+From: Sui Chen <suichen6@gmail.com>
+Date: Tue, 9 May 2017 07:47:22 -0500
+Subject: ahci: Acer SA5-271 SSD Not Detected Fix
+
+From: Sui Chen <suichen6@gmail.com>
+
+commit 8bfd174312629866efa535193d9e563768ff4307 upstream.
+
+(Correction in this resend: fixed function name acer_sa5_271_workaround; fixed
+ the always-true condition in the function; fixed description.)
+
+On the Acer Switch Alpha 12 (model number: SA5-271), the internal SSD may not
+get detected because the port_map and CAP.nr_ports combination causes the driver
+to skip the port that is actually connected to the SSD. More specifically,
+either all SATA ports are identified as DUMMY, or all ports go "link down"
+and never come back up.
+
+This problem occurs occasionally. When this problem occurs, CAP may hold a
+value of 0xC734FF00 or 0xC734FF01 and port_map may hold a value of 0x00 or 0x01.
+When this problem does not occur, CAP holds a value of 0xC734FF02 and port_map
+may hold a value of 0x07. Overriding the CAP value to 0xC734FF02 and port_map to
+0x7 significantly reduces the occurrence of this problem.
+
+Link: https://bugzilla.kernel.org/attachment.cgi?id=253091
+Signed-off-by: Sui Chen <suichen6@gmail.com>
+Tested-by: Damian Ivanov <damianatorrpm@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/ahci.c | 38 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_worka
+ {}
+ #endif
+
++/*
++ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
++ * as DUMMY, or detected but eventually get a "link down" and never get up
++ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
++ * port_map may hold a value of 0x00.
++ *
++ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
++ * and can significantly reduce the occurrence of the problem.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
++ */
++static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
++ struct pci_dev *pdev)
++{
++ static const struct dmi_system_id sysids[] = {
++ {
++ .ident = "Acer Switch Alpha 12",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
++ },
++ },
++ { }
++ };
++
++ if (dmi_check_system(sysids)) {
++ dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
++ if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
++ hpriv->port_map = 0x7;
++ hpriv->cap = 0xC734FF02;
++ }
++ }
++}
++
+ #ifdef CONFIG_ARM64
+ /*
+ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
+@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev
+ "online status unreliable, applying workaround\n");
+ }
+
++
++ /* Acer SA5-271 workaround modifies private_data */
++ acer_sa5_271_workaround(hpriv, pdev);
++
+ /* CAP.NP sometimes indicate the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
--- /dev/null
+From b425e50492583b10cceb388af36ef0bd3bdf842a Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Wed, 31 May 2017 14:43:45 -0700
+Subject: block: Avoid that blk_exit_rl() triggers a use-after-free
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit b425e50492583b10cceb388af36ef0bd3bdf842a upstream.
+
+Since the introduction of .init_rq_fn() and .exit_rq_fn() it is
+essential that the memory allocated for struct request_queue
+stays around until all blk_exit_rl() calls have finished. Hence
+make blk_init_rl() take a reference on struct request_queue.
+
+This patch fixes the following crash:
+
+general protection fault: 0000 [#2] SMP
+CPU: 3 PID: 28 Comm: ksoftirqd/3 Tainted: G D 4.12.0-rc2-dbg+ #2
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014
+task: ffff88013a108040 task.stack: ffffc9000071c000
+RIP: 0010:free_request_size+0x1a/0x30
+RSP: 0018:ffffc9000071fd38 EFLAGS: 00010202
+RAX: 6b6b6b6b6b6b6b6b RBX: ffff880067362a88 RCX: 0000000000000003
+RDX: ffff880067464178 RSI: ffff880067362a88 RDI: ffff880135ea4418
+RBP: ffffc9000071fd40 R08: 0000000000000000 R09: 0000000100180009
+R10: ffffc9000071fd38 R11: ffffffff81110800 R12: ffff88006752d3d8
+R13: ffff88006752d3d8 R14: ffff88013a108040 R15: 000000000000000a
+FS: 0000000000000000(0000) GS:ffff88013fd80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fa8ec1edb00 CR3: 0000000138ee8000 CR4: 00000000001406e0
+Call Trace:
+ mempool_destroy.part.10+0x21/0x40
+ mempool_destroy+0xe/0x10
+ blk_exit_rl+0x12/0x20
+ blkg_free+0x4d/0xa0
+ __blkg_release_rcu+0x59/0x170
+ rcu_process_callbacks+0x260/0x4e0
+ __do_softirq+0x116/0x250
+ smpboot_thread_fn+0x123/0x1e0
+ kthread+0x109/0x140
+ ret_from_fork+0x31/0x40
+
+Fixes: commit e9c787e65c0c ("scsi: allocate scsi_cmnd structures as part of struct request")
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-cgroup.c | 2 +-
+ block/blk-core.c | 10 ++++++++--
+ block/blk-sysfs.c | 2 +-
+ block/blk.h | 2 +-
+ 4 files changed, 11 insertions(+), 5 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *b
+ blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+
+ if (blkg->blkcg != &blkcg_root)
+- blk_exit_rl(&blkg->rl);
++ blk_exit_rl(blkg->q, &blkg->rl);
+
+ blkg_rwstat_exit(&blkg->stat_ios);
+ blkg_rwstat_exit(&blkg->stat_bytes);
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -643,13 +643,19 @@ int blk_init_rl(struct request_list *rl,
+ if (!rl->rq_pool)
+ return -ENOMEM;
+
++ if (rl != &q->root_rl)
++ WARN_ON_ONCE(!blk_get_queue(q));
++
+ return 0;
+ }
+
+-void blk_exit_rl(struct request_list *rl)
++void blk_exit_rl(struct request_queue *q, struct request_list *rl)
+ {
+- if (rl->rq_pool)
++ if (rl->rq_pool) {
+ mempool_destroy(rl->rq_pool);
++ if (rl != &q->root_rl)
++ blk_put_queue(q);
++ }
+ }
+
+ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -819,7 +819,7 @@ static void blk_release_queue(struct kob
+ elevator_exit(q, q->elevator);
+ }
+
+- blk_exit_rl(&q->root_rl);
++ blk_exit_rl(q, &q->root_rl);
+
+ if (q->queue_tags)
+ __blk_queue_free_tags(q);
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flu
+
+ int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ gfp_t gfp_mask);
+-void blk_exit_rl(struct request_list *rl);
++void blk_exit_rl(struct request_queue *q, struct request_list *rl);
+ void init_request_from_bio(struct request *req, struct bio *bio);
+ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio);
--- /dev/null
+From 5be6b75610cefd1e21b98a218211922c2feb6e08 Mon Sep 17 00:00:00 2001
+From: Hou Tao <houtao1@huawei.com>
+Date: Wed, 1 Mar 2017 09:02:33 +0800
+Subject: cfq-iosched: fix the delay of cfq_group's vdisktime under iops mode
+
+From: Hou Tao <houtao1@huawei.com>
+
+commit 5be6b75610cefd1e21b98a218211922c2feb6e08 upstream.
+
+When adding a cfq_group into the cfq service tree, we use CFQ_IDLE_DELAY
+as the delay of the cfq_group's vdisktime if other cfq_groups already
+exist in the tree.
+
+When cfq is under iops mode, commit 9a7f38c42c2b ("cfq-iosched: Convert
+from jiffies to nanoseconds") could result in a large iops delay and
+lead to an abnormal io schedule delay for the added cfq_group. To fix
+it, we just need to revert to the old CFQ_IDLE_DELAY value: HZ / 5
+when iops mode is enabled.
+
+Despite having the same value, the delay of a cfq_queue in the idle class
+and the delay of a cfq_group are different things, so I define two new
+macros for the delay of a cfq_group under time-slice mode and iops mode.
+
+Fixes: 9a7f38c42c2b ("cfq-iosched: Convert from jiffies to nanoseconds")
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Acked-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/cfq-iosched.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u
+ static const int cfq_hist_divisor = 4;
+
+ /*
+- * offset from end of service tree
++ * offset from end of queue service tree for idle class
+ */
+ #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
++/* offset from end of group service tree under time slice mode */
++#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
++/* offset from end of group service under IOPS mode */
++#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
+
+ /*
+ * below this threshold, we consider thinktime immediate
+@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb
+ cfqg->vfraction = max_t(unsigned, vfr, 1);
+ }
+
++static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
++{
++ if (!iops_mode(cfqd))
++ return CFQ_SLICE_MODE_GROUP_DELAY;
++ else
++ return CFQ_IOPS_MODE_GROUP_DELAY;
++}
++
+ static void
+ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+ {
+@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_da
+ n = rb_last(&st->rb);
+ if (n) {
+ __cfqg = rb_entry_cfqg(n);
+- cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
++ cfqg->vdisktime = __cfqg->vdisktime +
++ cfq_get_cfqg_vdisktime_delay(cfqd);
+ } else
+ cfqg->vdisktime = st->min_vdisktime;
+ cfq_group_service_tree_add(st, cfqg);
--- /dev/null
+From 33c35aa4817864e056fd772230b0c6b552e36ea2 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Mon, 15 May 2017 09:34:06 -0400
+Subject: cgroup: Prevent kill_css() from being called more than once
+
+From: Waiman Long <longman@redhat.com>
+
+commit 33c35aa4817864e056fd772230b0c6b552e36ea2 upstream.
+
+The kill_css() function may be called more than once when the css has been
+killed but not yet physically removed, and the cgroup hosting the css is
+then removed. This patch prevents any harm from being done when that
+happens.
+
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cgroup-defs.h | 1 +
+ kernel/cgroup/cgroup.c | 5 +++++
+ 2 files changed, 6 insertions(+)
+
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -47,6 +47,7 @@ enum {
+ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
+ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+ CSS_VISIBLE = (1 << 3), /* css is visible to userland */
++ CSS_DYING = (1 << 4), /* css is dying */
+ };
+
+ /* bits in struct cgroup flags field */
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsy
+ {
+ lockdep_assert_held(&cgroup_mutex);
+
++ if (css->flags & CSS_DYING)
++ return;
++
++ css->flags |= CSS_DYING;
++
+ /*
+ * This must happen before css is disassociated with its cgroup.
+ * See seq_css() for details.
--- /dev/null
+From 6c77003677d5f1ce15f26d24360cb66c0bc07bb3 Mon Sep 17 00:00:00 2001
+From: David Arcari <darcari@redhat.com>
+Date: Fri, 26 May 2017 11:37:31 -0400
+Subject: cpufreq: cpufreq_register_driver() should return -ENODEV if init fails
+
+From: David Arcari <darcari@redhat.com>
+
+commit 6c77003677d5f1ce15f26d24360cb66c0bc07bb3 upstream.
+
+For a driver that does not set the CPUFREQ_STICKY flag, if all of the
+->init() calls fail, cpufreq_register_driver() should return an error.
+This will prevent the driver from loading.
+
+Fixes: ce1bcfe94db8 (cpufreq: check cpufreq_policy_list instead of scanning policies for all CPUs)
+Signed-off-by: David Arcari <darcari@redhat.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufr
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
+ list_empty(&cpufreq_policy_list)) {
+ /* if all ->init() calls failed, unregister */
++ ret = -ENODEV;
+ pr_debug("%s: No CPU initialized for driver %s\n", __func__,
+ driver_data->name);
+ goto err_if_unreg;
--- /dev/null
+From 0037ae47812b1f431cc602100d1d51f37d77b61e Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+Date: Mon, 22 May 2017 16:05:22 +0200
+Subject: dmaengine: ep93xx: Always start from BASE0
+
+From: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+commit 0037ae47812b1f431cc602100d1d51f37d77b61e upstream.
+
+The current buffer is being reset to zero on device_free_chan_resources()
+but not on device_terminate_all(). It could happen that HW is restarted and
+expects BASE0 to be used, but the driver is not synchronized and will start
+from BASE1. One solution is to reset the buffer explicitly in
+m2p_hw_setup().
+
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ep93xx_dma.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/dma/ep93xx_dma.c
++++ b/drivers/dma/ep93xx_dma.c
+@@ -323,6 +323,8 @@ static int m2p_hw_setup(struct ep93xx_dm
+ | M2P_CONTROL_ENABLE;
+ m2p_set_control(edmac, control);
+
++ edmac->buffer = 0;
++
+ return 0;
+ }
+
--- /dev/null
+From 98f9de366fccee7572c646af226b2d4b4841e3b5 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+Date: Mon, 22 May 2017 16:05:23 +0200
+Subject: dmaengine: ep93xx: Don't drain the transfers in terminate_all()
+
+From: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+commit 98f9de366fccee7572c646af226b2d4b4841e3b5 upstream.
+
+Draining the transfers in the terminate_all callback happens with IRQs
+disabled and therefore induces huge latency:
+
+ irqsoff latency trace v1.1.5 on 4.11.0
+ --------------------------------------------------------------------
+ latency: 39770 us, #57/57, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0)
+ -----------------
+ | task: process-129 (uid:0 nice:0 policy:2 rt_prio:50)
+ -----------------
+ => started at: _snd_pcm_stream_lock_irqsave
+ => ended at: snd_pcm_stream_unlock_irqrestore
+
+ _------=> CPU#
+ / _-----=> irqs-off
+ | / _----=> need-resched
+ || / _---=> hardirq/softirq
+ ||| / _--=> preempt-depth
+ |||| / delay
+ cmd pid ||||| time | caller
+ \ / ||||| \ | /
+process-129 0d.s. 3us : _snd_pcm_stream_lock_irqsave
+process-129 0d.s1 9us : snd_pcm_stream_lock <-_snd_pcm_stream_lock_irqsave
+process-129 0d.s1 15us : preempt_count_add <-snd_pcm_stream_lock
+process-129 0d.s2 22us : preempt_count_add <-snd_pcm_stream_lock
+process-129 0d.s3 32us : snd_pcm_update_hw_ptr0 <-snd_pcm_period_elapsed
+process-129 0d.s3 41us : soc_pcm_pointer <-snd_pcm_update_hw_ptr0
+process-129 0d.s3 50us : dmaengine_pcm_pointer <-soc_pcm_pointer
+process-129 0d.s3 58us+: snd_dmaengine_pcm_pointer_no_residue <-dmaengine_pcm_pointer
+process-129 0d.s3 96us : update_audio_tstamp <-snd_pcm_update_hw_ptr0
+process-129 0d.s3 103us : snd_pcm_update_state <-snd_pcm_update_hw_ptr0
+process-129 0d.s3 112us : xrun <-snd_pcm_update_state
+process-129 0d.s3 119us : snd_pcm_stop <-xrun
+process-129 0d.s3 126us : snd_pcm_action <-snd_pcm_stop
+process-129 0d.s3 134us : snd_pcm_action_single <-snd_pcm_action
+process-129 0d.s3 141us : snd_pcm_pre_stop <-snd_pcm_action_single
+process-129 0d.s3 150us : snd_pcm_do_stop <-snd_pcm_action_single
+process-129 0d.s3 157us : soc_pcm_trigger <-snd_pcm_do_stop
+process-129 0d.s3 166us : snd_dmaengine_pcm_trigger <-soc_pcm_trigger
+process-129 0d.s3 175us : ep93xx_dma_terminate_all <-snd_dmaengine_pcm_trigger
+process-129 0d.s3 182us : preempt_count_add <-ep93xx_dma_terminate_all
+process-129 0d.s4 189us*: m2p_hw_shutdown <-ep93xx_dma_terminate_all
+process-129 0d.s4 39472us : m2p_hw_setup <-ep93xx_dma_terminate_all
+
+ ... rest skipped...
+
+process-129 0d.s. 40080us : <stack trace>
+ => ep93xx_dma_tasklet
+ => tasklet_action
+ => __do_softirq
+ => irq_exit
+ => __handle_domain_irq
+ => vic_handle_irq
+ => __irq_usr
+ => 0xb66c6668
+
+Just abort the transfers and warn if the HW state is not what we expect.
+Move draining into device_synchronize callback.
+
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ep93xx_dma.c | 37 +++++++++++++++++++++++++++++++++----
+ 1 file changed, 33 insertions(+), 4 deletions(-)
+
+--- a/drivers/dma/ep93xx_dma.c
++++ b/drivers/dma/ep93xx_dma.c
+@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
+ struct dma_device dma_dev;
+ bool m2m;
+ int (*hw_setup)(struct ep93xx_dma_chan *);
++ void (*hw_synchronize)(struct ep93xx_dma_chan *);
+ void (*hw_shutdown)(struct ep93xx_dma_chan *);
+ void (*hw_submit)(struct ep93xx_dma_chan *);
+ int (*hw_interrupt)(struct ep93xx_dma_chan *);
+@@ -333,21 +334,27 @@ static inline u32 m2p_channel_state(stru
+ return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+ }
+
+-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
++static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
+ {
++ unsigned long flags;
+ u32 control;
+
++ spin_lock_irqsave(&edmac->lock, flags);
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
++ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+- cpu_relax();
++ schedule();
++}
+
++static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
++{
+ m2p_set_control(edmac, 0);
+
+- while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+- cpu_relax();
++ while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
++ dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
+ }
+
+ static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+@@ -1163,6 +1170,26 @@ fail:
+ }
+
+ /**
++ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
++ * current context.
++ * @chan: channel
++ *
++ * Synchronizes the DMA channel termination to the current context. When this
++ * function returns it is guaranteed that all transfers for previously issued
++ * descriptors have stopped and and it is safe to free the memory associated
++ * with them. Furthermore it is guaranteed that all complete callback functions
++ * for a previously submitted descriptor have finished running and it is safe to
++ * free resources accessed from within the complete callbacks.
++ */
++static void ep93xx_dma_synchronize(struct dma_chan *chan)
++{
++ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
++
++ if (edmac->edma->hw_synchronize)
++ edmac->edma->hw_synchronize(edmac);
++}
++
++/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+@@ -1325,6 +1352,7 @@ static int __init ep93xx_dma_probe(struc
+ dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+ dma_dev->device_config = ep93xx_dma_slave_config;
++ dma_dev->device_synchronize = ep93xx_dma_synchronize;
+ dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
+ dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+ dma_dev->device_tx_status = ep93xx_dma_tx_status;
+@@ -1342,6 +1370,7 @@ static int __init ep93xx_dma_probe(struc
+ } else {
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
++ edma->hw_synchronize = m2p_hw_synchronize;
+ edma->hw_setup = m2p_hw_setup;
+ edma->hw_shutdown = m2p_hw_shutdown;
+ edma->hw_submit = m2p_hw_submit;
--- /dev/null
+From bc473da1ed726c975ad47f8d7d27631de11356d8 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 5 May 2017 11:57:46 +0200
+Subject: dmaengine: mv_xor_v2: do not use descriptors not acked by async_tx
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit bc473da1ed726c975ad47f8d7d27631de11356d8 upstream.
+
+Descriptors that have not been acknowledged by the async_tx layer
+should not be re-used, so this commit adjusts the implementation of
+mv_xor_v2_prep_sw_desc() to skip descriptors for which
+async_tx_test_ack() is false.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 32 ++++++++++++++++++++++----------
+ 1 file changed, 22 insertions(+), 10 deletions(-)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -344,6 +344,7 @@ static struct mv_xor_v2_sw_desc *
+ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+ {
+ struct mv_xor_v2_sw_desc *sw_desc;
++ bool found = false;
+
+ /* Lock the channel */
+ spin_lock_bh(&xor_dev->lock);
+@@ -355,19 +356,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_
+ return NULL;
+ }
+
+- /* get a free SW descriptor from the SW DESQ */
+- sw_desc = list_first_entry(&xor_dev->free_sw_desc,
+- struct mv_xor_v2_sw_desc, free_list);
++ list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
++ if (async_tx_test_ack(&sw_desc->async_tx)) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ spin_unlock_bh(&xor_dev->lock);
++ return NULL;
++ }
++
+ list_del(&sw_desc->free_list);
+
+ /* Release the channel */
+ spin_unlock_bh(&xor_dev->lock);
+
+- /* set the async tx descriptor */
+- dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
+- sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+- async_tx_ack(&sw_desc->async_tx);
+-
+ return sw_desc;
+ }
+
+@@ -785,8 +790,15 @@ static int mv_xor_v2_probe(struct platfo
+
+ /* add all SW descriptors to the free list */
+ for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
+- xor_dev->sw_desq[i].idx = i;
+- list_add(&xor_dev->sw_desq[i].free_list,
++ struct mv_xor_v2_sw_desc *sw_desc =
++ xor_dev->sw_desq + i;
++ sw_desc->idx = i;
++ dma_async_tx_descriptor_init(&sw_desc->async_tx,
++ &xor_dev->dmachan);
++ sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
++ async_tx_ack(&sw_desc->async_tx);
++
++ list_add(&sw_desc->free_list,
+ &xor_dev->free_sw_desc);
+ }
+
--- /dev/null
+From ab2c5f0a77fe49bdb6e307b397496373cb47d2c2 Mon Sep 17 00:00:00 2001
+From: Hanna Hawa <hannah@marvell.com>
+Date: Fri, 5 May 2017 11:57:47 +0200
+Subject: dmaengine: mv_xor_v2: enable XOR engine after its configuration
+
+From: Hanna Hawa <hannah@marvell.com>
+
+commit ab2c5f0a77fe49bdb6e307b397496373cb47d2c2 upstream.
+
+The engine was enabled prior to its configuration, which isn't
+correct. This patch relocates the activation of the XOR engine to
+after its configuration.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Hanna Hawa <hannah@marvell.com>
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -653,9 +653,6 @@ static int mv_xor_v2_descq_init(struct m
+ writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
+
+- /* enable the DMA engine */
+- writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+-
+ /*
+ * This is a temporary solution, until we activate the
+ * SMMU. Set the attributes for reading & writing data buffers
+@@ -699,6 +696,9 @@ static int mv_xor_v2_descq_init(struct m
+ reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
+ writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+
++ /* enable the DMA engine */
++ writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
++
+ return 0;
+ }
+
--- /dev/null
+From 44d5887a8bf1e86915c8ff647337cb138149da82 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 5 May 2017 11:57:48 +0200
+Subject: dmaengine: mv_xor_v2: fix tx_submit() implementation
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit 44d5887a8bf1e86915c8ff647337cb138149da82 upstream.
+
+mv_xor_v2_tx_submit() gets the next available HW descriptor by
+calling mv_xor_v2_get_desq_write_ptr(), which reads a HW register
+indicating the next available HW descriptor. This worked fine when HW
+descriptors were issued for processing directly in tx_submit().
+
+However, as part of the review process of the driver, a change was
+requested to move the actual kick-off of HW descriptor processing to
+->issue_pending(). Due to this, reading the HW register to know the next
+available HW descriptor no longer works.
+
+So instead of using this HW register, we implemented a software index
+pointing to the next available HW descriptor.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 22 +++++-----------------
+ 1 file changed, 5 insertions(+), 17 deletions(-)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
+ struct mv_xor_v2_sw_desc *sw_desq;
+ int desc_size;
+ unsigned int npendings;
++ unsigned int hw_queue_idx;
+ };
+
+ /**
+@@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(s
+ }
+
+ /*
+- * Return the next available index in the DESQ.
+- */
+-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
+-{
+- /* read the index for the next available descriptor in the DESQ */
+- u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
+-
+- return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
+- & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
+-}
+-
+-/*
+ * notify the engine of new descriptors, and update the available index.
+ */
+ static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
+@@ -306,7 +295,6 @@ static irqreturn_t mv_xor_v2_interrupt_h
+ static dma_cookie_t
+ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+ {
+- int desq_ptr;
+ void *dest_hw_desc;
+ dma_cookie_t cookie;
+ struct mv_xor_v2_sw_desc *sw_desc =
+@@ -322,15 +310,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_
+ spin_lock_bh(&xor_dev->lock);
+ cookie = dma_cookie_assign(tx);
+
+- /* get the next available slot in the DESQ */
+- desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
+-
+ /* copy the HW descriptor from the SW descriptor to the DESQ */
+- dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
++ dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
+
+ memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
+
+ xor_dev->npendings++;
++ xor_dev->hw_queue_idx++;
++ if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
++ xor_dev->hw_queue_idx = 0;
+
+ spin_unlock_bh(&xor_dev->lock);
+
--- /dev/null
+From eb8df543e444492328f506adffc7dfe94111f1bd Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 5 May 2017 11:57:44 +0200
+Subject: dmaengine: mv_xor_v2: handle mv_xor_v2_prep_sw_desc() error properly
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit eb8df543e444492328f506adffc7dfe94111f1bd upstream.
+
+mv_xor_v2_prep_sw_desc() is called from a few different places in
+the driver, but we never take into account the fact that it might
+return NULL. This commit fixes that, ensuring that we don't panic if
+there are no more descriptors available.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -389,6 +389,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_cha
+ __func__, len, &src, &dest, flags);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
++ if (!sw_desc)
++ return NULL;
+
+ sw_desc->async_tx.flags = flags;
+
+@@ -443,6 +445,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *
+ __func__, src_cnt, len, &dest, flags);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
++ if (!sw_desc)
++ return NULL;
+
+ sw_desc->async_tx.flags = flags;
+
+@@ -491,6 +495,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
++ if (!sw_desc)
++ return NULL;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
--- /dev/null
+From 2aab4e18152cd30cb5d2f4c27629fc8a04aed979 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 5 May 2017 11:57:45 +0200
+Subject: dmaengine: mv_xor_v2: properly handle wrapping in the array of HW descriptors
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit 2aab4e18152cd30cb5d2f4c27629fc8a04aed979 upstream.
+
+mv_xor_v2_tasklet() loops over completed HW descriptors. Before the
+loop, it initializes 'next_pending_hw_desc' to the first HW descriptor
+to handle, and then the loop simply increments this pointer, without
+taking care of wrapping when we reach the last HW descriptor. The
+'pending_ptr' index was being wrapped back to 0 at the end, but it
+wasn't used in each iteration of the loop to calculate
+next_pending_hw_desc.
+
+This commit fixes that, and makes next_pending_hw_desc a variable local
+to the loop itself.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -560,7 +560,6 @@ static void mv_xor_v2_tasklet(unsigned l
+ {
+ struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+ int pending_ptr, num_of_pending, i;
+- struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
+ struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
+
+ dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
+@@ -568,17 +567,10 @@ static void mv_xor_v2_tasklet(unsigned l
+ /* get the pending descriptors parameters */
+ num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
+
+- /* next HW descriptor */
+- next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
+-
+ /* loop over free descriptors */
+ for (i = 0; i < num_of_pending; i++) {
+-
+- if (pending_ptr > MV_XOR_V2_DESC_NUM)
+- pending_ptr = 0;
+-
+- if (next_pending_sw_desc != NULL)
+- next_pending_hw_desc++;
++ struct mv_xor_v2_descriptor *next_pending_hw_desc =
++ xor_dev->hw_desq_virt + pending_ptr;
+
+ /* get the SW descriptor related to the HW descriptor */
+ next_pending_sw_desc =
+@@ -614,6 +606,8 @@ static void mv_xor_v2_tasklet(unsigned l
+
+ /* increment the next descriptor */
+ pending_ptr++;
++ if (pending_ptr >= MV_XOR_V2_DESC_NUM)
++ pending_ptr = 0;
+ }
+
+ if (num_of_pending != 0) {
--- /dev/null
+From 9dd4f319bac25334a869d9276b19eac9e478fd33 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 5 May 2017 11:57:49 +0200
+Subject: dmaengine: mv_xor_v2: remove interrupt coalescing
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit 9dd4f319bac25334a869d9276b19eac9e478fd33 upstream.
+
+The current implementation of interrupt coalescing doesn't work, because
+it doesn't configure the coalescing timer, which is needed to make sure
+we get an interrupt at some point.
+
+As a fix for stable, we simply remove the interrupt coalescing
+functionality. It will be re-introduced properly in a future commit.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 25 -------------------------
+ 1 file changed, 25 deletions(-)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -246,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struc
+ return MV_XOR_V2_EXT_DESC_SIZE;
+ }
+
+-/*
+- * Set the IMSG threshold
+- */
+-static inline
+-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+-{
+- u32 reg;
+-
+- reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+-
+- reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+- reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+-
+- writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+-}
+-
+ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+ {
+ struct mv_xor_v2_device *xor_dev = data;
+@@ -277,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_h
+ if (!ndescs)
+ return IRQ_NONE;
+
+- /*
+- * Update IMSG threshold, to disable new IMSG interrupts until
+- * end of the tasklet
+- */
+- mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
+-
+ /* schedule a tasklet to handle descriptors callbacks */
+ tasklet_schedule(&xor_dev->irq_tasklet);
+
+@@ -607,9 +585,6 @@ static void mv_xor_v2_tasklet(unsigned l
+ /* free the descriptores */
+ mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
+ }
+-
+- /* Update IMSG threshold, to enable new IMSG interrupts */
+- mv_xor_v2_set_imsg_thrd(xor_dev, 0);
+ }
+
+ /*
--- /dev/null
+From b2d3c270f9f2fb82518ac500a9849c3aaf503852 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 5 May 2017 11:57:50 +0200
+Subject: dmaengine: mv_xor_v2: set DMA mask to 40 bits
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit b2d3c270f9f2fb82518ac500a9849c3aaf503852 upstream.
+
+The XORv2 engine on Armada 7K/8K can only access the first 40 bits of
+the physical address space, so the DMA mask must be set accordingly.
+
+Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor_v2.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -693,6 +693,10 @@ static int mv_xor_v2_probe(struct platfo
+
+ platform_set_drvdata(pdev, xor_dev);
+
++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
++ if (ret)
++ return ret;
++
+ xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
--- /dev/null
+From 9a445bbb1607d9f14556a532453dd86d1b7e381e Mon Sep 17 00:00:00 2001
+From: Hiroyuki Yokoyama <hiroyuki.yokoyama.vx@renesas.com>
+Date: Mon, 15 May 2017 17:49:52 +0900
+Subject: dmaengine: usb-dmac: Fix DMAOR AE bit definition
+
+From: Hiroyuki Yokoyama <hiroyuki.yokoyama.vx@renesas.com>
+
+commit 9a445bbb1607d9f14556a532453dd86d1b7e381e upstream.
+
+This patch fixes the register definition of the AE (Address Error flag) bit.
+
+Fixes: 0c1c8ff32fa2 ("dmaengine: usb-dmac: Add Renesas USB DMA Controller (USB-DMAC) driver")
+Signed-off-by: Hiroyuki Yokoyama <hiroyuki.yokoyama.vx@renesas.com>
+[Shimoda: add Fixes and Cc tags in the commit log]
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/sh/usb-dmac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma/sh/usb-dmac.c
++++ b/drivers/dma/sh/usb-dmac.c
+@@ -117,7 +117,7 @@ struct usb_dmac {
+ #define USB_DMASWR 0x0008
+ #define USB_DMASWR_SWR (1 << 0)
+ #define USB_DMAOR 0x0060
+-#define USB_DMAOR_AE (1 << 2)
++#define USB_DMAOR_AE (1 << 1)
+ #define USB_DMAOR_DME (1 << 0)
+
+ #define USB_DMASAR 0x0000
--- /dev/null
+From 75fb636324a839c2c31be9f81644034c6142e469 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 1 Jun 2017 13:54:30 +0200
+Subject: drm: Fix oops + Xserver hang when unplugging USB drm devices
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 75fb636324a839c2c31be9f81644034c6142e469 upstream.
+
+commit a39be606f99d ("drm: Do a full device unregister when unplugging")
+causes backtraces like this one when unplugging a USB drm device while
+it is in use:
+
+usb 2-3: USB disconnect, device number 25
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 242 at drivers/gpu/drm/drm_mode_config.c:424
+ drm_mode_config_cleanup+0x220/0x280 [drm]
+...
+RIP: 0010:drm_mode_config_cleanup+0x220/0x280 [drm]
+...
+Call Trace:
+ gm12u320_modeset_cleanup+0xe/0x10 [gm12u320]
+ gm12u320_driver_unload+0x35/0x70 [gm12u320]
+ drm_dev_unregister+0x3c/0xe0 [drm]
+ drm_unplug_dev+0x12/0x60 [drm]
+ gm12u320_usb_disconnect+0x36/0x40 [gm12u320]
+ usb_unbind_interface+0x72/0x280
+ device_release_driver_internal+0x158/0x210
+ device_release_driver+0x12/0x20
+ bus_remove_device+0x104/0x180
+ device_del+0x1d2/0x350
+ usb_disable_device+0x9f/0x270
+ usb_disconnect+0xc6/0x260
+...
+[drm:drm_mode_config_cleanup [drm]] *ERROR* connector Unknown-1 leaked!
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 242 at drivers/gpu/drm/drm_mode_config.c:458
+ drm_mode_config_cleanup+0x268/0x280 [drm]
+...
+<same Call Trace>
+---[ end trace 80df975dae439ed6 ]---
+general protection fault: 0000 [#1] SMP
+...
+Call Trace:
+ ? __switch_to+0x225/0x450
+ drm_mode_rmfb_work_fn+0x55/0x70 [drm]
+ process_one_work+0x193/0x3c0
+ worker_thread+0x4a/0x3a0
+...
+RIP: drm_framebuffer_remove+0x62/0x3f0 [drm] RSP: ffffb776c39dfd98
+---[ end trace 80df975dae439ed7 ]---
+
+After this the system is unusable. The problem is caused by drm_dev_unregister
+getting called immediately on unplug, which calls the driver's unload
+function, which calls drm_mode_config_cleanup, which removes the framebuffer
+object while userspace is still holding a reference to it.
+
+Reverting commit a39be606f99d ("drm: Do a full device unregister
+when unplugging") leads to the following oops on unplug instead,
+when userspace closes the last fd referencing the drm_dev:
+
+sysfs group 'power' not found for kobject 'card1-Unknown-1'
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 2459 at fs/sysfs/group.c:237
+ sysfs_remove_group+0x80/0x90
+...
+RIP: 0010:sysfs_remove_group+0x80/0x90
+...
+Call Trace:
+ dpm_sysfs_remove+0x57/0x60
+ device_del+0xfd/0x350
+ device_unregister+0x1a/0x60
+ drm_sysfs_connector_remove+0x39/0x50 [drm]
+ drm_connector_unregister+0x5a/0x70 [drm]
+ drm_connector_unregister_all+0x45/0xa0 [drm]
+ drm_modeset_unregister_all+0x12/0x30 [drm]
+ drm_dev_unregister+0xca/0xe0 [drm]
+ drm_put_dev+0x32/0x60 [drm]
+ drm_release+0x2f3/0x380 [drm]
+ __fput+0xdf/0x1e0
+...
+---[ end trace ecfb91ac85688bbe ]---
+BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8
+IP: down_write+0x1f/0x40
+...
+Call Trace:
+ debugfs_remove_recursive+0x55/0x1b0
+ drm_debugfs_connector_remove+0x21/0x40 [drm]
+ drm_connector_unregister+0x62/0x70 [drm]
+ drm_connector_unregister_all+0x45/0xa0 [drm]
+ drm_modeset_unregister_all+0x12/0x30 [drm]
+ drm_dev_unregister+0xca/0xe0 [drm]
+ drm_put_dev+0x32/0x60 [drm]
+ drm_release+0x2f3/0x380 [drm]
+ __fput+0xdf/0x1e0
+...
+---[ end trace ecfb91ac85688bbf ]---
+
+This is caused by the revert moving back to drm_unplug_dev calling
+drm_minor_unregister which does:
+
+ device_del(minor->kdev);
+ dev_set_drvdata(minor->kdev, NULL); /* safety belt */
+ drm_debugfs_cleanup(minor);
+
+This causes the sysfs entries to already be removed even though we still
+have references to them in e.g. drm_connector.
+
+Note we must call drm_minor_unregister to notify userspace of the unplug
+of the device, so calling drm_dev_unregister is not completely wrong; the
+problem is that drm_dev_unregister does too much.
+
+This commit fixes drm_unplug_dev by not only reverting
+commit a39be606f99d ("drm: Do a full device unregister when unplugging")
+but by also adding a call to drm_modeset_unregister_all before the
+drm_minor_unregister calls to make sure all sysfs entries are removed
+before calling device_del(minor->kdev), thereby also fixing the second
+set of oopses caused by just reverting the commit.
+
+Fixes: a39be606f99d ("drm: Do a full device unregister when unplugging")
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Jeffy <jeffy.chen@rock-chips.com>
+Cc: Marco Diego Aurélio Mesquita <marcodiegomesquita@gmail.com>
+Reported-by: Marco Diego Aurélio Mesquita <marcodiegomesquita@gmail.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Sean Paul <seanpaul@chromium.org>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170601115430.4113-1-hdegoede@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_drv.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
+ void drm_unplug_dev(struct drm_device *dev)
+ {
+ /* for a USB device */
+- drm_dev_unregister(dev);
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ drm_modeset_unregister_all(dev);
++
++ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
++ drm_minor_unregister(dev, DRM_MINOR_RENDER);
++ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+
+ mutex_lock(&drm_global_mutex);
+
--- /dev/null
+From 43523eba79bda8f5b4c27f8ffe20ea078d20113a Mon Sep 17 00:00:00 2001
+From: Eric Anholt <eric@anholt.net>
+Date: Wed, 12 Apr 2017 12:11:58 -0700
+Subject: drm/msm: Expose our reservation object when exporting a dmabuf.
+
+From: Eric Anholt <eric@anholt.net>
+
+commit 43523eba79bda8f5b4c27f8ffe20ea078d20113a upstream.
+
+Without this, polling on the dma-buf (and presumably other devices
+synchronizing against our rendering) would return immediately, even
+while the BO was busy.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Rob Clark <robdclark@gmail.com>
+Cc: linux-arm-msm@vger.kernel.org
+Cc: freedreno@lists.freedesktop.org
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/msm/msm_drv.c | 1 +
+ drivers/gpu/drm/msm/msm_drv.h | 1 +
+ drivers/gpu/drm/msm/msm_gem_prime.c | 7 +++++++
+ 3 files changed, 9 insertions(+)
+
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -828,6 +828,7 @@ static struct drm_driver msm_driver = {
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
++ .gem_prime_res_obj = msm_gem_prime_res_obj,
+ .gem_prime_pin = msm_gem_prime_pin,
+ .gem_prime_unpin = msm_gem_prime_unpin,
+ .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -223,6 +223,7 @@ struct sg_table *msm_gem_prime_get_sg_ta
+ void *msm_gem_prime_vmap(struct drm_gem_object *obj);
+ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
++struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
+ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg);
+ int msm_gem_prime_pin(struct drm_gem_object *obj);
+--- a/drivers/gpu/drm/msm/msm_gem_prime.c
++++ b/drivers/gpu/drm/msm/msm_gem_prime.c
+@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_
+ if (!obj->import_attach)
+ msm_gem_put_pages(obj);
+ }
++
++struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
++{
++ struct msm_gem_object *msm_obj = to_msm_bo(obj);
++
++ return msm_obj->resv;
++}
--- /dev/null
+From 786813c343cb619d23cb0990e152e350b826d810 Mon Sep 17 00:00:00 2001
+From: Rob Clark <robdclark@gmail.com>
+Date: Wed, 3 May 2017 10:04:48 -0400
+Subject: drm/msm/mdp5: use __drm_atomic_helper_plane_duplicate_state()
+
+From: Rob Clark <robdclark@gmail.com>
+
+commit 786813c343cb619d23cb0990e152e350b826d810 upstream.
+
+Somehow the helper was never retrofitted for mdp5. This meant that when
+plane_state->fence was added, it could get copied into the new state in
+mdp5_plane_duplicate_state().
+
+If an update to disable the plane (for example on rmfb) managed to sneak
+in after an nonblock update had swapped state, but before it was
+committed, we'd get a splat:
+
+ WARNING: CPU: 1 PID: 69 at ../drivers/gpu/drm/drm_atomic_helper.c:1061 drm_atomic_helper_wait_for_fences+0xe0/0xf8
+ Modules linked in:
+
+ CPU: 1 PID: 69 Comm: kworker/1:1 Tainted: G W 4.11.0-rc8+ #1187
+ Hardware name: Qualcomm Technologies, Inc. APQ 8016 SBC (DT)
+ Workqueue: events drm_mode_rmfb_work_fn
+ task: ffffffc036560d00 task.stack: ffffffc036550000
+ PC is at drm_atomic_helper_wait_for_fences+0xe0/0xf8
+ LR is at complete_commit.isra.1+0x44/0x1c0
+ pc : [<ffffff80084f6040>] lr : [<ffffff800854176c>] pstate: 20000145
+ sp : ffffffc036553b60
+ x29: ffffffc036553b60 x28: ffffffc0264e6a00
+ x27: ffffffc035659000 x26: 0000000000000000
+ x25: ffffffc0240e8000 x24: 0000000000000038
+ x23: 0000000000000000 x22: ffffff800858f200
+ x21: ffffffc0240e8000 x20: ffffffc02f56a800
+ x19: 0000000000000000 x18: 0000000000000000
+ x17: 0000000000000000 x16: 0000000000000000
+ x15: 0000000000000000 x14: ffffffc00a192700
+ x13: 0000000000000004 x12: 0000000000000000
+ x11: ffffff80089a1690 x10: 00000000000008f0
+ x9 : ffffffc036553b20 x8 : ffffffc036561650
+ x7 : ffffffc03fe6cb40 x6 : 0000000000000000
+ x5 : 0000000000000001 x4 : 0000000000000002
+ x3 : ffffffc035659000 x2 : ffffffc0240e8c80
+ x1 : 0000000000000000 x0 : ffffffc02adbe588
+
+ ---[ end trace 13aeec77c3fb55e2 ]---
+ Call trace:
+ Exception stack(0xffffffc036553990 to 0xffffffc036553ac0)
+ 3980: 0000000000000000 0000008000000000
+ 39a0: ffffffc036553b60 ffffff80084f6040 0000000000004ff0 0000000000000038
+ 39c0: ffffffc0365539d0 ffffff800857e098 ffffffc036553a00 ffffff800857e1b0
+ 39e0: ffffffc036553a10 ffffff800857c554 ffffffc0365e8400 ffffffc0365e8400
+ 3a00: ffffffc036553a20 ffffff8008103358 000000000001aad7 ffffff800851b72c
+ 3a20: ffffffc036553a50 ffffff80080e9228 ffffffc02adbe588 0000000000000000
+ 3a40: ffffffc0240e8c80 ffffffc035659000 0000000000000002 0000000000000001
+ 3a60: 0000000000000000 ffffffc03fe6cb40 ffffffc036561650 ffffffc036553b20
+ 3a80: 00000000000008f0 ffffff80089a1690 0000000000000000 0000000000000004
+ 3aa0: ffffffc00a192700 0000000000000000 0000000000000000 0000000000000000
+ [<ffffff80084f6040>] drm_atomic_helper_wait_for_fences+0xe0/0xf8
+ [<ffffff800854176c>] complete_commit.isra.1+0x44/0x1c0
+ [<ffffff8008541c64>] msm_atomic_commit+0x32c/0x350
+ [<ffffff8008516230>] drm_atomic_commit+0x50/0x60
+ [<ffffff8008517548>] drm_atomic_remove_fb+0x158/0x250
+ [<ffffff80085186d0>] drm_framebuffer_remove+0x50/0x158
+ [<ffffff8008518818>] drm_mode_rmfb_work_fn+0x40/0x58
+ [<ffffff80080d5668>] process_one_work+0x1d0/0x378
+ [<ffffff80080d5a54>] worker_thread+0x244/0x488
+ [<ffffff80080db7fc>] kthread+0xfc/0x128
+ [<ffffff8008082ec0>] ret_from_fork+0x10/0x50
+
+Fixes: 9626014 ("drm/fence: add in-fences support")
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reported-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+Signed-off-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+@@ -220,9 +220,10 @@ mdp5_plane_duplicate_state(struct drm_pl
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
++ if (!mdp5_state)
++ return NULL;
+
+- if (mdp5_state && mdp5_state->base.fb)
+- drm_framebuffer_reference(mdp5_state->base.fb);
++ __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
+
+ return &mdp5_state->base;
+ }
--- /dev/null
+From 4f8caa60a5a13a78f26198618f21774bd6aa6498 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 26 May 2017 17:40:52 -0400
+Subject: ext4: fix data corruption with EXT4_GET_BLOCKS_ZERO
+
+From: Jan Kara <jack@suse.cz>
+
+commit 4f8caa60a5a13a78f26198618f21774bd6aa6498 upstream.
+
+When ext4_map_blocks() is called with EXT4_GET_BLOCKS_ZERO to zero out
+allocated blocks and these blocks are actually converted from an unwritten
+extent, the following race can happen:
+
+CPU0 CPU1
+
+page fault page fault
+... ...
+ext4_map_blocks()
+ ext4_ext_map_blocks()
+ ext4_ext_handle_unwritten_extents()
+ ext4_ext_convert_to_initialized()
+ - zero out converted extent
+ ext4_zeroout_es()
+ - inserts extent as initialized in status tree
+
+ ext4_map_blocks()
+ ext4_es_lookup_extent()
+ - finds initialized extent
+ write data
+ ext4_issue_zeroout()
+ - zeroes out new extent overwriting data
+
+This problem can be reproduced by generic/340 for the fallocated case
+for the last block in the file.
+
+Fix the problem by not zeroing out the area we are mapping with
+ext4_map_blocks() in ext4_ext_convert_to_initialized(). It is pointless
+to zero out this area in the first place, as the caller asked us to
+convert the area to initialized because it is just going to write data
+there before the transaction finishes. To achieve this we delete the
+special case of zeroing out the full extent, as that will be handled by
+the cases below, which zero out only the part of the extent that needs
+it. We also instruct ext4_split_extent() that the middle of the extent
+being split contains data so that ext4_split_extent_at() cannot zero out
+the full extent in case of ENOSPC.
+
+Fixes: 12735f881952c32b31bc4e433768f18489f79ec9
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c | 80 ++++++++++++++++++++++++------------------------------
+ 1 file changed, 37 insertions(+), 43 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initializ
+ struct ext4_sb_info *sbi;
+ struct ext4_extent_header *eh;
+ struct ext4_map_blocks split_map;
+- struct ext4_extent zero_ex;
++ struct ext4_extent zero_ex1, zero_ex2;
+ struct ext4_extent *ex, *abut_ex;
+ ext4_lblk_t ee_block, eof_block;
+ unsigned int ee_len, depth, map_len = map->m_len;
+ int allocated = 0, max_zeroout = 0;
+ int err = 0;
+- int split_flag = 0;
++ int split_flag = EXT4_EXT_DATA_VALID2;
+
+ ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initializ
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+- zero_ex.ee_len = 0;
++ zero_ex1.ee_len = 0;
++ zero_ex2.ee_len = 0;
+
+ trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
+
+@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initializ
+ if (ext4_encrypted_inode(inode))
+ max_zeroout = 0;
+
+- /* If extent is less than s_max_zeroout_kb, zeroout directly */
+- if (max_zeroout && (ee_len <= max_zeroout)) {
+- err = ext4_ext_zeroout(inode, ex);
+- if (err)
+- goto out;
+- zero_ex.ee_block = ex->ee_block;
+- zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
+- ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
+-
+- err = ext4_ext_get_access(handle, inode, path + depth);
+- if (err)
+- goto out;
+- ext4_ext_mark_initialized(ex);
+- ext4_ext_try_to_merge(handle, inode, path, ex);
+- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+- goto out;
+- }
+-
+ /*
+- * four cases:
++ * five cases:
+ * 1. split the extent into three extents.
+- * 2. split the extent into two extents, zeroout the first half.
+- * 3. split the extent into two extents, zeroout the second half.
++ * 2. split the extent into two extents, zeroout the head of the first
++ * extent.
++ * 3. split the extent into two extents, zeroout the tail of the second
++ * extent.
+ * 4. split the extent into two extents with out zeroout.
++ * 5. no splitting needed, just possibly zeroout the head and / or the
++ * tail of the extent.
+ */
+ split_map.m_lblk = map->m_lblk;
+ split_map.m_len = map->m_len;
+
+- if (max_zeroout && (allocated > map->m_len)) {
++ if (max_zeroout && (allocated > split_map.m_len)) {
+ if (allocated <= max_zeroout) {
+- /* case 3 */
+- zero_ex.ee_block =
+- cpu_to_le32(map->m_lblk);
+- zero_ex.ee_len = cpu_to_le16(allocated);
+- ext4_ext_store_pblock(&zero_ex,
+- ext4_ext_pblock(ex) + map->m_lblk - ee_block);
+- err = ext4_ext_zeroout(inode, &zero_ex);
++ /* case 3 or 5 */
++ zero_ex1.ee_block =
++ cpu_to_le32(split_map.m_lblk +
++ split_map.m_len);
++ zero_ex1.ee_len =
++ cpu_to_le16(allocated - split_map.m_len);
++ ext4_ext_store_pblock(&zero_ex1,
++ ext4_ext_pblock(ex) + split_map.m_lblk +
++ split_map.m_len - ee_block);
++ err = ext4_ext_zeroout(inode, &zero_ex1);
+ if (err)
+ goto out;
+- split_map.m_lblk = map->m_lblk;
+ split_map.m_len = allocated;
+- } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
+- /* case 2 */
+- if (map->m_lblk != ee_block) {
+- zero_ex.ee_block = ex->ee_block;
+- zero_ex.ee_len = cpu_to_le16(map->m_lblk -
++ }
++ if (split_map.m_lblk - ee_block + split_map.m_len <
++ max_zeroout) {
++ /* case 2 or 5 */
++ if (split_map.m_lblk != ee_block) {
++ zero_ex2.ee_block = ex->ee_block;
++ zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
+ ee_block);
+- ext4_ext_store_pblock(&zero_ex,
++ ext4_ext_store_pblock(&zero_ex2,
+ ext4_ext_pblock(ex));
+- err = ext4_ext_zeroout(inode, &zero_ex);
++ err = ext4_ext_zeroout(inode, &zero_ex2);
+ if (err)
+ goto out;
+ }
+
++ split_map.m_len += split_map.m_lblk - ee_block;
+ split_map.m_lblk = ee_block;
+- split_map.m_len = map->m_lblk - ee_block + map->m_len;
+ allocated = map->m_len;
+ }
+ }
+@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initializ
+ err = 0;
+ out:
+ /* If we have gotten a failure, don't zero out status tree */
+- if (!err)
+- err = ext4_zeroout_es(inode, &zero_ex);
++ if (!err) {
++ err = ext4_zeroout_es(inode, &zero_ex1);
++ if (!err)
++ err = ext4_zeroout_es(inode, &zero_ex2);
++ }
+ return err ? err : allocated;
+ }
+
--- /dev/null
+From 67a7d5f561f469ad2fa5154d2888258ab8e6df7c Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 29 May 2017 13:24:55 -0400
+Subject: ext4: fix fdatasync(2) after extent manipulation operations
+
+From: Jan Kara <jack@suse.cz>
+
+commit 67a7d5f561f469ad2fa5154d2888258ab8e6df7c upstream.
+
+Currently, extent manipulation operations such as hole punch, range
+zeroing, or extent shifting do not record the fact that file data has
+changed and thus that fdatasync(2) has work to do. As a result, if we crash
+e.g. after a punch hole and fdatasync, the user can still possibly see the
+punched-out data after journal replay. Test generic/392 fails due to
+these problems.
+
+Fix the problem by properly marking that file data has changed in these
+operations.
+
+Fixes: a4bb6b64e39abc0e41ca077725f2a72c868e7622
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c | 5 +++++
+ fs/ext4/inode.c | 2 ++
+ 2 files changed, 7 insertions(+)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4877,6 +4877,8 @@ static long ext4_zero_range(struct file
+
+ /* Zero out partial block at the edges of the range */
+ ret = ext4_zero_partial_blocks(handle, inode, offset, len);
++ if (ret >= 0)
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+ if (file->f_flags & O_SYNC)
+ ext4_handle_sync(handle);
+@@ -5563,6 +5565,7 @@ int ext4_collapse_range(struct inode *in
+ ext4_handle_sync(handle);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+ out_stop:
+ ext4_journal_stop(handle);
+@@ -5736,6 +5739,8 @@ int ext4_insert_range(struct inode *inod
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
++ if (ret >= 0)
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+ out_stop:
+ ext4_journal_stop(handle);
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4165,6 +4165,8 @@ int ext4_punch_hole(struct inode *inode,
+
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
++ if (ret >= 0)
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ out_stop:
+ ext4_journal_stop(handle);
+ out_dio:
--- /dev/null
+From 7d95eddf313c88b24f99d4ca9c2411a4b82fef33 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Sun, 21 May 2017 22:33:23 -0400
+Subject: ext4: fix SEEK_HOLE
+
+From: Jan Kara <jack@suse.cz>
+
+commit 7d95eddf313c88b24f99d4ca9c2411a4b82fef33 upstream.
+
+Currently, SEEK_HOLE implementation in ext4 may both return that there's
+a hole at some offset although that offset already has data and skip
+some holes during a search for the next hole. The first problem is
+demonstrated by:
+
+xfs_io -c "falloc 0 256k" -c "pwrite 0 56k" -c "seek -h 0" file
+wrote 57344/57344 bytes at offset 0
+56 KiB, 14 ops; 0.0000 sec (2.054 GiB/sec and 538461.5385 ops/sec)
+Whence Result
+HOLE 0
+
+Where we can see that SEEK_HOLE wrongly returned offset 0 as containing
+a hole although we have written data there. The second problem can be
+demonstrated by:
+
+xfs_io -c "falloc 0 256k" -c "pwrite 0 56k" -c "pwrite 128k 8k"
+ -c "seek -h 0" file
+
+wrote 57344/57344 bytes at offset 0
+56 KiB, 14 ops; 0.0000 sec (1.978 GiB/sec and 518518.5185 ops/sec)
+wrote 8192/8192 bytes at offset 131072
+8 KiB, 2 ops; 0.0000 sec (2 GiB/sec and 500000.0000 ops/sec)
+Whence Result
+HOLE 139264
+
+Where we can see that hole at offsets 56k..128k has been ignored by the
+SEEK_HOLE call.
+
+The underlying problem is in ext4_find_unwritten_pgoff(), which is
+just buggy. In some cases it fails to update the returned offset when it
+finds a hole (when no pages are found or when the first found page has
+a higher index than expected); in other cases the conditions for detecting
+a hole are simply missing (we fail to detect a situation where the indices
+of the returned pages are not contiguous).
+
+Fix ext4_find_unwritten_pgoff() to properly detect non-contiguous page
+indices and to handle all cases where we get fewer pages than expected
+in one place, handling them properly there.
+
+Fixes: c8c0df241cc2719b1262e627f999638411934f60
+CC: Zheng Liu <wenqing.lz@taobao.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/file.c | 50 ++++++++++++++------------------------------------
+ 1 file changed, 14 insertions(+), 36 deletions(-)
+
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -484,47 +484,27 @@ static int ext4_find_unwritten_pgoff(str
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+ (pgoff_t)num);
+- if (nr_pages == 0) {
+- if (whence == SEEK_DATA)
+- break;
+-
+- BUG_ON(whence != SEEK_HOLE);
+- /*
+- * If this is the first time to go into the loop and
+- * offset is not beyond the end offset, it will be a
+- * hole at this offset
+- */
+- if (lastoff == startoff || lastoff < endoff)
+- found = 1;
++ if (nr_pages == 0)
+ break;
+- }
+-
+- /*
+- * If this is the first time to go into the loop and
+- * offset is smaller than the first page offset, it will be a
+- * hole at this offset.
+- */
+- if (lastoff == startoff && whence == SEEK_HOLE &&
+- lastoff < page_offset(pvec.pages[0])) {
+- found = 1;
+- break;
+- }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ struct buffer_head *bh, *head;
+
+ /*
+- * If the current offset is not beyond the end of given
+- * range, it will be a hole.
++ * If current offset is smaller than the page offset,
++ * there is a hole at this offset.
+ */
+- if (lastoff < endoff && whence == SEEK_HOLE &&
+- page->index > end) {
++ if (whence == SEEK_HOLE && lastoff < endoff &&
++ lastoff < page_offset(pvec.pages[i])) {
+ found = 1;
+ *offset = lastoff;
+ goto out;
+ }
+
++ if (page->index > end)
++ goto out;
++
+ lock_page(page);
+
+ if (unlikely(page->mapping != inode->i_mapping)) {
+@@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(str
+ unlock_page(page);
+ }
+
+- /*
+- * The no. of pages is less than our desired, that would be a
+- * hole in there.
+- */
+- if (nr_pages < num && whence == SEEK_HOLE) {
+- found = 1;
+- *offset = lastoff;
++ /* The no. of pages is less than our desired, we are done. */
++ if (nr_pages < num)
+ break;
+- }
+
+ index = pvec.pages[i - 1]->index + 1;
+ pagevec_release(&pvec);
+ } while (index <= end);
+
++ if (whence == SEEK_HOLE && lastoff < endoff) {
++ found = 1;
++ *offset = lastoff;
++ }
+ out:
+ pagevec_release(&pvec);
+ return found;
--- /dev/null
+From 887a9730614727c4fff7cb756711b190593fc1df Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Sun, 21 May 2017 22:36:23 -0400
+Subject: ext4: keep existing extra fields when inode expands
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+commit 887a9730614727c4fff7cb756711b190593fc1df upstream.
+
+ext4_expand_extra_isize() should clear only space between old and new
+size.
+
+Fixes: 6dd4ee7cab7e # v2.6.23
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5621,8 +5621,9 @@ static int ext4_expand_extra_isize(struc
+ /* No extended attributes present */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
+- new_extra_isize);
++ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
++ EXT4_I(inode)->i_extra_isize, 0,
++ new_extra_isize - EXT4_I(inode)->i_extra_isize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ return 0;
+ }
--- /dev/null
+From f7d86ecf83cb66d3c4c6ac4edb1dd50c0919aa2b Mon Sep 17 00:00:00 2001
+From: Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>
+Date: Tue, 16 May 2017 12:22:42 +0530
+Subject: iio: adc: bcm_iproc_adc: swap primary and secondary isr handler's
+
+From: Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>
+
+commit f7d86ecf83cb66d3c4c6ac4edb1dd50c0919aa2b upstream.
+
+The third argument of devm_request_threaded_irq() is the primary
+handler. It is called in hardirq context and checks whether the
+interrupt is relevant to the device. If the primary handler returns
+IRQ_WAKE_THREAD, the secondary handler (a.k.a. handler thread) is
+scheduled to run in process context.
+
+bcm_iproc_adc.c uses the secondary handler as the primary one
+and the other way around. This patch swaps them, and also renames
+the primary and secondary handlers appropriately.
+
+Tested on the BCM9583XX iProc SoC based boards.
+
+Fixes: 4324c97ecedc ("iio: Add driver for Broadcom iproc-static-adc")
+Reported-by: Pavel Roskin <plroskin@gmail.com>
+Signed-off-by: Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/adc/bcm_iproc_adc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/iio/adc/bcm_iproc_adc.c
++++ b/drivers/iio/adc/bcm_iproc_adc.c
+@@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct ii
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
+ }
+
+-static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
++static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+ {
+ u32 channel_intr_status;
+ u32 intr_status;
+@@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_h
+ return IRQ_NONE;
+ }
+
+-static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
++static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+ {
+ irqreturn_t retval = IRQ_NONE;
+ struct iproc_adc_priv *adc_priv;
+@@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_t
+ adc_priv = iio_priv(indio_dev);
+
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+- dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
++ dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
+ intr_status);
+
+ intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
+@@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platfo
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
+- iproc_adc_interrupt_thread,
+ iproc_adc_interrupt_handler,
++ iproc_adc_interrupt_thread,
+ IRQF_SHARED, "iproc-adc", indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error %d\n", ret);
--- /dev/null
+From 7cc3bff4efe6164a0c8163331c8aa55454799f42 Mon Sep 17 00:00:00 2001
+From: Franziska Naepelt <franziska.naepelt@idt.com>
+Date: Wed, 17 May 2017 12:41:19 +0200
+Subject: iio: light: ltr501 Fix interchanged als/ps register field
+
+From: Franziska Naepelt <franziska.naepelt@idt.com>
+
+commit 7cc3bff4efe6164a0c8163331c8aa55454799f42 upstream.
+
+The register mapping in the IIO driver for the Liteon light and proximity
+sensor LTR501 interrupt mode is interchanged (ALS/PS).
+There is an INTERRUPT register (address 0x8F):
+Bit 0 represents the PS measurement trigger.
+Bit 1 represents the ALS measurement trigger.
+These two bit fields are interchanged within the driver.
+see datasheet page 24:
+http://optoelectronics.liteon.com/upload/download/DS86-2012-0006/S_110_LTR-501ALS-01_PrelimDS_ver1%5B1%5D.pdf
+
+Signed-off-by: Franziska Naepelt <franziska.naepelt@idt.com>
+Fixes: 7ac702b3144b6 ("iio: ltr501: Add interrupt support")
+Acked-by: Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/light/ltr501.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {1
+ static const struct reg_field reg_field_it =
+ REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
+ static const struct reg_field reg_field_als_intr =
+- REG_FIELD(LTR501_INTR, 0, 0);
+-static const struct reg_field reg_field_ps_intr =
+ REG_FIELD(LTR501_INTR, 1, 1);
++static const struct reg_field reg_field_ps_intr =
++ REG_FIELD(LTR501_INTR, 0, 0);
+ static const struct reg_field reg_field_als_rate =
+ REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
+ static const struct reg_field reg_field_ps_rate =
--- /dev/null
+From 275292d3a3d62670b1b13484707b74e5239b4bb0 Mon Sep 17 00:00:00 2001
+From: Matt Ranostay <matt.ranostay@konsulko.com>
+Date: Thu, 27 Apr 2017 00:52:32 -0700
+Subject: iio: proximity: as3935: fix AS3935_INT mask
+
+From: Matt Ranostay <matt.ranostay@konsulko.com>
+
+commit 275292d3a3d62670b1b13484707b74e5239b4bb0 upstream.
+
+The AS3935 interrupt mask has been incorrect, so valid lightning events
+would never trigger a buffer event. Also, the noise interrupt should be
+BIT(0).
+
+Fixes: 24ddb0e4bba4 ("iio: Add AS3935 lightning sensor support")
+Signed-off-by: Matt Ranostay <matt.ranostay@konsulko.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/proximity/as3935.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -40,9 +40,9 @@
+ #define AS3935_AFE_PWR_BIT BIT(0)
+
+ #define AS3935_INT 0x03
+-#define AS3935_INT_MASK 0x07
++#define AS3935_INT_MASK 0x0f
+ #define AS3935_EVENT_INT BIT(3)
+-#define AS3935_NOISE_INT BIT(1)
++#define AS3935_NOISE_INT BIT(0)
+
+ #define AS3935_DATA 0x07
+ #define AS3935_DATA_MASK 0x3F
--- /dev/null
+From 9122b54f266ddee09654fe3fbc503c1a60f4a01c Mon Sep 17 00:00:00 2001
+From: Matt Ranostay <matt.ranostay@konsulko.com>
+Date: Thu, 4 May 2017 17:32:19 -0700
+Subject: iio: proximity: as3935: fix iio_trigger_poll issue
+
+From: Matt Ranostay <matt.ranostay@konsulko.com>
+
+commit 9122b54f266ddee09654fe3fbc503c1a60f4a01c upstream.
+
+Using iio_trigger_poll() can oops when multiple interrupts
+happen before the first is handled.
+
+Use iio_trigger_poll_chained() instead and use the timestamp
+when processed, since it will in theory be at most 2 ms of latency.
+
+Fixes: 24ddb0e4bba4 ("iio: Add AS3935 lightning sensor support")
+Signed-off-by: Matt Ranostay <matt.ranostay@konsulko.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/proximity/as3935.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handle
+
+ st->buffer[0] = val & AS3935_DATA_MASK;
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+- pf->timestamp);
++ iio_get_time_ns(indio_dev));
+ err_read:
+ iio_trigger_notify_done(indio_dev->trig);
+
+@@ -244,7 +244,7 @@ static void as3935_event_work(struct wor
+
+ switch (val) {
+ case AS3935_EVENT_INT:
+- iio_trigger_poll(st->trig);
++ iio_trigger_poll_chained(st->trig);
+ break;
+ case AS3935_NOISE_INT:
+ dev_warn(&st->spi->dev, "noise level is too high\n");
--- /dev/null
+From 4eecbe81885180c9f6217ecfd679b1f285967218 Mon Sep 17 00:00:00 2001
+From: Marcin Niestroj <m.niestroj@grinn-global.com>
+Date: Thu, 18 May 2017 09:12:06 +0200
+Subject: iio: trigger: fix NULL pointer dereference in iio_trigger_write_current()
+
+From: Marcin Niestroj <m.niestroj@grinn-global.com>
+
+commit 4eecbe81885180c9f6217ecfd679b1f285967218 upstream.
+
+In case oldtrig == trig == NULL (which happens when we set none
+trigger, when there is already none set) there is a NULL pointer
+dereference during iio_trigger_put(trig). Below is kernel output when
+this occurs:
+
+[ 26.741790] Unable to handle kernel NULL pointer dereference at virtual address 00000000
+[ 26.750179] pgd = cacc0000
+[ 26.752936] [00000000] *pgd=8adc6835, *pte=00000000, *ppte=00000000
+[ 26.759531] Internal error: Oops: 17 [#1] SMP ARM
+[ 26.764261] Modules linked in: usb_f_ncm u_ether usb_f_acm u_serial usb_f_fs libcomposite configfs evbug
+[ 26.773844] CPU: 0 PID: 152 Comm: synchro Not tainted 4.12.0-rc1 #2
+[ 26.780128] Hardware name: Freescale i.MX6 Ultralite (Device Tree)
+[ 26.786329] task: cb1de200 task.stack: cac92000
+[ 26.790892] PC is at iio_trigger_write_current+0x188/0x1f4
+[ 26.796403] LR is at lock_release+0xf8/0x20c
+[ 26.800696] pc : [<c0736f34>] lr : [<c016efb0>] psr: 600d0013
+[ 26.800696] sp : cac93e30 ip : cac93db0 fp : cac93e5c
+[ 26.812193] r10: c0e64fe8 r9 : 00000000 r8 : 00000001
+[ 26.817436] r7 : cb190810 r6 : 00000010 r5 : 00000001 r4 : 00000000
+[ 26.823982] r3 : 00000000 r2 : 00000000 r1 : cb1de200 r0 : 00000000
+[ 26.830528] Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
+[ 26.837683] Control: 10c5387d Table: 8acc006a DAC: 00000051
+[ 26.843448] Process synchro (pid: 152, stack limit = 0xcac92210)
+[ 26.849475] Stack: (0xcac93e30 to 0xcac94000)
+[ 26.853857] 3e20: 00000001 c0736dac c054033c cae6b680
+[ 26.862060] 3e40: cae6b680 00000000 00000001 cb3f8610 cac93e74 cac93e60 c054035c c0736db8
+[ 26.870264] 3e60: 00000001 c054033c cac93e94 cac93e78 c029bf34 c0540348 00000000 00000000
+[ 26.878469] 3e80: cb3f8600 cae6b680 cac93ed4 cac93e98 c029b320 c029bef0 00000000 00000000
+[ 26.886672] 3ea0: 00000000 cac93f78 cb2d41fc caed3280 c029b214 cac93f78 00000001 000e20f8
+[ 26.894874] 3ec0: 00000001 00000000 cac93f44 cac93ed8 c0221dcc c029b220 c0e1ca39 cb2d41fc
+[ 26.903079] 3ee0: cac93f04 cac93ef0 c0183ef0 c0183ab0 cb2d41fc 00000000 cac93f44 cac93f08
+[ 26.911282] 3f00: c0225eec c0183ebc 00000001 00000000 c0223728 00000000 c0245454 00000001
+[ 26.919485] 3f20: 00000001 caed3280 000e20f8 cac93f78 000e20f8 00000001 cac93f74 cac93f48
+[ 26.927690] 3f40: c0223680 c0221da4 c0246520 c0245460 caed3283 caed3280 00000000 00000000
+[ 26.935893] 3f60: 000e20f8 00000001 cac93fa4 cac93f78 c0224520 c02235e4 00000000 00000000
+[ 26.944096] 3f80: 00000001 000e20f8 00000001 00000004 c0107f84 cac92000 00000000 cac93fa8
+[ 26.952299] 3fa0: c0107de0 c02244e8 00000001 000e20f8 0000000e 000e20f8 00000001 fbad2484
+[ 26.960502] 3fc0: 00000001 000e20f8 00000001 00000004 beb6b698 00064260 0006421c beb6b4b4
+[ 26.968705] 3fe0: 00000000 beb6b450 b6f219a0 b6e2f268 800d0010 0000000e cac93ff4 cac93ffc
+[ 26.976896] Backtrace:
+[ 26.979388] [<c0736dac>] (iio_trigger_write_current) from [<c054035c>] (dev_attr_store+0x20/0x2c)
+[ 26.988289] r10:cb3f8610 r9:00000001 r8:00000000 r7:cae6b680 r6:cae6b680 r5:c054033c
+[ 26.996138] r4:c0736dac r3:00000001
+[ 26.999747] [<c054033c>] (dev_attr_store) from [<c029bf34>] (sysfs_kf_write+0x50/0x54)
+[ 27.007686] r5:c054033c r4:00000001
+[ 27.011290] [<c029bee4>] (sysfs_kf_write) from [<c029b320>] (kernfs_fop_write+0x10c/0x224)
+[ 27.019579] r7:cae6b680 r6:cb3f8600 r5:00000000 r4:00000000
+[ 27.025271] [<c029b214>] (kernfs_fop_write) from [<c0221dcc>] (__vfs_write+0x34/0x120)
+[ 27.033214] r10:00000000 r9:00000001 r8:000e20f8 r7:00000001 r6:cac93f78 r5:c029b214
+[ 27.041059] r4:caed3280
+[ 27.043622] [<c0221d98>] (__vfs_write) from [<c0223680>] (vfs_write+0xa8/0x170)
+[ 27.050959] r9:00000001 r8:000e20f8 r7:cac93f78 r6:000e20f8 r5:caed3280 r4:00000001
+[ 27.058731] [<c02235d8>] (vfs_write) from [<c0224520>] (SyS_write+0x44/0x98)
+[ 27.065806] r9:00000001 r8:000e20f8 r7:00000000 r6:00000000 r5:caed3280 r4:caed3283
+[ 27.073582] [<c02244dc>] (SyS_write) from [<c0107de0>] (ret_fast_syscall+0x0/0x1c)
+[ 27.081179] r9:cac92000 r8:c0107f84 r7:00000004 r6:00000001 r5:000e20f8 r4:00000001
+[ 27.088947] Code: 1a000009 e1a04009 e3a06010 e1a05008 (e5943000)
+[ 27.095244] ---[ end trace 06d1dab86d6e6bab ]---
+
+To fix that problem call iio_trigger_put(trig) only when trig is not
+NULL.
+
+Fixes: d5d24bcc0a10 ("iio: trigger: close race condition in acquiring trigger reference")
+Signed-off-by: Marcin Niestroj <m.niestroj@grinn-global.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/industrialio-trigger.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current
+ return len;
+
+ out_trigger_put:
+- iio_trigger_put(trig);
++ if (trig)
++ iio_trigger_put(trig);
+ return ret;
+ }
+
--- /dev/null
+From 47eb0c8b4d9eb6368941c6a9bb443f00847a46d7 Mon Sep 17 00:00:00 2001
+From: Ulrik De Bie <ulrik.debie-os@e2big.org>
+Date: Wed, 7 Jun 2017 10:30:57 -0700
+Subject: Input: elantech - add Fujitsu Lifebook E546/E557 to force crc_enabled
+
+From: Ulrik De Bie <ulrik.debie-os@e2big.org>
+
+commit 47eb0c8b4d9eb6368941c6a9bb443f00847a46d7 upstream.
+
+The Lifebook E546 and E557 touchpads were also not functioning and
+worked after running:
+
+ echo "1" > /sys/devices/platform/i8042/serio2/crc_enabled
+
+Add them to the list of machines that need this workaround.
+
+Signed-off-by: Ulrik De Bie <ulrik.debie-os@e2big.org>
+Reviewed-by: Arjan Opmeer <arjan@opmeer.net>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/elantech.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(st
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
++ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
++ * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
+ * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+@@ -1525,6 +1527,13 @@ static const struct dmi_system_id elante
+ },
+ },
+ {
++ /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
++ },
++ },
++ {
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+@@ -1546,6 +1555,13 @@ static const struct dmi_system_id elante
+ },
+ },
+ {
++ /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
++ },
++ },
++ {
+ /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
--- /dev/null
+From 6f9193ec044a8f72d8b6ae94a5c4ab6e8b0f00ca Mon Sep 17 00:00:00 2001
+From: Pratyush Anand <panand@redhat.com>
+Date: Mon, 29 May 2017 22:08:24 +0300
+Subject: mei: make sysfs modalias format similar as uevent modalias
+
+From: Pratyush Anand <panand@redhat.com>
+
+commit 6f9193ec044a8f72d8b6ae94a5c4ab6e8b0f00ca upstream.
+
+modprobe is not able to resolve sysfs modalias for mei devices.
+
+ # cat
+/sys/class/watchdog/watchdog0/device/watchdog/watchdog0/device/modalias
+mei::05b79a6f-4628-4d7f-899d-a91514cb32ab:
+ # modprobe --set-version 4.9.6-200.fc25.x86_64 -R
+mei::05b79a6f-4628-4d7f-899d-a91514cb32ab:
+modprobe: FATAL: Module mei::05b79a6f-4628-4d7f-899d-a91514cb32ab: not
+found in directory /lib/modules/4.9.6-200.fc25.x86_64
+ # cat /lib/modules/4.9.6-200.fc25.x86_64/modules.alias | grep
+05b79a6f-4628-4d7f-899d-a91514cb32ab
+alias mei:*:05b79a6f-4628-4d7f-899d-a91514cb32ab:*:* mei_wdt
+
+Commit b26864cad1c9 ("mei: bus: add client protocol
+version to the device alias") added the client protocol version to the
+uevent modalias; however, the sysfs modalias is still in the format
+mei:S:uuid:*.
+
+This patch makes the format of the sysfs modalias match the uevent
+modalias so that modprobe is able to resolve the aliases.
+
+Fixes: commit b26864cad1c9 ("mei: bus: add client protocol version to the device alias")
+Signed-off-by: Pratyush Anand <panand@redhat.com>
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/mei/bus.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -763,8 +763,10 @@ static ssize_t modalias_show(struct devi
+ {
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
++ u8 version = mei_me_cl_ver(cldev->me_cl);
+
+- return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
++ return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
++ cldev->name, uuid, version);
+ }
+ static DEVICE_ATTR_RO(modalias);
+
--- /dev/null
+From 2761b4f12b017f6d3e5add386733a700a490df47 Mon Sep 17 00:00:00 2001
+From: Andres Galacho <andresgalacho@gmail.com>
+Date: Mon, 1 May 2017 16:30:15 -0400
+Subject: mtd: nand: tango: Export OF device ID table as module aliases
+
+From: Andres Galacho <andresgalacho@gmail.com>
+
+commit 2761b4f12b017f6d3e5add386733a700a490df47 upstream.
+
+The device table is required to load modules based on
+modaliases. After adding MODULE_DEVICE_TABLE, the entries below,
+for example, will be added to modules.alias:
+alias: of:N*T*Csigma,smp8758-nandC*
+alias: of:N*T*Csigma,smp8758-nand
+
+Fixes: 6956e2385a16 ("mtd: nand: add tango NAND flash controller support")
+Signed-off-by: Andres Galacho <andresgalacho@gmail.com>
+Acked-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/tango_nand.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/nand/tango_nand.c
++++ b/drivers/mtd/nand/tango_nand.c
+@@ -661,6 +661,7 @@ static const struct of_device_id tango_n
+ { .compatible = "sigma,smp8758-nand" },
+ { /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, tango_nand_ids);
+
+ static struct platform_driver tango_nand_driver = {
+ .probe = tango_nand_probe,
--- /dev/null
+From 60cf0ce14b09b54e7ee79dc3ef498de6ef0e41e9 Mon Sep 17 00:00:00 2001
+From: Marc Gonzalez <marc_gonzalez@sigmadesigns.com>
+Date: Fri, 12 May 2017 17:34:01 +0200
+Subject: mtd: nand: tango: Update ecc_stats.corrected
+
+From: Marc Gonzalez <marc_gonzalez@sigmadesigns.com>
+
+commit 60cf0ce14b09b54e7ee79dc3ef498de6ef0e41e9 upstream.
+
+According to Boris, some user-space tools expect MTD drivers to
+update ecc_stats.corrected, and it's better to provide a lower
+bound than to provide no information at all.
+
+Fixes: 6956e2385a16 ("mtd: nand: add tango NAND flash controller support")
+Reported-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Marc Gonzalez <marc_gonzalez@sigmadesigns.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/tango_nand.c | 22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+--- a/drivers/mtd/nand/tango_nand.c
++++ b/drivers/mtd/nand/tango_nand.c
+@@ -55,10 +55,10 @@
+ * byte 1 for other packets in the page (PKT_N, for N > 0)
+ * ERR_COUNT_PKT_N is the max error count over all but the first packet.
+ */
+-#define DECODE_OK_PKT_0(v) ((v) & BIT(7))
+-#define DECODE_OK_PKT_N(v) ((v) & BIT(15))
+ #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
+ #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
++#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0)
++#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0)
+
+ /* Offsets relative to pbus_base */
+ #define PBUS_CS_CTRL 0x83c
+@@ -193,6 +193,8 @@ static int check_erased_page(struct nand
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
++ else
++ mtd->ecc_stats.corrected += res;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+@@ -202,9 +204,11 @@ static int check_erased_page(struct nand
+ return bitflips;
+ }
+
+-static int decode_error_report(struct tango_nfc *nfc)
++static int decode_error_report(struct nand_chip *chip)
+ {
+ u32 status, res;
++ struct mtd_info *mtd = nand_to_mtd(chip);
++ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+
+ status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
+ if (status & PAGE_IS_EMPTY)
+@@ -212,10 +216,14 @@ static int decode_error_report(struct ta
+
+ res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
+
+- if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
+- return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
++ if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
++ return -EBADMSG;
++
++ /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
++ mtd->ecc_stats.corrected +=
++ ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
+
+- return -EBADMSG;
++ return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+ }
+
+ static void tango_dma_callback(void *arg)
+@@ -280,7 +288,7 @@ static int tango_read_page(struct mtd_in
+ if (err)
+ return err;
+
+- res = decode_error_report(nfc);
++ res = decode_error_report(chip);
+ if (res < 0) {
+ chip->ecc.read_oob_raw(mtd, chip, page);
+ res = check_erased_page(chip, buf);
--- /dev/null
+From b169c13de473a85b3c859bb36216a4cb5f00a54a Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Wed, 7 Jun 2017 19:45:31 -0400
+Subject: random: invalidate batched entropy after crng init
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit b169c13de473a85b3c859bb36216a4cb5f00a54a upstream.
+
+It's possible that get_random_{u32,u64} is used before the crng has
+initialized, in which case its output might not be cryptographically
+secure. To address this directly, this patch set introduces the
+*_wait variety of functions, but even with that, there's a subtle issue:
+what happens to our batched entropy that was generated before
+initialization. Prior to this commit, it'd stick around, supplying bad
+numbers. After this commit, we force the entropy to be re-extracted
+after each phase of the crng has initialized.
+
+In order to avoid a race condition with the position counter, we
+introduce a simple rwlock for this invalidation. Since it's only during
+this awkward transition period, after things are all set up, we stop
+using it, so that it doesn't have an impact on performance.
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c | 37 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 37 insertions(+)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1,6 +1,9 @@
+ /*
+ * random.c -- A strong random number generator
+ *
++ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
++ * Rights Reserved.
++ *
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ *
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
+@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
+ static struct crng_state **crng_node_pool __read_mostly;
+ #endif
+
++static void invalidate_batched_entropy(void);
++
+ static void crng_initialize(struct crng_state *crng)
+ {
+ int i;
+@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp
+ cp++; crng_init_cnt++; len--;
+ }
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
++ invalidate_batched_entropy();
+ crng_init = 1;
+ wake_up_interruptible(&crng_init_wait);
+ pr_notice("random: fast init done\n");
+@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_stat
+ memzero_explicit(&buf, sizeof(buf));
+ crng->init_time = jiffies;
+ if (crng == &primary_crng && crng_init < 2) {
++ invalidate_batched_entropy();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+@@ -2019,6 +2026,7 @@ struct batched_entropy {
+ };
+ unsigned int position;
+ };
++static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+@@ -2029,6 +2037,8 @@ static DEFINE_PER_CPU(struct batched_ent
+ u64 get_random_u64(void)
+ {
+ u64 ret;
++ bool use_lock = crng_init < 2;
++ unsigned long flags;
+ struct batched_entropy *batch;
+
+ #if BITS_PER_LONG == 64
+@@ -2041,11 +2051,15 @@ u64 get_random_u64(void)
+ #endif
+
+ batch = &get_cpu_var(batched_entropy_u64);
++ if (use_lock)
++ read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
++ if (use_lock)
++ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ put_cpu_var(batched_entropy_u64);
+ return ret;
+ }
+@@ -2055,22 +2069,45 @@ static DEFINE_PER_CPU(struct batched_ent
+ u32 get_random_u32(void)
+ {
+ u32 ret;
++ bool use_lock = crng_init < 2;
++ unsigned long flags;
+ struct batched_entropy *batch;
+
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ batch = &get_cpu_var(batched_entropy_u32);
++ if (use_lock)
++ read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
++ if (use_lock)
++ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ put_cpu_var(batched_entropy_u32);
+ return ret;
+ }
+ EXPORT_SYMBOL(get_random_u32);
+
++/* It's important to invalidate all potential batched entropy that might
++ * be stored before the crng is initialized, which we can do lazily by
++ * simply resetting the counter to zero so that it's re-extracted on the
++ * next usage. */
++static void invalidate_batched_entropy(void)
++{
++ int cpu;
++ unsigned long flags;
++
++ write_lock_irqsave(&batched_entropy_reset_lock, flags);
++ for_each_possible_cpu (cpu) {
++ per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
++ per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
++ }
++ write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
++}
++
+ /**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
--- /dev/null
+From 963761a0b2e85663ee4a5630f72930885a06598a Mon Sep 17 00:00:00 2001
+From: Sean Young <sean@mess.org>
+Date: Wed, 24 May 2017 06:24:51 -0300
+Subject: [media] rc-core: race condition during ir_raw_event_register()
+
+From: Sean Young <sean@mess.org>
+
+commit 963761a0b2e85663ee4a5630f72930885a06598a upstream.
+
+An rc device can call ir_raw_event_handle() after rc_allocate_device(),
+but before rc_register_device() has completed. This is racy because
+rcdev->raw is set before rcdev->raw->thread has a valid value.
+
+Reported-by: kbuild test robot <fengguang.wu@intel.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/rc/rc-ir-raw.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/rc/rc-ir-raw.c
++++ b/drivers/media/rc/rc-ir-raw.c
+@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle)
+ */
+ void ir_raw_event_handle(struct rc_dev *dev)
+ {
+- if (!dev->raw)
++ if (!dev->raw || !dev->raw->thread)
+ return;
+
+ wake_up_process(dev->raw->thread);
+@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev
+ {
+ int rc;
+ struct ir_raw_handler *handler;
++ struct task_struct *thread;
+
+ if (!dev)
+ return -EINVAL;
+@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev
+ * because the event is coming from userspace
+ */
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+- dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
+- "rc%u", dev->minor);
++ thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
++ dev->minor);
+
+- if (IS_ERR(dev->raw->thread)) {
+- rc = PTR_ERR(dev->raw->thread);
++ if (IS_ERR(thread)) {
++ rc = PTR_ERR(thread);
+ goto out;
+ }
++
++ dev->raw->thread = thread;
+ }
+
+ mutex_lock(&ir_raw_handler_lock);
--- /dev/null
+From d8747d642ec4ce96adf17ae35652a5e4015cfe02 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 2 May 2017 13:16:18 +0200
+Subject: reiserfs: Make flush bios explicitely sync
+
+From: Jan Kara <jack@suse.cz>
+
+commit d8747d642ec4ce96adf17ae35652a5e4015cfe02 upstream.
+
+Commit b685d3d65ac7 "block: treat REQ_FUA and REQ_PREFLUSH as
+synchronous" removed REQ_SYNC flag from WRITE_{FUA|PREFLUSH|...}
+definitions. generic_make_request_checks(), however, strips the REQ_FUA and
+REQ_PREFLUSH flags from a bio when the storage doesn't report a volatile
+write cache, and thus the write effectively becomes asynchronous, which can
+lead to performance regressions.
+
+Fix the problem by making sure all bios which are synchronous are
+properly marked with REQ_SYNC.
+
+Fixes: b685d3d65ac791406e0dfd8779cc9b3707fea5a3
+CC: reiserfs-devel@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/reiserfs/journal.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct supe
+ depth = reiserfs_write_unlock_nested(s);
+ if (reiserfs_barrier_flush(s))
+ __sync_dirty_buffer(jl->j_commit_bh,
+- REQ_PREFLUSH | REQ_FUA);
++ REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ else
+ sync_dirty_buffer(jl->j_commit_bh);
+ reiserfs_write_lock_nested(s, depth);
+@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(
+
+ if (reiserfs_barrier_flush(sb))
+ __sync_dirty_buffer(journal->j_header_bh,
+- REQ_PREFLUSH | REQ_FUA);
++ REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ else
+ sync_dirty_buffer(journal->j_header_bh);
+
arm64-kvm-allow-unaligned-accesses-at-el2.patch
arm-kvm-allow-unaligned-accesses-at-hyp.patch
kvm-async_pf-avoid-async-pf-injection-when-in-guest-mode.patch
+dmaengine-usb-dmac-fix-dmaor-ae-bit-definition.patch
+dmaengine-ep93xx-always-start-from-base0.patch
+dmaengine-ep93xx-don-t-drain-the-transfers-in-terminate_all.patch
+dmaengine-mv_xor_v2-handle-mv_xor_v2_prep_sw_desc-error-properly.patch
+dmaengine-mv_xor_v2-properly-handle-wrapping-in-the-array-of-hw-descriptors.patch
+dmaengine-mv_xor_v2-do-not-use-descriptors-not-acked-by-async_tx.patch
+dmaengine-mv_xor_v2-enable-xor-engine-after-its-configuration.patch
+dmaengine-mv_xor_v2-fix-tx_submit-implementation.patch
+dmaengine-mv_xor_v2-remove-interrupt-coalescing.patch
+dmaengine-mv_xor_v2-set-dma-mask-to-40-bits.patch
+cfq-iosched-fix-the-delay-of-cfq_group-s-vdisktime-under-iops-mode.patch
+reiserfs-make-flush-bios-explicitely-sync.patch
+mtd-nand-tango-export-of-device-id-table-as-module-aliases.patch
+mtd-nand-tango-update-ecc_stats.corrected.patch
+xen-privcmd-support-correctly-64kb-page-granularity-when-mapping-memory.patch
+ext4-fix-seek_hole.patch
+ext4-keep-existing-extra-fields-when-inode-expands.patch
+ext4-fix-data-corruption-with-ext4_get_blocks_zero.patch
+ext4-fix-fdatasync-2-after-extent-manipulation-operations.patch
+drm-fix-oops-xserver-hang-when-unplugging-usb-drm-devices.patch
+usb-gadget-f_mass_storage-serialize-wake-and-sleep-execution.patch
+usb-musb-dsps-keep-vbus-on-for-host-only-mode.patch
+usb-chipidea-imx-do-not-access-clkonoff-on-i.mx51.patch
+usb-chipidea-udc-fix-null-pointer-dereference-if-udc_start-failed.patch
+usb-chipidea-debug-check-before-accessing-ci_role.patch
+staging-lustre-lov-remove-set_fs-call-from-lov_getstripe.patch
+iio-adc-bcm_iproc_adc-swap-primary-and-secondary-isr-handler-s.patch
+iio-light-ltr501-fix-interchanged-als-ps-register-field.patch
+iio-trigger-fix-null-pointer-dereference-in-iio_trigger_write_current.patch
+iio-proximity-as3935-fix-as3935_int-mask.patch
+iio-proximity-as3935-fix-iio_trigger_poll-issue.patch
+block-avoid-that-blk_exit_rl-triggers-a-use-after-free.patch
+mei-make-sysfs-modalias-format-similar-as-uevent-modalias.patch
+random-invalidate-batched-entropy-after-crng-init.patch
+cpufreq-cpufreq_register_driver-should-return-enodev-if-init-fails.patch
+target-re-add-check-to-reject-control-writes-with-overflow-data.patch
+drm-msm-expose-our-reservation-object-when-exporting-a-dmabuf.patch
+drm-msm-mdp5-use-__drm_atomic_helper_plane_duplicate_state.patch
+ahci-acer-sa5-271-ssd-not-detected-fix.patch
+rc-core-race-condition-during-ir_raw_event_register.patch
+cgroup-prevent-kill_css-from-being-called-more-than-once.patch
+input-elantech-add-fujitsu-lifebook-e546-e557-to-force-crc_enabled.patch
--- /dev/null
+From 0a33252e060e97ed3fbdcec9517672f1e91aaef3 Mon Sep 17 00:00:00 2001
+From: Oleg Drokin <green@linuxhacker.ru>
+Date: Fri, 26 May 2017 23:40:33 -0400
+Subject: staging/lustre/lov: remove set_fs() call from lov_getstripe()
+
+From: Oleg Drokin <green@linuxhacker.ru>
+
+commit 0a33252e060e97ed3fbdcec9517672f1e91aaef3 upstream.
+
+lov_getstripe() calls set_fs(KERNEL_DS) so that it can handle a struct
+lov_user_md pointer from user- or kernel-space. This changes the
+behavior of copy_from_user() on SPARC and may result in a misaligned
+access exception which in turn oopses the kernel. In fact,
+lov_getstripe() is never called with a kernel-space pointer for the
+relevant argument, so changing the address limits is unnecessary, and
+we remove the calls to save, set, and restore the address
+limits.
+
+Signed-off-by: John L. Hammond <john.hammond@intel.com>
+Reviewed-on: http://review.whamcloud.com/6150
+Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3221
+Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
+Reviewed-by: Li Wei <wei.g.li@intel.com>
+Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/lustre/lustre/lov/lov_pack.c | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
++++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
+@@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj
+ size_t lmmk_size;
+ size_t lum_size;
+ int rc;
+- mm_segment_t seg;
+
+ if (!lsm)
+ return -ENODATA;
+
+- /*
+- * "Switch to kernel segment" to allow copying from kernel space by
+- * copy_{to,from}_user().
+- */
+- seg = get_fs();
+- set_fs(KERNEL_DS);
+-
+ if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
+ CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+@@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj
+ out_free:
+ kvfree(lmmk);
+ out:
+- set_fs(seg);
+ return rc;
+ }
--- /dev/null
+From 4ff83daa0200affe1894bd33d17bac404e3d78d4 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Thu, 11 May 2017 01:07:24 -0700
+Subject: target: Re-add check to reject control WRITEs with overflow data
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 4ff83daa0200affe1894bd33d17bac404e3d78d4 upstream.
+
+During v4.3 when the overflow/underflow check was relaxed by
+commit c72c525022:
+
+ commit c72c5250224d475614a00c1d7e54a67f77cd3410
+ Author: Roland Dreier <roland@purestorage.com>
+ Date: Wed Jul 22 15:08:18 2015 -0700
+
+ target: allow underflow/overflow for PR OUT etc. commands
+
+to allow underflow/overflow for Windows compliance + FCP, a
+consequence was to allow control CDBs to process overflow
+data for iscsi-target with immediate data as well.
+
+As per Roland's original change, continue to allow underflow
+cases for control CDBs to make Windows compliance + FCP happy,
+but until overflow for control CDBs is supported tree-wide,
+explicitly reject all control WRITEs with overflow following
+pre v4.3.y logic.
+
+Reported-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Cc: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_transport.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd
+ if (cmd->unknown_data_length) {
+ cmd->data_length = size;
+ } else if (size != cmd->data_length) {
+- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
++ pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ cmd->data_length, size, cmd->t_task_cdb[0]);
+
+- if (cmd->data_direction == DMA_TO_DEVICE &&
+- cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+- pr_err("Rejecting underflow/overflow WRITE data\n");
+- return TCM_INVALID_CDB_FIELD;
++ if (cmd->data_direction == DMA_TO_DEVICE) {
++ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
++ pr_err_ratelimited("Rejecting underflow/overflow"
++ " for WRITE data CDB\n");
++ return TCM_INVALID_CDB_FIELD;
++ }
++ /*
++ * Some fabric drivers like iscsi-target still expect to
++ * always reject overflow writes. Reject this case until
++ * full fabric driver level support for overflow writes
++ * is introduced tree-wide.
++ */
++ if (size > cmd->data_length) {
++ pr_err_ratelimited("Rejecting overflow for"
++ " WRITE control CDB\n");
++ return TCM_INVALID_CDB_FIELD;
++ }
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
--- /dev/null
+From 0340ff83cd4475261e7474033a381bc125b45244 Mon Sep 17 00:00:00 2001
+From: Michael Thalmeier <michael.thalmeier@hale.at>
+Date: Thu, 18 May 2017 16:14:14 +0200
+Subject: usb: chipidea: debug: check before accessing ci_role
+
+From: Michael Thalmeier <michael.thalmeier@hale.at>
+
+commit 0340ff83cd4475261e7474033a381bc125b45244 upstream.
+
+ci_role BUGs when the role is >= CI_ROLE_END.
+
+Signed-off-by: Michael Thalmeier <michael.thalmeier@hale.at>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/chipidea/debug.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file
+ {
+ struct ci_hdrc *ci = s->private;
+
+- seq_printf(s, "%s\n", ci_role(ci)->name);
++ if (ci->role != CI_ROLE_END)
++ seq_printf(s, "%s\n", ci_role(ci)->name);
+
+ return 0;
+ }
--- /dev/null
+From 62b97d502bb76c6e8d589e42e02bfcb7bdff0453 Mon Sep 17 00:00:00 2001
+From: Andrey Smirnov <andrew.smirnov@gmail.com>
+Date: Mon, 15 May 2017 06:48:58 -0700
+Subject: usb: chipidea: imx: Do not access CLKONOFF on i.MX51
+
+From: Andrey Smirnov <andrew.smirnov@gmail.com>
+
+commit 62b97d502bb76c6e8d589e42e02bfcb7bdff0453 upstream.
+
+Unlike i.MX53, i.MX51's USBOH3 register file does not implement
+registers past offset 0x018, which includes
+MX53_USB_CLKONOFF_CTRL_OFFSET, and trying to access that register on
+said platform results in an external abort.
+
+Fix it by enabling CLKONOFF accessing codepath only for i.MX53.
+
+Fixes: 3be3251db088 ("usb: chipidea: imx: Disable internal 60Mhz clock with ULPI PHY")
+Cc: cphealy@gmail.com
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-usb@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Andrey Smirnov <andrew.smirnov@gmail.com>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/chipidea/usbmisc_imx.c | 41 ++++++++++++++++++++++++++++---------
+ 1 file changed, 32 insertions(+), 9 deletions(-)
+
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -108,6 +108,8 @@ struct imx_usbmisc {
+ const struct usbmisc_ops *ops;
+ };
+
++static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
++
+ static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
+ {
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+@@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+- /* Disable internal 60Mhz clock */
+- reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+- val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+- writel(val, reg);
++ if (is_imx53_usbmisc(data)) {
++ /* Disable internal 60Mhz clock */
++ reg = usbmisc->base +
++ MX53_USB_CLKONOFF_CTRL_OFFSET;
++ val = readl(reg) |
++ MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
++ writel(val, reg);
++ }
++
+ }
+ if (data->disable_oc) {
+ reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
+@@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+- /* Disable internal 60Mhz clock */
+- reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+- val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+- writel(val, reg);
++
++ if (is_imx53_usbmisc(data)) {
++ /* Disable internal 60Mhz clock */
++ reg = usbmisc->base +
++ MX53_USB_CLKONOFF_CTRL_OFFSET;
++ val = readl(reg) |
++ MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
++ writel(val, reg);
++ }
+ }
+ if (data->disable_oc) {
+ reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
+@@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_us
+ .init = usbmisc_imx27_init,
+ };
+
++static const struct usbmisc_ops imx51_usbmisc_ops = {
++ .init = usbmisc_imx53_init,
++};
++
+ static const struct usbmisc_ops imx53_usbmisc_ops = {
+ .init = usbmisc_imx53_init,
+ };
+@@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_us
+ .set_wakeup = usbmisc_imx7d_set_wakeup,
+ };
+
++static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
++{
++ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
++
++ return usbmisc->ops == &imx53_usbmisc_ops;
++}
++
+ int imx_usbmisc_init(struct imx_usbmisc_data *data)
+ {
+ struct imx_usbmisc *usbmisc;
+@@ -536,7 +559,7 @@ static const struct of_device_id usbmisc
+ },
+ {
+ .compatible = "fsl,imx51-usbmisc",
+- .data = &imx53_usbmisc_ops,
++ .data = &imx51_usbmisc_ops,
+ },
+ {
+ .compatible = "fsl,imx53-usbmisc",
--- /dev/null
+From aa1f058d7d9244423b8c5a75b9484b1115df7f02 Mon Sep 17 00:00:00 2001
+From: Jisheng Zhang <jszhang@marvell.com>
+Date: Mon, 24 Apr 2017 12:35:51 +0000
+Subject: usb: chipidea: udc: fix NULL pointer dereference if udc_start failed
+
+From: Jisheng Zhang <jszhang@marvell.com>
+
+commit aa1f058d7d9244423b8c5a75b9484b1115df7f02 upstream.
+
+Fix the NULL pointer dereference below. We set ci->roles[CI_ROLE_GADGET]
+too early in ci_hdrc_gadget_init(); if udc_start() fails for some
+reason, the ci->roles[CI_ROLE_GADGET] check in ci_hdrc_gadget_destroy()
+can't protect us.
+
+We fix this issue by only setting ci->roles[CI_ROLE_GADGET] if
+udc_start() succeeds.
+
+[ 1.398550] Unable to handle kernel NULL pointer dereference at
+virtual address 00000000
+...
+[ 1.448600] PC is at dma_pool_free+0xb8/0xf0
+[ 1.453012] LR is at dma_pool_free+0x28/0xf0
+[ 2.113369] [<ffffff80081817d8>] dma_pool_free+0xb8/0xf0
+[ 2.118857] [<ffffff800841209c>] destroy_eps+0x4c/0x68
+[ 2.124165] [<ffffff8008413770>] ci_hdrc_gadget_destroy+0x28/0x50
+[ 2.130461] [<ffffff800840fa30>] ci_hdrc_probe+0x588/0x7e8
+[ 2.136129] [<ffffff8008380fb8>] platform_drv_probe+0x50/0xb8
+[ 2.142066] [<ffffff800837f494>] driver_probe_device+0x1fc/0x2a8
+[ 2.148270] [<ffffff800837f68c>] __device_attach_driver+0x9c/0xf8
+[ 2.154563] [<ffffff800837d570>] bus_for_each_drv+0x58/0x98
+[ 2.160317] [<ffffff800837f174>] __device_attach+0xc4/0x138
+[ 2.166072] [<ffffff800837f738>] device_initial_probe+0x10/0x18
+[ 2.172185] [<ffffff800837e58c>] bus_probe_device+0x94/0xa0
+[ 2.177940] [<ffffff800837c560>] device_add+0x3f0/0x560
+[ 2.183337] [<ffffff8008380d20>] platform_device_add+0x180/0x240
+[ 2.189541] [<ffffff800840f0e8>] ci_hdrc_add_device+0x440/0x4f8
+[ 2.195654] [<ffffff8008414194>] ci_hdrc_usb2_probe+0x13c/0x2d8
+[ 2.201769] [<ffffff8008380fb8>] platform_drv_probe+0x50/0xb8
+[ 2.207705] [<ffffff800837f494>] driver_probe_device+0x1fc/0x2a8
+[ 2.213910] [<ffffff800837f5ec>] __driver_attach+0xac/0xb0
+[ 2.219575] [<ffffff800837d4b0>] bus_for_each_dev+0x60/0xa0
+[ 2.225329] [<ffffff800837ec80>] driver_attach+0x20/0x28
+[ 2.230816] [<ffffff800837e880>] bus_add_driver+0x1d0/0x238
+[ 2.236571] [<ffffff800837fdb0>] driver_register+0x60/0xf8
+[ 2.242237] [<ffffff8008380ef4>] __platform_driver_register+0x44/0x50
+[ 2.248891] [<ffffff80086fd440>] ci_hdrc_usb2_driver_init+0x18/0x20
+[ 2.255365] [<ffffff8008082950>] do_one_initcall+0x38/0x128
+[ 2.261121] [<ffffff80086e0d00>] kernel_init_freeable+0x1ac/0x250
+[ 2.267414] [<ffffff800852f0b8>] kernel_init+0x10/0x100
+[ 2.272810] [<ffffff8008082680>] ret_from_fork+0x10/0x50
+
+Fixes: 3f124d233e97 ("usb: chipidea: add role init and destroy APIs")
+Signed-off-by: Jisheng Zhang <jszhang@marvell.com>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/chipidea/udc.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1984,6 +1984,7 @@ static void udc_id_switch_for_host(struc
+ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
+ {
+ struct ci_role_driver *rdrv;
++ int ret;
+
+ if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
+ return -ENXIO;
+@@ -1996,7 +1997,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *
+ rdrv->stop = udc_id_switch_for_host;
+ rdrv->irq = udc_irq;
+ rdrv->name = "gadget";
+- ci->roles[CI_ROLE_GADGET] = rdrv;
+
+- return udc_start(ci);
++ ret = udc_start(ci);
++ if (!ret)
++ ci->roles[CI_ROLE_GADGET] = rdrv;
++
++ return ret;
+ }
--- /dev/null
+From dc9217b69dd6089dcfeb86ed4b3c671504326087 Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 11 May 2017 17:26:48 -0700
+Subject: usb: gadget: f_mass_storage: Serialize wake and sleep execution
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit dc9217b69dd6089dcfeb86ed4b3c671504326087 upstream.
+
+f_mass_storage has a memory barrier issue with the sleep and wake
+functions that can cause a deadlock. This results in intermittent hangs
+during MSC file transfer. The host will reset the device after receiving
+no response to resume the transfer. This issue is seen when dwc3 is
+processing 2 transfer-in-progress events at the same time, invoking
+completion handlers for CSW and CBW. Also this issue occurs depending on
+the system timing and latency.
+
+To increase the chance of hitting this issue, you can force the dwc3 driver to
+wait and process those 2 events at once by adding a small delay (~100us)
+in dwc3_check_event_buf() whenever the request is for CSW and read the
+event count again. Avoid debugging with printk and ftrace as extra
+delays and memory barrier will mask this issue.
+
+Scenario which can lead to failure:
+-----------------------------------
+1) The main thread sleeps and waits for the next command in
+ get_next_command().
+2) bulk_in_complete() wakes up main thread for CSW.
+3) bulk_out_complete() tries to wake up the running main thread for CBW.
+4) thread_wakeup_needed is not loaded with correct value in
+ sleep_thread().
+5) Main thread goes to sleep again.
+
+The pattern is shown below. Note the 2 critical variables.
+ * common->thread_wakeup_needed
+ * bh->state
+
+ CPU 0 (sleep_thread) CPU 1 (wakeup_thread)
+ ============================== ===============================
+
+ bh->state = BH_STATE_FULL;
+ smp_wmb();
+ thread_wakeup_needed = 0; thread_wakeup_needed = 1;
+ smp_rmb();
+ if (bh->state != BH_STATE_FULL)
+ sleep again ...
+
+As pointed out by Alan Stern, this is an R-pattern issue. The issue can
+be seen when there are two wakeups in quick succession. The
+thread_wakeup_needed can be overwritten in sleep_thread, and the read of
+the bh->state may be reordered before the write to thread_wakeup_needed.
+
+This patch applies full memory barrier smp_mb() in both sleep_thread()
+and wakeup_thread() to ensure the order which the thread_wakeup_needed
+and bh->state are written and loaded.
+
+However, a better solution in the future would be to use wait_queue
+method that takes care of managing memory barrier between waker and
+waiter.
+
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Thinh Nguyen <thinhn@synopsys.com>
+Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/gadget/function/f_mass_storage.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *
+ /* Caller must hold fsg->lock */
+ static void wakeup_thread(struct fsg_common *common)
+ {
+- smp_wmb(); /* ensure the write of bh->state is complete */
++ /*
++ * Ensure the reading of thread_wakeup_needed
++ * and the writing of bh->state are completed
++ */
++ smp_mb();
+ /* Tell the main thread that something has happened */
+ common->thread_wakeup_needed = 1;
+ if (common->thread_task)
+@@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_commo
+ }
+ __set_current_state(TASK_RUNNING);
+ common->thread_wakeup_needed = 0;
+- smp_rmb(); /* ensure the latest bh->state is visible */
++
++ /*
++ * Ensure the writing of thread_wakeup_needed
++ * and the reading of bh->state are completed
++ */
++ smp_mb();
+ return rc;
+ }
+
--- /dev/null
+From b3addcf0d1f04f53fcc302577d5a5e964c18531a Mon Sep 17 00:00:00 2001
+From: Bin Liu <b-liu@ti.com>
+Date: Thu, 25 May 2017 13:42:39 -0500
+Subject: usb: musb: dsps: keep VBUS on for host-only mode
+
+From: Bin Liu <b-liu@ti.com>
+
+commit b3addcf0d1f04f53fcc302577d5a5e964c18531a upstream.
+
+Currently VBUS is turned off when a USB device is detached, and turned
+on again by the polling routine. This short period of VBUS loss prevents
+a USB modem from switching mode.
+
+VBUS should be constantly on for host-only mode, so this changes the
+driver to not turn off VBUS for host-only mode.
+
+Fixes: 2f3fd2c5bde1 ("usb: musb: Prepare dsps glue layer for PM runtime support")
+Reported-by: Moreno Bartalucci <moreno.bartalucci@tecnorama.it>
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Bin Liu <b-liu@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/musb/musb_dsps.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -245,6 +245,11 @@ static int dsps_check_status(struct musb
+ dsps_mod_timer_optional(glue);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
++ /* keep VBUS on for host-only mode */
++ if (musb->port_mode == MUSB_PORT_MODE_HOST) {
++ dsps_mod_timer_optional(glue);
++ break;
++ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ skip_session = 1;
+ /* fall */
--- /dev/null
+From 753c09b5652bb4fe53e2db648002ec64b32b8827 Mon Sep 17 00:00:00 2001
+From: Julien Grall <julien.grall@arm.com>
+Date: Wed, 31 May 2017 14:03:57 +0100
+Subject: xen/privcmd: Support correctly 64KB page granularity when mapping memory
+
+From: Julien Grall <julien.grall@arm.com>
+
+commit 753c09b5652bb4fe53e2db648002ec64b32b8827 upstream.
+
+Commit 5995a68 "xen/privcmd: Add support for Linux 64KB page granularity" did
+not go far enough to support 64KB in mmap_batch_fn.
+
+The variable 'nr' is the number of 4KB chunks to map. However, when Linux
+is using 64KB page granularity, the array of pages (vma->vm_private_data)
+contains one page per 64KB. Fix it by incrementing st->index correctly.
+
+Furthermore, st->va is not correctly incremented as PAGE_SIZE !=
+XEN_PAGE_SIZE.
+
+Fixes: 5995a68 ("xen/privcmd: Add support for Linux 64KB page granularity")
+Reported-by: Feng Kan <fkan@apm.com>
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/privcmd.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int
+ st->global_error = 1;
+ }
+ }
+- st->va += PAGE_SIZE * nr;
+- st->index += nr;
++ st->va += XEN_PAGE_SIZE * nr;
++ st->index += nr / XEN_PFN_PER_PAGE;
+
+ return 0;
+ }