--- /dev/null
+From 441ad62d6c3f131f1dbd7dcdd9cbe3f74dbd8501 Mon Sep 17 00:00:00 2001
+From: Dmitry Tunin <hanipouspilot@gmail.com>
+Date: Thu, 5 Jan 2017 13:19:53 +0300
+Subject: Bluetooth: Add another AR3012 04ca:3018 device
+
+From: Dmitry Tunin <hanipouspilot@gmail.com>
+
+commit 441ad62d6c3f131f1dbd7dcdd9cbe3f74dbd8501 upstream.
+
+T: Bus=01 Lev=01 Prnt=01 Port=07 Cnt=04 Dev#= 5 Spd=12 MxCh= 0
+D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=04ca ProdID=3018 Rev=00.01
+C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA
+I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+
+Signed-off-by: Dmitry Tunin <hanipouspilot@gmail.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/ath3k.c | 2 ++
+ drivers/bluetooth/btusb.c | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_
+ { USB_DEVICE(0x04CA, 0x300f) },
+ { USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
++ { USB_DEVICE(0x04CA, 0x3018) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0930, 0x021c) },
+ { USB_DEVICE(0x0930, 0x0220) },
+@@ -160,6 +161,7 @@ static const struct usb_device_id ath3k_
+ { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -208,6 +208,7 @@ static const struct usb_device_id blackl
+ { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
--- /dev/null
+From df963ea8a082d31521a120e8e31a29ad8a1dc215 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Tue, 14 Feb 2017 10:09:40 -0500
+Subject: ceph: remove req from unsafe list when unregistering it
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit df963ea8a082d31521a120e8e31a29ad8a1dc215 upstream.
+
+There's no reason a request should ever be on an s_unsafe list but not
+in the request tree.
+
+Link: http://tracker.ceph.com/issues/18474
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Reviewed-by: Yan, Zheng <zyan@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/mds_client.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -644,6 +644,9 @@ static void __unregister_request(struct
+ {
+ dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+
++ /* Never leave an unregistered request on an unsafe list! */
++ list_del_init(&req->r_unsafe_item);
++
+ if (req->r_tid == mdsc->oldest_tid) {
+ struct rb_node *p = rb_next(&req->r_node);
+ mdsc->oldest_tid = 0;
+@@ -1051,7 +1054,6 @@ static void cleanup_session_requests(str
+ while (!list_empty(&session->s_unsafe)) {
+ req = list_first_entry(&session->s_unsafe,
+ struct ceph_mds_request, r_unsafe_item);
+- list_del_init(&req->r_unsafe_item);
+ pr_warn_ratelimited(" dropping unsafe request %llu\n",
+ req->r_tid);
+ __unregister_request(mdsc, req);
+@@ -2477,7 +2479,6 @@ static void handle_reply(struct ceph_mds
+ * useful we could do with a revised return value.
+ */
+ dout("got safe reply %llu, mds%d\n", tid, mds);
+- list_del_init(&req->r_unsafe_item);
+
+ /* last unsafe request during umount? */
+ if (mdsc->stopping && !__get_oldest_req(mdsc))
--- /dev/null
+From 2b0841766a898aba84630fb723989a77a9d3b4e6 Mon Sep 17 00:00:00 2001
+From: Erez Shitrit <erezsh@mellanox.com>
+Date: Wed, 1 Feb 2017 19:10:05 +0200
+Subject: IB/IPoIB: Add destination address when re-queue packet
+
+From: Erez Shitrit <erezsh@mellanox.com>
+
+commit 2b0841766a898aba84630fb723989a77a9d3b4e6 upstream.
+
+When sending a packet to a destination that has not yet been resolved
+via a path query, the driver keeps the skb and tries to re-send it
+once the path is resolved.
+
+But when re-sending via dev_queue_xmit the kernel doesn't call
+dev_hard_header, so IPoIB needs to keep 20 bytes in the skb
+and to put the destination address inside them.
+
+That way dev_start_xmit will have the correct destination, and the
+driver won't take the destination from skb->data, where nothing
+exists, which would cause the packet to be dropped.
+
+The test flow is:
+1. Run the SM on a remote node.
+2. Restart the driver.
+3. Ping some destination.
+4. Observe that the first ICMP request is dropped.
+
+Fixes: fc791b633515 ("IB/ipoib: move back IB LL address into the hard header")
+Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
+Signed-off-by: Noa Osherovich <noaos@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Tested-by: Yuval Shaia <yuval.shaia@oracle.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_main.c | 30 +++++++++++++++++-------------
+ 1 file changed, 17 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -626,6 +626,14 @@ void ipoib_mark_paths_invalid(struct net
+ spin_unlock_irq(&priv->lock);
+ }
+
++static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
++{
++ struct ipoib_pseudo_header *phdr;
++
++ phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
++ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
++}
++
+ void ipoib_flush_paths(struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -850,8 +858,7 @@ static void neigh_add_path(struct sk_buf
+ }
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, IPOIB_PSEUDO_LEN);
++ push_pseudo_header(skb, neigh->daddr);
+ __skb_queue_tail(&neigh->queue, skb);
+ } else {
+ ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+@@ -869,10 +876,12 @@ static void neigh_add_path(struct sk_buf
+
+ if (!path->query && path_rec_start(dev, path))
+ goto err_path;
+- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
++ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ push_pseudo_header(skb, neigh->daddr);
+ __skb_queue_tail(&neigh->queue, skb);
+- else
++ } else {
+ goto err_drop;
++ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -908,8 +917,7 @@ static void unicast_arp_send(struct sk_b
+ }
+ if (path) {
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, IPOIB_PSEUDO_LEN);
++ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -941,8 +949,7 @@ static void unicast_arp_send(struct sk_b
+ return;
+ } else if ((path->query || !path_rec_start(dev, path)) &&
+ skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, IPOIB_PSEUDO_LEN);
++ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -1023,8 +1030,7 @@ send_using_neigh:
+ }
+
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, sizeof(*phdr));
++ push_pseudo_header(skb, phdr->hwaddr);
+ spin_lock_irqsave(&priv->lock, flags);
+ __skb_queue_tail(&neigh->queue, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1056,7 +1062,6 @@ static int ipoib_hard_header(struct sk_b
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned len)
+ {
+- struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+
+ header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+@@ -1069,8 +1074,7 @@ static int ipoib_hard_header(struct sk_b
+ * destination address into skb hard header so we can figure out where
+ * to send the packet later.
+ */
+- phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
++ push_pseudo_header(skb, daddr);
+
+ return IPOIB_HARD_LEN;
+ }
--- /dev/null
+From 0a0007f28304cb9fc87809c86abb80ec71317f20 Mon Sep 17 00:00:00 2001
+From: Feras Daoud <ferasda@mellanox.com>
+Date: Wed, 28 Dec 2016 14:47:23 +0200
+Subject: IB/ipoib: Fix deadlock between rmmod and set_mode
+
+From: Feras Daoud <ferasda@mellanox.com>
+
+commit 0a0007f28304cb9fc87809c86abb80ec71317f20 upstream.
+
+When calling set_mode from sysfs, the call flow takes the sysfs lock
+first and then tries to take rtnl_lock (when calling ipoib_set_mode).
+On the other hand, the rmmod call flow takes rtnl_lock first
+(when calling unregister_netdev) and then tries to take the sysfs
+lock: a classic a->b, b->a deadlock.
+
+The problem starts when ipoib_set_mode releases rtnl_lock and tries
+to reacquire it afterwards.
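+
+The fix below converts the blocking re-acquire into a try-lock so the
+sysfs path can back off with -EBUSY instead of deadlocking. A minimal
+userspace analogue of that pattern (pthread-based sketch, names made
+up for illustration):
+
+  #include <pthread.h>
+
+  static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  /* Called with big_lock held. Returns 0 with big_lock re-held, or
+   * -1 (treated like -EBUSY) without it, so the caller knows whether
+   * an unlock is owed. */
+  static int set_mode_slowpath(void)
+  {
+          pthread_mutex_unlock(&big_lock);
+          /* ... slow work that must not hold big_lock ... */
+          return pthread_mutex_trylock(&big_lock) == 0 ? 0 : -1;
+  }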
+
+ set_mode:
+ [<ffffffff8104f2bd>] ? check_preempt_curr+0x6d/0x90
+ [<ffffffff814fee8e>] __mutex_lock_slowpath+0x13e/0x180
+ [<ffffffff81448655>] ? __rtnl_unlock+0x15/0x20
+ [<ffffffff814fed2b>] mutex_lock+0x2b/0x50
+ [<ffffffff81448675>] rtnl_lock+0x15/0x20
+ [<ffffffffa02ad807>] ipoib_set_mode+0x97/0x160 [ib_ipoib]
+ [<ffffffffa02b5f5b>] set_mode+0x3b/0x80 [ib_ipoib]
+ [<ffffffff8134b840>] dev_attr_store+0x20/0x30
+ [<ffffffff811f0fe5>] sysfs_write_file+0xe5/0x170
+ [<ffffffff8117b068>] vfs_write+0xb8/0x1a0
+ [<ffffffff8117ba81>] sys_write+0x51/0x90
+ [<ffffffff8100b0f2>] system_call_fastpath+0x16/0x1b
+
+ rmmod:
+ [<ffffffff81279ffc>] ? put_dec+0x10c/0x110
+ [<ffffffff8127a2ee>] ? number+0x2ee/0x320
+ [<ffffffff814fe6a5>] schedule_timeout+0x215/0x2e0
+ [<ffffffff8127cc04>] ? vsnprintf+0x484/0x5f0
+ [<ffffffff8127b550>] ? string+0x40/0x100
+ [<ffffffff814fe323>] wait_for_common+0x123/0x180
+ [<ffffffff81060250>] ? default_wake_function+0x0/0x20
+ [<ffffffff8119661e>] ? ifind_fast+0x5e/0xb0
+ [<ffffffff814fe43d>] wait_for_completion+0x1d/0x20
+ [<ffffffff811f2e68>] sysfs_addrm_finish+0x228/0x270
+ [<ffffffff811f2fb3>] sysfs_remove_dir+0xa3/0xf0
+ [<ffffffff81273f66>] kobject_del+0x16/0x40
+ [<ffffffff8134cd14>] device_del+0x184/0x1e0
+ [<ffffffff8144e59b>] netdev_unregister_kobject+0xab/0xc0
+ [<ffffffff8143c05e>] rollback_registered+0xae/0x130
+ [<ffffffff8143c102>] unregister_netdevice+0x22/0x70
+ [<ffffffff8143c16e>] unregister_netdev+0x1e/0x30
+ [<ffffffffa02a91b0>] ipoib_remove_one+0xe0/0x120 [ib_ipoib]
+ [<ffffffffa01ed95f>] ib_unregister_device+0x4f/0x100 [ib_core]
+ [<ffffffffa021f5e1>] mlx4_ib_remove+0x41/0x180 [mlx4_ib]
+ [<ffffffffa01ab771>] mlx4_remove_device+0x71/0x90 [mlx4_core]
+
+Fixes: 862096a8bbf8 ("IB/ipoib: Add more rtnl_link_ops callbacks")
+Cc: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Feras Daoud <ferasda@mellanox.com>
+Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c | 12 +++++++-----
+ drivers/infiniband/ulp/ipoib/ipoib_main.c | 6 ++----
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1488,12 +1488,14 @@ static ssize_t set_mode(struct device *d
+
+ ret = ipoib_set_mode(dev, buf);
+
+- rtnl_unlock();
++ /* The assumption is that ipoib_set_mode returns with the rtnl
++ * lock held by it; only if it returned -EBUSY is there no need
++ * to call rtnl_unlock.
++ */
++ if (ret != -EBUSY)
++ rtnl_unlock();
+
+- if (!ret)
+- return count;
+-
+- return ret;
++ return (!ret || ret == -EBUSY) ? count : ret;
+ }
+
+ static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -464,8 +464,7 @@ int ipoib_set_mode(struct net_device *de
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+ ipoib_flush_paths(dev);
+- rtnl_lock();
+- return 0;
++ return (!rtnl_trylock()) ? -EBUSY : 0;
+ }
+
+ if (!strcmp(buf, "datagram\n")) {
+@@ -474,8 +473,7 @@ int ipoib_set_mode(struct net_device *de
+ dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ rtnl_unlock();
+ ipoib_flush_paths(dev);
+- rtnl_lock();
+- return 0;
++ return (!rtnl_trylock()) ? -EBUSY : 0;
+ }
+
+ return -EINVAL;
--- /dev/null
+From 6cb72bc1b40bb2c1750ee7a5ebade93bed49a5fb Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Tue, 14 Feb 2017 10:56:30 -0800
+Subject: IB/srp: Avoid that duplicate responses trigger a kernel bug
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit 6cb72bc1b40bb2c1750ee7a5ebade93bed49a5fb upstream.
+
+After srp_process_rsp() returns there is a short window during which
+the scsi_host_find_tag() call will return a pointer to the SCSI
+command that is being completed. If a duplicate response is received
+during that window, prevent the following crash from appearing:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+IP: srp_recv_done+0x450/0x6b0 [ib_srp]
+Oops: 0000 [#1] SMP
+CPU: 10 PID: 0 Comm: swapper/10 Not tainted 4.10.0-rc7-dbg+ #1
+Call Trace:
+ <IRQ>
+ __ib_process_cq+0x4b/0xd0 [ib_core]
+ ib_poll_handler+0x1d/0x70 [ib_core]
+ irq_poll_softirq+0xba/0x120
+ __do_softirq+0xba/0x4c0
+ irq_exit+0xbe/0xd0
+ smp_apic_timer_interrupt+0x38/0x50
+ apic_timer_interrupt+0x90/0xa0
+ </IRQ>
+RIP: srp_recv_done+0x450/0x6b0 [ib_srp] RSP: ffff88046f483e20
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Cc: Israel Rukshin <israelr@mellanox.com>
+Cc: Max Gurtovoy <maxg@mellanox.com>
+Cc: Laurence Oberman <loberman@redhat.com>
+Cc: Steve Feeley <Steve.Feeley@sandisk.com>
+Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/srp/ib_srp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1795,9 +1795,11 @@ static void srp_process_rsp(struct srp_r
+ complete(&ch->tsk_mgmt_done);
+ } else {
+ scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+- if (scmnd) {
++ if (scmnd && scmnd->host_scribble) {
+ req = (void *)scmnd->host_scribble;
+ scmnd = srp_claim_req(ch, req, NULL, scmnd);
++ } else {
++ scmnd = NULL;
+ }
+ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
--- /dev/null
+From 0a6fdbdeb1c25e31763c1fb333fa2723a7d2aba6 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Tue, 14 Feb 2017 10:56:31 -0800
+Subject: IB/srp: Fix race conditions related to task management
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit 0a6fdbdeb1c25e31763c1fb333fa2723a7d2aba6 upstream.
+
+Avoid srp_process_rsp() overwriting the status information
+in ch if the SRP target response timed out and processing of
+another task management function has already started. Also avoid
+list corruption when multiple task management functions are issued
+concurrently. This patch prevents the following stack trace
+from appearing in the system log:
+
+WARNING: CPU: 8 PID: 9269 at lib/list_debug.c:52 __list_del_entry_valid+0xbc/0xc0
+list_del corruption. prev->next should be ffffc90004bb7b00, but was ffff8804052ecc68
+CPU: 8 PID: 9269 Comm: sg_reset Tainted: G W 4.10.0-rc7-dbg+ #3
+Call Trace:
+ dump_stack+0x68/0x93
+ __warn+0xc6/0xe0
+ warn_slowpath_fmt+0x4a/0x50
+ __list_del_entry_valid+0xbc/0xc0
+ wait_for_completion_timeout+0x12e/0x170
+ srp_send_tsk_mgmt+0x1ef/0x2d0 [ib_srp]
+ srp_reset_device+0x5b/0x110 [ib_srp]
+ scsi_ioctl_reset+0x1c7/0x290
+ scsi_ioctl+0x12a/0x420
+ sd_ioctl+0x9d/0x100
+ blkdev_ioctl+0x51e/0x9f0
+ block_ioctl+0x38/0x40
+ do_vfs_ioctl+0x8f/0x700
+ SyS_ioctl+0x3c/0x70
+ entry_SYSCALL_64_fastpath+0x18/0xad
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Cc: Israel Rukshin <israelr@mellanox.com>
+Cc: Max Gurtovoy <maxg@mellanox.com>
+Cc: Laurence Oberman <loberman@redhat.com>
+Cc: Steve Feeley <Steve.Feeley@sandisk.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/srp/ib_srp.c | 45 +++++++++++++++++++++++-------------
+ drivers/infiniband/ulp/srp/ib_srp.h | 1
+ 2 files changed, 30 insertions(+), 16 deletions(-)
+
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1787,12 +1787,17 @@ static void srp_process_rsp(struct srp_r
+ if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
++ if (rsp->tag == ch->tsk_mgmt_tag) {
++ ch->tsk_mgmt_status = -1;
++ if (be32_to_cpu(rsp->resp_data_len) >= 4)
++ ch->tsk_mgmt_status = rsp->data[3];
++ complete(&ch->tsk_mgmt_done);
++ } else {
++ shost_printk(KERN_ERR, target->scsi_host,
++ "Received tsk mgmt response too late for tag %#llx\n",
++ rsp->tag);
++ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+-
+- ch->tsk_mgmt_status = -1;
+- if (be32_to_cpu(rsp->resp_data_len) >= 4)
+- ch->tsk_mgmt_status = rsp->data[3];
+- complete(&ch->tsk_mgmt_done);
+ } else {
+ scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+ if (scmnd && scmnd->host_scribble) {
+@@ -2471,19 +2476,18 @@ srp_change_queue_depth(struct scsi_devic
+ }
+
+ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+- u8 func)
++ u8 func, u8 *status)
+ {
+ struct srp_target_port *target = ch->target;
+ struct srp_rport *rport = target->rport;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
+ struct srp_iu *iu;
+ struct srp_tsk_mgmt *tsk_mgmt;
++ int res;
+
+ if (!ch->connected || target->qp_in_error)
+ return -1;
+
+- init_completion(&ch->tsk_mgmt_done);
+-
+ /*
+ * Lock the rport mutex to avoid that srp_create_ch_ib() is
+ * invoked while a task management function is being sent.
+@@ -2506,10 +2510,16 @@ static int srp_send_tsk_mgmt(struct srp_
+
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ int_to_scsilun(lun, &tsk_mgmt->lun);
+- tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->tsk_mgmt_func = func;
+ tsk_mgmt->task_tag = req_tag;
+
++ spin_lock_irq(&ch->lock);
++ ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
++ tsk_mgmt->tag = ch->tsk_mgmt_tag;
++ spin_unlock_irq(&ch->lock);
++
++ init_completion(&ch->tsk_mgmt_done);
++
+ ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
+ if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
+@@ -2518,13 +2528,15 @@ static int srp_send_tsk_mgmt(struct srp_
+
+ return -1;
+ }
++ res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
++ msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
++ if (res > 0 && status)
++ *status = ch->tsk_mgmt_status;
+ mutex_unlock(&rport->mutex);
+
+- if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
+- msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+- return -1;
++ WARN_ON_ONCE(res < 0);
+
+- return 0;
++ return res > 0 ? 0 : -1;
+ }
+
+ static int srp_abort(struct scsi_cmnd *scmnd)
+@@ -2550,7 +2562,7 @@ static int srp_abort(struct scsi_cmnd *s
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Sending SRP abort for tag %#x\n", tag);
+ if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
+- SRP_TSK_ABORT_TASK) == 0)
++ SRP_TSK_ABORT_TASK, NULL) == 0)
+ ret = SUCCESS;
+ else if (target->rport->state == SRP_RPORT_LOST)
+ ret = FAST_IO_FAIL;
+@@ -2568,14 +2580,15 @@ static int srp_reset_device(struct scsi_
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_rdma_ch *ch;
+ int i;
++ u8 status;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
+
+ ch = &target->ch[0];
+ if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
+- SRP_TSK_LUN_RESET))
++ SRP_TSK_LUN_RESET, &status))
+ return FAILED;
+- if (ch->tsk_mgmt_status)
++ if (status)
+ return FAILED;
+
+ for (i = 0; i < target->ch_count; i++) {
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -168,6 +168,7 @@ struct srp_rdma_ch {
+ int max_ti_iu_len;
+ int comp_vector;
+
++ u64 tsk_mgmt_tag;
+ struct completion tsk_mgmt_done;
+ u8 tsk_mgmt_status;
+ bool connected;
--- /dev/null
+From 32677207dcc5e594254b7fb4fb2352b1755b1d5b Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Tue, 7 Feb 2017 12:05:25 -0500
+Subject: ktest: Fix child exit code processing
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 32677207dcc5e594254b7fb4fb2352b1755b1d5b upstream.
+
+The child_exit value needs to be shifted right by 8 bits to extract
+the child's exit code before comparing it against the return values
+for the bisect variables.
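+
+For reference, the Perl $? follows the POSIX wait-status encoding,
+in which the exit code occupies bits 8-15. A minimal C sketch of the
+same decoding (made-up exit code for illustration):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <sys/wait.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int status;
+          pid_t pid = fork();
+
+          if (pid == 0)
+                  exit(42);       /* child: exit code 42 */
+          waitpid(pid, &status, 0);
+          /* raw status is 42 << 8; shifting right by 8 recovers 42 */
+          printf("raw=%#x shifted=%d WEXITSTATUS=%d\n",
+                 status, status >> 8, WEXITSTATUS(status));
+          return 0;
+  }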
+
+Fixes: c5dacb88f0a64 ("ktest: Allow overriding bisect test results")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/ktest/ktest.pl | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2629,7 +2629,7 @@ sub do_run_test {
+ }
+
+ waitpid $child_pid, 0;
+- $child_exit = $?;
++ $child_exit = $? >> 8;
+
+ my $end_time = time;
+ $test_time = $end_time - $start_time;
--- /dev/null
+From e1e8a9624f7ba8ead4f056ff558ed070e86fa747 Mon Sep 17 00:00:00 2001
+From: Janosch Frank <frankja@linux.vnet.ibm.com>
+Date: Thu, 2 Feb 2017 16:39:31 +0100
+Subject: KVM: s390: Disable dirty log retrieval for UCONTROL guests
+
+From: Janosch Frank <frankja@linux.vnet.ibm.com>
+
+commit e1e8a9624f7ba8ead4f056ff558ed070e86fa747 upstream.
+
+User controlled KVM guests do not support the dirty log, as they have
+no single gmap that we can check for changes.
+
+Since there is no single gmap, kvm->arch.gmap is NULL and any
+further reference to it for dirty checking will result in a NULL
+dereference.
+
+Let's return -EINVAL if a caller tries to sync dirty logs for a
+UCONTROL guest.
+
+Fixes: 15f36eb ("KVM: s390: Add proper dirty bitmap support to S390 kvm.")
+Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
+Reported-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/kvm-s390.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -295,6 +295,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
++ if (kvm_is_ucontrol(kvm))
++ return -EINVAL;
++
+ mutex_lock(&kvm->slots_lock);
+
+ r = -EINVAL;
--- /dev/null
+From 96794e4ed4d758272c486e1529e431efb7045265 Mon Sep 17 00:00:00 2001
+From: Chao Peng <chao.p.peng@linux.intel.com>
+Date: Tue, 21 Feb 2017 03:50:01 -0500
+Subject: KVM: VMX: use correct vmcs_read/write for guest segment selector/base
+
+From: Chao Peng <chao.p.peng@linux.intel.com>
+
+commit 96794e4ed4d758272c486e1529e431efb7045265 upstream.
+
+The guest segment selector is a 16-bit field and the guest segment base
+is a natural-width field. Fix two incorrect invocations accordingly.
+
+Without this patch, build fails when aggressive inlining is used with ICC.
+
+Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3499,7 +3499,7 @@ static void fix_rmode_seg(int seg, struc
+ }
+
+ vmcs_write16(sf->selector, var.selector);
+- vmcs_write32(sf->base, var.base);
++ vmcs_writel(sf->base, var.base);
+ vmcs_write32(sf->limit, var.limit);
+ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
+ }
+@@ -7905,7 +7905,7 @@ static void kvm_flush_pml_buffers(struct
+ static void vmx_dump_sel(char *name, uint32_t sel)
+ {
+ pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
+- name, vmcs_read32(sel),
++ name, vmcs_read16(sel),
+ vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
+ vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
+ vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
--- /dev/null
+From a9e9200d8661c1a0be8c39f93deb383dc940de35 Mon Sep 17 00:00:00 2001
+From: Matt Chen <matt.chen@intel.com>
+Date: Sun, 22 Jan 2017 02:16:58 +0800
+Subject: mac80211: flush delayed work when entering suspend
+
+From: Matt Chen <matt.chen@intel.com>
+
+commit a9e9200d8661c1a0be8c39f93deb383dc940de35 upstream.
+
+The issue was found when going through a suspend/resume cycle.
+It triggers a warning in:
+mac80211/key.c: ieee80211_enable_keys()
+...
+WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+ sdata->crypto_tx_tailroom_pending_dec);
+...
+
+The warning shows that sdata->crypto_tx_tailroom_pending_dec isn't
+cleaned up by its delayed work before suspend. Add a
+flush_delayed_work(), which kicks the pending timer and waits for the
+work item to finish, to fix it.
+
+Signed-off-by: Matt Chen <matt.chen@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/pm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211
+ break;
+ }
+
++ flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ drv_remove_interface(local, sdata);
+ }
+
--- /dev/null
+From 1064f874abc0d05eeed8993815f584d847b72486 Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Fri, 20 Jan 2017 18:28:35 +1300
+Subject: mnt: Tuck mounts under others instead of creating shadow/side mounts.
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit 1064f874abc0d05eeed8993815f584d847b72486 upstream.
+
+Ever since mount propagation was introduced, in cases where a mount is
+propagated to a parent mount and mountpoint pair that is already in
+use, the code has placed the new mount behind the old mount in the
+mount hash table.
+
+This implementation detail is problematic as it allows creating
+arbitrary length mount hash chains.
+
+Furthermore it invalidates the constraint maintained elsewhere in the
+mount code that a parent mount and mountpoint pair will have exactly
+one mount upon them, making this special case hard to deal with and
+to talk about in the mount code.
+
+Modify mount propagation to notice when there is already a mount at
+the parent mount and mountpoint where a new mount is propagating to
+and place that preexisting mount on top of the new mount.
+
+Modify unmount propagation to notice when a mount that is being
+unmounted has another mount on top of it (and no other children), and
+to replace the unmounted mount with the mount on top of it.
+
+Move the MNT_UMOUNT test from __lookup_mnt_last into
+__propagate_umount as that is the only call of __lookup_mnt_last where
+MNT_UMOUNT may be set on any mount visible in the mount hash table.
+
+These modifications allow:
+ - __lookup_mnt_last to be removed.
+ - attach_shadows to be renamed __attach_mnt and its shadow
+ handling to be removed.
+ - commit_tree to be simplified
+ - copy_tree to be simplified
+
+The result is an easier to understand tree of mounts that does not
+allow creation of arbitrary length hash chains in the mount hash table.
+
+The result is also a very slight userspace visible difference in semantics.
+The following two cases now behave identically, where before order
+mattered:
+
+case 1: (explicit user action)
+ B is a slave of A
+ mount something on A/a; it will propagate to B/a,
+ and then mount something on B/a
+
+case 2: (tucked mount)
+ B is a slave of A
+ mount something on B/a
+ and then mount something on A/a
+
+Historically umount A/a would fail in case 1 and succeed in case 2.
+Now umount A/a succeeds in both configurations.
+
+This very small change in semantics appears, if anything, to be a bug
+fix to me, and my survey of userspace leads me to believe that no
+programs will notice or care about this subtle semantic change.
+
+v2: Updated mnt_change_mountpoint to not call dput or mntput
+and instead to decrement the counts directly. It is guaranteed
+that there will be other references when mnt_change_mountpoint is
+called so this is safe.
+
+v3: Moved put_mountpoint under mount_lock in attach_recursive_mnt
+ As the locking in fs/namespace.c changed between v2 and v3.
+
+v4: Reworked the logic in propagate_mount_busy and __propagate_umount
+ that detects when a mount completely covers another mount.
+
+v5: Removed unnecessary tests whose result is always true in
+ find_topper and attach_recursive_mnt.
+
+v6: Document the user space visible semantic difference.
+
+Fixes: b90fa9ae8f51 ("[PATCH] shared mount handling: bind and rbind")
+Tested-by: Andrei Vagin <avagin@virtuozzo.com>
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/mount.h | 1
+ fs/namespace.c | 109 ++++++++++++++++++++++++++++++---------------------------
+ fs/pnode.c | 61 +++++++++++++++++++++++++------
+ fs/pnode.h | 2 +
+ 4 files changed, 110 insertions(+), 63 deletions(-)
+
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -86,7 +86,6 @@ static inline int is_mounted(struct vfsm
+ }
+
+ extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
+-extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
+
+ extern int __legitimize_mnt(struct vfsmount *, unsigned);
+ extern bool legitimize_mnt(struct vfsmount *, unsigned);
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -638,28 +638,6 @@ struct mount *__lookup_mnt(struct vfsmou
+ }
+
+ /*
+- * find the last mount at @dentry on vfsmount @mnt.
+- * mount_lock must be held.
+- */
+-struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
+-{
+- struct mount *p, *res = NULL;
+- p = __lookup_mnt(mnt, dentry);
+- if (!p)
+- goto out;
+- if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+- res = p;
+- hlist_for_each_entry_continue(p, mnt_hash) {
+- if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+- break;
+- if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+- res = p;
+- }
+-out:
+- return res;
+-}
+-
+-/*
+ * lookup_mnt - Return the first child mount mounted at path
+ *
+ * "First" means first mounted chronologically. If you create the
+@@ -879,6 +857,13 @@ void mnt_set_mountpoint(struct mount *mn
+ hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
+ }
+
++static void __attach_mnt(struct mount *mnt, struct mount *parent)
++{
++ hlist_add_head_rcu(&mnt->mnt_hash,
++ m_hash(&parent->mnt, mnt->mnt_mountpoint));
++ list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
++}
++
+ /*
+ * vfsmount lock must be held for write
+ */
+@@ -887,28 +872,45 @@ static void attach_mnt(struct mount *mnt
+ struct mountpoint *mp)
+ {
+ mnt_set_mountpoint(parent, mp, mnt);
+- hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
+- list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
++ __attach_mnt(mnt, parent);
+ }
+
+-static void attach_shadowed(struct mount *mnt,
+- struct mount *parent,
+- struct mount *shadows)
++void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
+ {
+- if (shadows) {
+- hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
+- list_add(&mnt->mnt_child, &shadows->mnt_child);
+- } else {
+- hlist_add_head_rcu(&mnt->mnt_hash,
+- m_hash(&parent->mnt, mnt->mnt_mountpoint));
+- list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+- }
++ struct mountpoint *old_mp = mnt->mnt_mp;
++ struct dentry *old_mountpoint = mnt->mnt_mountpoint;
++ struct mount *old_parent = mnt->mnt_parent;
++
++ list_del_init(&mnt->mnt_child);
++ hlist_del_init(&mnt->mnt_mp_list);
++ hlist_del_init_rcu(&mnt->mnt_hash);
++
++ attach_mnt(mnt, parent, mp);
++
++ put_mountpoint(old_mp);
++
++ /*
++ * Safely avoid even the suggestion this code might sleep or
++ * lock the mount hash by taking advantage of the knowledge that
++ * mnt_change_mountpoint will not release the final reference
++ * to a mountpoint.
++ *
++ * During mounting, the mount passed in as the parent mount will
++ * continue to use the old mountpoint and during unmounting, the
++ * old mountpoint will continue to exist until namespace_unlock,
++ * which happens well after mnt_change_mountpoint.
++ */
++ spin_lock(&old_mountpoint->d_lock);
++ old_mountpoint->d_lockref.count--;
++ spin_unlock(&old_mountpoint->d_lock);
++
++ mnt_add_count(old_parent, -1);
+ }
+
+ /*
+ * vfsmount lock must be held for write
+ */
+-static void commit_tree(struct mount *mnt, struct mount *shadows)
++static void commit_tree(struct mount *mnt)
+ {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+@@ -923,7 +925,7 @@ static void commit_tree(struct mount *mn
+
+ list_splice(&head, n->list.prev);
+
+- attach_shadowed(mnt, parent, shadows);
++ __attach_mnt(mnt, parent);
+ touch_mnt_namespace(n);
+ }
+
+@@ -1718,7 +1720,6 @@ struct mount *copy_tree(struct mount *mn
+ continue;
+
+ for (s = r; s; s = next_mnt(s, r)) {
+- struct mount *t = NULL;
+ if (!(flag & CL_COPY_UNBINDABLE) &&
+ IS_MNT_UNBINDABLE(s)) {
+ s = skip_mnt_tree(s);
+@@ -1740,14 +1741,7 @@ struct mount *copy_tree(struct mount *mn
+ goto out;
+ lock_mount_hash();
+ list_add_tail(&q->mnt_list, &res->mnt_list);
+- mnt_set_mountpoint(parent, p->mnt_mp, q);
+- if (!list_empty(&parent->mnt_mounts)) {
+- t = list_last_entry(&parent->mnt_mounts,
+- struct mount, mnt_child);
+- if (t->mnt_mp != p->mnt_mp)
+- t = NULL;
+- }
+- attach_shadowed(q, parent, t);
++ attach_mnt(q, parent, p->mnt_mp);
+ unlock_mount_hash();
+ }
+ }
+@@ -1925,10 +1919,18 @@ static int attach_recursive_mnt(struct m
+ struct path *parent_path)
+ {
+ HLIST_HEAD(tree_list);
++ struct mountpoint *smp;
+ struct mount *child, *p;
+ struct hlist_node *n;
+ int err;
+
++ /* Preallocate a mountpoint in case the new mounts need
++ * to be tucked under other mounts.
++ */
++ smp = get_mountpoint(source_mnt->mnt.mnt_root);
++ if (IS_ERR(smp))
++ return PTR_ERR(smp);
++
+ if (IS_MNT_SHARED(dest_mnt)) {
+ err = invent_group_ids(source_mnt, true);
+ if (err)
+@@ -1948,16 +1950,19 @@ static int attach_recursive_mnt(struct m
+ touch_mnt_namespace(source_mnt->mnt_ns);
+ } else {
+ mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
+- commit_tree(source_mnt, NULL);
++ commit_tree(source_mnt);
+ }
+
+ hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+ struct mount *q;
+ hlist_del_init(&child->mnt_hash);
+- q = __lookup_mnt_last(&child->mnt_parent->mnt,
+- child->mnt_mountpoint);
+- commit_tree(child, q);
++ q = __lookup_mnt(&child->mnt_parent->mnt,
++ child->mnt_mountpoint);
++ if (q)
++ mnt_change_mountpoint(child, smp, q);
++ commit_tree(child);
+ }
++ put_mountpoint(smp);
+ unlock_mount_hash();
+
+ return 0;
+@@ -1970,6 +1975,10 @@ static int attach_recursive_mnt(struct m
+ unlock_mount_hash();
+ cleanup_group_ids(source_mnt, NULL);
+ out:
++ read_seqlock_excl(&mount_lock);
++ put_mountpoint(smp);
++ read_sequnlock_excl(&mount_lock);
++
+ return err;
+ }
+
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -324,6 +324,21 @@ out:
+ return ret;
+ }
+
++static struct mount *find_topper(struct mount *mnt)
++{
++ /* If there is exactly one mount covering mnt completely return it. */
++ struct mount *child;
++
++ if (!list_is_singular(&mnt->mnt_mounts))
++ return NULL;
++
++ child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
++ if (child->mnt_mountpoint != mnt->mnt.mnt_root)
++ return NULL;
++
++ return child;
++}
++
+ /*
+ * return true if the refcount is greater than count
+ */
+@@ -344,9 +359,8 @@ static inline int do_refcount_check(stru
+ */
+ int propagate_mount_busy(struct mount *mnt, int refcnt)
+ {
+- struct mount *m, *child;
++ struct mount *m, *child, *topper;
+ struct mount *parent = mnt->mnt_parent;
+- int ret = 0;
+
+ if (mnt == parent)
+ return do_refcount_check(mnt, refcnt);
+@@ -361,12 +375,24 @@ int propagate_mount_busy(struct mount *m
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+- child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
+- if (child && list_empty(&child->mnt_mounts) &&
+- (ret = do_refcount_check(child, 1)))
+- break;
++ int count = 1;
++ child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
++ if (!child)
++ continue;
++
++ /* Is there exactly one mount on the child that covers
++ * it completely whose reference should be ignored?
++ */
++ topper = find_topper(child);
++ if (topper)
++ count += 1;
++ else if (!list_empty(&child->mnt_mounts))
++ continue;
++
++ if (do_refcount_check(child, count))
++ return 1;
+ }
+- return ret;
++ return 0;
+ }
+
+ /*
+@@ -383,7 +409,7 @@ void propagate_mount_unlock(struct mount
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+- child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
++ child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
+ if (child)
+ child->mnt.mnt_flags &= ~MNT_LOCKED;
+ }
+@@ -401,9 +427,11 @@ static void mark_umount_candidates(struc
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+- struct mount *child = __lookup_mnt_last(&m->mnt,
++ struct mount *child = __lookup_mnt(&m->mnt,
+ mnt->mnt_mountpoint);
+- if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
++ if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
++ continue;
++ if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
+ SET_MNT_MARK(child);
+ }
+ }
+@@ -422,8 +450,8 @@ static void __propagate_umount(struct mo
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+-
+- struct mount *child = __lookup_mnt_last(&m->mnt,
++ struct mount *topper;
++ struct mount *child = __lookup_mnt(&m->mnt,
+ mnt->mnt_mountpoint);
+ /*
+ * umount the child only if the child has no children
+@@ -432,6 +460,15 @@ static void __propagate_umount(struct mo
+ if (!child || !IS_MNT_MARKED(child))
+ continue;
+ CLEAR_MNT_MARK(child);
++
++ /* If there is exactly one mount covering all of child
++ * replace child with that mount.
++ */
++ topper = find_topper(child);
++ if (topper)
++ mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
++ topper);
++
+ if (list_empty(&child->mnt_mounts)) {
+ list_del_init(&child->mnt_child);
+ child->mnt.mnt_flags |= MNT_UMOUNT;
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -49,6 +49,8 @@ int get_dominating_id(struct mount *mnt,
+ unsigned int mnt_get_count(struct mount *mnt);
+ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
+ struct mount *);
++void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
++ struct mount *mnt);
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+ const struct path *root);
--- /dev/null
+From 239a3b663647869330955ec59caac0100ef9b60a Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Tue, 21 Feb 2017 11:28:01 +0100
+Subject: net: mvpp2: fix DMA address calculation in mvpp2_txq_inc_put()
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+commit 239a3b663647869330955ec59caac0100ef9b60a upstream.
+
+When TX descriptors are filled in, the buffer DMA address is split
+between the tx_desc->buf_phys_addr field (high-order bits) and
+tx_desc->packet_offset field (5 low-order bits).
+
+However, when we re-calculate the DMA address from the TX descriptor in
+mvpp2_txq_inc_put(), we do not take tx_desc->packet_offset into
+account. This means that when the DMA address is not aligned on a 32
+bytes boundary, we end up calling dma_unmap_single() with a DMA address
+that was not the one returned by dma_map_single().
+
+This inconsistency is detected by the kernel when DMA_API_DEBUG is
+enabled. We fix this problem by properly calculating the DMA address in
+mvpp2_txq_inc_put().
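+
+A minimal sketch of the address split and the corrected
+reconstruction (standalone C, example address made up; the real
+driver fields live in the TX descriptor):
+
+  #include <assert.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          uint32_t dma = 0x12345678 + 5;          /* not 32-byte aligned */
+          uint32_t buf_phys_addr = dma & ~0x1fu;  /* high-order bits */
+          uint32_t packet_offset = dma & 0x1fu;   /* 5 low-order bits */
+
+          /* buggy unmap address: buf_phys_addr alone loses the offset */
+          /* fixed unmap address: add packet_offset back */
+          assert(buf_phys_addr + packet_offset == dma);
+          return 0;
+  }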
+
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/marvell/mvpp2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -993,7 +993,7 @@ static void mvpp2_txq_inc_put(struct mvp
+ txq_pcpu->buffs + txq_pcpu->txq_put_index;
+ tx_buf->skb = skb;
+ tx_buf->size = tx_desc->data_size;
+- tx_buf->phys = tx_desc->buf_phys_addr;
++ tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+ txq_pcpu->txq_put_index++;
+ if (txq_pcpu->txq_put_index == txq_pcpu->size)
+ txq_pcpu->txq_put_index = 0;
--- /dev/null
+From 251af29c320d86071664f02c76f0d063a19fefdf Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Sat, 11 Feb 2017 10:37:38 -0500
+Subject: nlm: Ensure callback code also checks that the files match
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit 251af29c320d86071664f02c76f0d063a19fefdf upstream.
+
+It is not sufficient to just check that the lock pids match when
+granting a callback; we also need to ensure that we're granting
+the callback on the right file.
+
+Reported-by: Pankaj Singh <psingh.ait@gmail.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/lockd/lockd.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -355,7 +355,8 @@ static inline int nlm_privileged_request
+ static inline int nlm_compare_locks(const struct file_lock *fl1,
+ const struct file_lock *fl2)
+ {
+- return fl1->fl_pid == fl2->fl_pid
++ return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
++ && fl1->fl_pid == fl2->fl_pid
+ && fl1->fl_owner == fl2->fl_owner
+ && fl1->fl_start == fl2->fl_start
+ && fl1->fl_end == fl2->fl_end
--- /dev/null
+From 8d254a340efb12b40c4c1ff25a48a4f48f7bbd6b Mon Sep 17 00:00:00 2001
+From: Clemens Gruber <clemens.gruber@pqgruber.com>
+Date: Tue, 13 Dec 2016 16:52:50 +0100
+Subject: pwm: pca9685: Fix period change with same duty cycle
+
+From: Clemens Gruber <clemens.gruber@pqgruber.com>
+
+commit 8d254a340efb12b40c4c1ff25a48a4f48f7bbd6b upstream.
+
+When first implementing support for changing the output frequency, an
+optimization was added to continue the PWM after changing the prescaler
+without having to reprogram the ON and OFF registers for the duty cycle,
+in case the duty cycle stayed the same. This was flawed, because we
+compared the absolute value of the duty cycle in nanoseconds instead of
+the ratio to the period.
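+
+A worked example with made-up values: 500000 ns of duty at a
+1000000 ns period is a 50% ratio, but the same 500000 ns at a
+2000000 ns period is only 25%, so an unchanged duty_ns does not
+imply an unchanged duty/period ratio.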
+
+Fix the problem by removing the shortcut.
+
+Fixes: 01ec8472009c9 ("pwm-pca9685: Support changing the output frequency")
+Signed-off-by: Clemens Gruber <clemens.gruber@pqgruber.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pwm/pwm-pca9685.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/drivers/pwm/pwm-pca9685.c
++++ b/drivers/pwm/pwm-pca9685.c
+@@ -65,7 +65,6 @@
+ #define PCA9685_MAXCHAN 0x10
+
+ #define LED_FULL (1 << 4)
+-#define MODE1_RESTART (1 << 7)
+ #define MODE1_SLEEP (1 << 4)
+ #define MODE2_INVRT (1 << 4)
+ #define MODE2_OUTDRV (1 << 2)
+@@ -117,16 +116,6 @@ static int pca9685_pwm_config(struct pwm
+ udelay(500);
+
+ pca->period_ns = period_ns;
+-
+- /*
+- * If the duty cycle did not change, restart PWM with
+- * the same duty cycle to period ratio and return.
+- */
+- if (duty_ns == pca->duty_ns) {
+- regmap_update_bits(pca->regmap, PCA9685_MODE1,
+- MODE1_RESTART, 0x1);
+- return 0;
+- }
+ } else {
+ dev_err(chip->dev,
+ "prescaler not set: period out of bounds!\n");
--- /dev/null
+From a63f53e34db8b49675448d03ae324f6c5bc04fe6 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Mon, 30 Jan 2017 15:52:14 +0100
+Subject: s390/dcssblk: fix device size calculation in dcssblk_direct_access()
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit a63f53e34db8b49675448d03ae324f6c5bc04fe6 upstream.
+
+Since commit dd22f551 "block: Change direct_access calling convention",
+the device size calculation in dcssblk_direct_access() is off-by-one.
+This results in bdev_direct_access() always returning -ENXIO because the
+returned value is not page aligned.
+
+Fix this by adding 1 to the dev_sz calculation.
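+
+A worked example with made-up addresses: for a segment spanning
+0x1000..0x1fff inclusive, "end - start" yields 0xfff, which is not
+page aligned, so bdev_direct_access() rejects it; the correct size is
+end - start + 1 = 0x1000.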
+
+Fixes: dd22f551 ("block: Change direct_access calling convention")
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/block/dcssblk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_devi
+ dev_info = bdev->bd_disk->private_data;
+ if (!dev_info)
+ return -ENODEV;
+- dev_sz = dev_info->end - dev_info->start;
++ dev_sz = dev_info->end - dev_info->start + 1;
+ offset = secnum * 512;
+ addr = (void *) (dev_info->start + offset);
+ *pfn = virt_to_phys(addr) >> PAGE_SHIFT;
--- /dev/null
+From da8fd820f389a0e29080b14c61bf5cf1d8ef5ca1 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Sat, 4 Feb 2017 11:40:36 +0100
+Subject: s390: make setup_randomness work
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit da8fd820f389a0e29080b14c61bf5cf1d8ef5ca1 upstream.
+
+Commit bcfcbb6bae64 ("s390: add system information as device
+randomness") intended to add some virtual machine specific information
+to the randomness pool.
+
+Unfortunately it uses the page allocator before it is ready for use.
+As a result the page allocator always returns NULL and the
+setup_randomness function never adds anything to the randomness pool.
+
+To fix this use memblock_alloc and memblock_free instead.
+
+Fixes: bcfcbb6bae64 ("s390: add system information as device randomness")
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/setup.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -805,10 +805,10 @@ static void __init setup_randomness(void
+ {
+ struct sysinfo_3_2_2 *vmms;
+
+- vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
+- if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
++ vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++ if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+ add_device_randomness(&vmms, vmms->count);
+- free_page((unsigned long) vmms);
++ memblock_free((unsigned long) vmms, PAGE_SIZE);
+ }
+
+ /*
--- /dev/null
+From 1e4a382fdc0ba8d1a85b758c0811de3a3631085e Mon Sep 17 00:00:00 2001
+From: Julian Wiedmann <jwi@linux.vnet.ibm.com>
+Date: Mon, 21 Nov 2016 13:37:48 +0100
+Subject: s390/qdio: clear DSCI prior to scanning multiple input queues
+
+From: Julian Wiedmann <jwi@linux.vnet.ibm.com>
+
+commit 1e4a382fdc0ba8d1a85b758c0811de3a3631085e upstream.
+
+For devices with multiple input queues, tiqdio_call_inq_handlers()
+iterates over all input queues and clears the device's DSCI
+during each iteration. If the DSCI is re-armed during one
+of the later iterations, we therefore do not scan the previous
+queues again.
+The re-arming also raises a new adapter interrupt. But its
+handler does not trigger a rescan for the device, as the DSCI
+has already been erroneously cleared.
+This can result in queue stalls on devices with multiple
+input queues.
+
+Fix it by clearing the DSCI just once, prior to scanning the queues.
+
+As the code is moved in front of the loop, we also need to access
+the DSCI directly (ie irq->dsci) instead of going via each queue's
+parent pointer to the same irq. This is not a functional change,
+and a follow-up patch will clean up the other users.
+
+In practice, this bug only affects CQ-enabled HiperSockets devices,
+i.e. devices with the sysfs attribute "hsuid" set. Setting a hsuid is
+needed for AF_IUCV socket applications that use HiperSockets
+communication.
+
+Fixes: 104ea556ee7f ("qdio: support asynchronous delivery of storage blocks")
+Reviewed-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
+Signed-off-by: Julian Wiedmann <jwi@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/cio/qdio_thinint.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handl
+ struct qdio_q *q;
+ int i;
+
+- for_each_input_queue(irq, q, i) {
+- if (!references_shared_dsci(irq) &&
+- has_multiple_inq_on_dsci(irq))
+- xchg(q->irq_ptr->dsci, 0);
++ if (!references_shared_dsci(irq) &&
++ has_multiple_inq_on_dsci(irq))
++ xchg(irq->dsci, 0);
+
++ for_each_input_queue(irq, q, i) {
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
--- /dev/null
+From fb94a687d96c570d46332a4a890f1dcb7310e643 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Fri, 24 Feb 2017 07:43:51 +0100
+Subject: s390: TASK_SIZE for kernel threads
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit fb94a687d96c570d46332a4a890f1dcb7310e643 upstream.
+
+Return a sensible value if TASK_SIZE is called from a kernel thread,
+which has no mm and therefore no asce_limit of its own.
+
+This gets us around an issue with copy_mount_options that does a magic
+size calculation "TASK_SIZE - (unsigned long)data" while in a kernel
+thread and data pointing to kernel space.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/processor.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -74,7 +74,8 @@ extern void execve_tail(void);
+ * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
+ */
+
+-#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
++#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
++ (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
+ #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
+ (1UL << 30) : (1UL << 41))
+ #define TASK_SIZE TASK_SIZE_OF(current)
--- /dev/null
+From 4920e3cf77347d7d7373552d4839e8d832321313 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Sun, 5 Feb 2017 23:03:18 +0100
+Subject: s390: use correct input data address for setup_randomness
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 4920e3cf77347d7d7373552d4839e8d832321313 upstream.
+
+The current implementation of setup_randomness uses the stack address
+of the pointer to the SYSIB 3.2.2 block, rather than the block itself,
+as the input data address. Furthermore the length of the input data is
+the number of virtual-machine description blocks, which is typically
+one.
+
+This means that typically a single zero byte is fed to
+add_device_randomness.
+
+Fix both of these and use the address of the first virtual machine
+description block as input data address and also use the correct
+length.
+
+Fixes: bcfcbb6bae64 ("s390: add system information as device randomness")
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -807,7 +807,7 @@ static void __init setup_randomness(void
+
+ vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+- add_device_randomness(&vmms, vmms->count);
++ add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
+ memblock_free((unsigned long) vmms, PAGE_SIZE);
+ }
+
--- /dev/null
+From 1c9c858e2ff8ae8024a3d75d2ed080063af43754 Mon Sep 17 00:00:00 2001
+From: Ian Abbott <abbotti@mev.co.uk>
+Date: Fri, 3 Feb 2017 20:25:00 +0000
+Subject: serial: 8250_pci: Add MKS Tenta SCOM-0800 and SCOM-0801 cards
+
+From: Ian Abbott <abbotti@mev.co.uk>
+
+commit 1c9c858e2ff8ae8024a3d75d2ed080063af43754 upstream.
+
+The MKS Instruments SCOM-0800 and SCOM-0801 cards (originally by Tenta
+Technologies) are 3U CompactPCI serial cards with 4 and 8 serial ports,
+respectively. The first 4 ports are implemented by an OX16PCI954 chip,
+and the second 4 ports are implemented by an OX16C954 chip on a local
+bus, bridged by the second PCI function of the OX16PCI954. The ports
+are jumper-selectable as RS-232 and RS-422/485, and the UARTs use a
+non-standard oscillator frequency of 20 MHz (base_baud = 1250000).
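+
+The base_baud value follows from the standard 16x UART sampling
+divisor: 20000000 / 16 = 1250000.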
+
+Signed-off-by: Ian Abbott <abbotti@mev.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/8250/8250_pci.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2880,6 +2880,8 @@ enum pci_board_num_t {
+ pbn_b0_4_1152000_200,
+ pbn_b0_8_1152000_200,
+
++ pbn_b0_4_1250000,
++
+ pbn_b0_2_1843200,
+ pbn_b0_4_1843200,
+
+@@ -3113,6 +3115,13 @@ static struct pciserial_board pci_boards
+ .uart_offset = 0x200,
+ },
+
++ [pbn_b0_4_1250000] = {
++ .flags = FL_BASE0,
++ .num_ports = 4,
++ .base_baud = 1250000,
++ .uart_offset = 8,
++ },
++
+ [pbn_b0_2_1843200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+@@ -5778,6 +5787,10 @@ static struct pci_device_id serial_pci_t
+ { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
+ { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
+
++ /* MKS Tenta SCOM-080x serial cards */
++ { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
++ { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
++
+ /*
+ * These entries match devices with class COMMUNICATION_SERIAL,
+ * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
tty-n_hdlc-fix-lockdep-false-positive.patch
tty-n_hdlc-get-rid-of-racy-n_hdlc.tbuf.patch
+serial-8250_pci-add-mks-tenta-scom-0800-and-scom-0801-cards.patch
+kvm-s390-disable-dirty-log-retrieval-for-ucontrol-guests.patch
+kvm-vmx-use-correct-vmcs_read-write-for-guest-segment-selector-base.patch
+bluetooth-add-another-ar3012-04ca-3018-device.patch
+s390-qdio-clear-dsci-prior-to-scanning-multiple-input-queues.patch
+s390-dcssblk-fix-device-size-calculation-in-dcssblk_direct_access.patch
+s390-task_size-for-kernel-threads.patch
+s390-make-setup_randomness-work.patch
+s390-use-correct-input-data-address-for-setup_randomness.patch
+net-mvpp2-fix-dma-address-calculation-in-mvpp2_txq_inc_put.patch
+mnt-tuck-mounts-under-others-instead-of-creating-shadow-side-mounts.patch
+ib-ipoib-fix-deadlock-between-rmmod-and-set_mode.patch
+ib-ipoib-add-destination-address-when-re-queue-packet.patch
+ib-srp-avoid-that-duplicate-responses-trigger-a-kernel-bug.patch
+ib-srp-fix-race-conditions-related-to-task-management.patch
+ktest-fix-child-exit-code-processing.patch
+ceph-remove-req-from-unsafe-list-when-unregistering-it.patch
+target-fix-null-dereference-during-lun-lookup-active-i-o-shutdown.patch
+nlm-ensure-callback-code-also-checks-that-the-files-match.patch
+pwm-pca9685-fix-period-change-with-same-duty-cycle.patch
+xtensa-move-parse_tag_fdt-out-of-ifdef-config_blk_dev_initrd.patch
+mac80211-flush-delayed-work-when-entering-suspend.patch
--- /dev/null
+From bd4e2d2907fa23a11d46217064ecf80470ddae10 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Wed, 22 Feb 2017 22:06:32 -0800
+Subject: target: Fix NULL dereference during LUN lookup + active I/O shutdown
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit bd4e2d2907fa23a11d46217064ecf80470ddae10 upstream.
+
+When transport_clear_lun_ref() is shutting down a se_lun via
+configfs with new I/O in-flight, it's possible to trigger a
+NULL pointer dereference in transport_lookup_cmd_lun(),
+because percpu_ref_get() doesn't check __PERCPU_REF_DEAD
+before incrementing lun->lun_ref.count once lun->lun_ref
+has switched to atomic_t mode.
+
+This results in a NULL pointer dereference as LUN shutdown
+code in core_tpg_remove_lun() continues running after the
+existing ->release() -> core_tpg_lun_ref_release() callback
+completes, and clears the RCU protected se_lun->lun_se_dev
+pointer.
+
+At the time of the oops, the state of lun->lun_ref in the
+process that triggered the NULL pointer dereference looks
+like the following on v4.1.y stable code:
+
+struct se_lun {
+ lun_link_magic = 4294932337,
+ lun_status = TRANSPORT_LUN_STATUS_FREE,
+
+ .....
+
+ lun_se_dev = 0x0,
+ lun_sep = 0x0,
+
+ .....
+
+ lun_ref = {
+ count = {
+ counter = 1
+ },
+ percpu_count_ptr = 3,
+ release = 0xffffffffa02fa1e0 <core_tpg_lun_ref_release>,
+ confirm_switch = 0x0,
+ force_atomic = false,
+ rcu = {
+ next = 0xffff88154fa1a5d0,
+ func = 0xffffffff8137c4c0 <percpu_ref_switch_to_atomic_rcu>
+ }
+ }
+}
+
+To address this bug, use percpu_ref_tryget_live() to ensure
+that once __PERCPU_REF_DEAD is visible on all CPUs and
+->lun_ref has switched to atomic_t mode, all new I/Os will
+fail to obtain a new lun->lun_ref reference.
+
+Also use an explicit percpu_ref_kill_and_confirm() callback
+to block on ->lun_ref_comp to allow the first stage and
+associated RCU grace period to complete, and then block on
+->lun_ref_shutdown waiting for the final percpu_ref_put()
+to drop the last reference via transport_lun_remove_cmd()
+before continuing with core_tpg_remove_lun() shutdown.
+
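+In outline, the lookup and shutdown sides now pair up as
+follows (a minimal sketch of the percpu_ref idiom used here;
+see the diff below for the actual code):
+
+	/* I/O lookup side: fails once the ref has been marked dead */
+	if (!percpu_ref_tryget_live(&lun->lun_ref))
+		return NULL;	/* reject new I/O during shutdown */
+
+	/* shutdown side: two-stage wait */
+	percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
+	wait_for_completion(&lun->lun_ref_comp);	/* now in atomic_t mode */
+	wait_for_completion(&lun->lun_shutdown_comp);	/* last reference dropped */
+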
+Reported-by: Rob Millner <rlm@daterainc.com>
+Tested-by: Rob Millner <rlm@daterainc.com>
+Cc: Rob Millner <rlm@daterainc.com>
+Tested-by: Vaibhav Tandon <vst@datera.io>
+Cc: Vaibhav Tandon <vst@datera.io>
+Tested-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_device.c | 10 ++++++++--
+ drivers/target/target_core_tpg.c | 3 ++-
+ drivers/target/target_core_transport.c | 31 ++++++++++++++++++++++++++++++-
+ include/target/target_core_base.h | 1 +
+ 4 files changed, 41 insertions(+), 4 deletions(-)
+
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -77,12 +77,16 @@ transport_lookup_cmd_lun(struct se_cmd *
+ &deve->read_bytes);
+
+ se_lun = rcu_dereference(deve->se_lun);
++
++ if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
++ se_lun = NULL;
++ goto out_unlock;
++ }
++
+ se_cmd->se_lun = rcu_dereference(deve->se_lun);
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+-
+- percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
+
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+@@ -96,6 +100,7 @@ transport_lookup_cmd_lun(struct se_cmd *
+ goto ref_dev;
+ }
+ }
++out_unlock:
+ rcu_read_unlock();
+
+ if (!se_lun) {
+@@ -826,6 +831,7 @@ struct se_device *target_alloc_device(st
+ xcopy_lun = &dev->xcopy_lun;
+ rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
+ init_completion(&xcopy_lun->lun_ref_comp);
++ init_completion(&xcopy_lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+ INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+ mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -539,7 +539,7 @@ static void core_tpg_lun_ref_release(str
+ {
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+- complete(&lun->lun_ref_comp);
++ complete(&lun->lun_shutdown_comp);
+ }
+
+ int core_tpg_register(
+@@ -666,6 +666,7 @@ struct se_lun *core_tpg_alloc_lun(
+ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_ref_comp);
++ init_completion(&lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&lun->lun_deve_list);
+ INIT_LIST_HEAD(&lun->lun_dev_link);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2680,10 +2680,39 @@ void target_wait_for_sess_cmds(struct se
+ }
+ EXPORT_SYMBOL(target_wait_for_sess_cmds);
+
++static void target_lun_confirm(struct percpu_ref *ref)
++{
++ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
++
++ complete(&lun->lun_ref_comp);
++}
++
+ void transport_clear_lun_ref(struct se_lun *lun)
+ {
+- percpu_ref_kill(&lun->lun_ref);
++ /*
++ * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
++ * the initial reference and schedule confirm kill to be
++ * executed after one full RCU grace period has completed.
++ */
++ percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
++ /*
++ * The first completion waits for percpu_ref_switch_to_atomic_rcu()
++ * to call target_lun_confirm after lun->lun_ref has been marked
++ * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
++ * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
++ * fails for all new incoming I/O.
++ */
+ wait_for_completion(&lun->lun_ref_comp);
++ /*
++ * The second completion waits for percpu_ref_put_many() to
++ * invoke ->release() after lun->lun_ref has switched to
++ * atomic_t mode, and lun->lun_ref.count has reached zero.
++ *
++ * At this point all target-core lun->lun_ref references have
++ * been dropped via transport_lun_remove_cmd(), and it's safe
++ * to proceed with the remaining LUN shutdown.
++ */
++ wait_for_completion(&lun->lun_shutdown_comp);
+ }
+
+ static bool
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -740,6 +740,7 @@ struct se_lun {
+ struct config_group lun_group;
+ struct se_port_stat_grps port_stat_grps;
+ struct completion lun_ref_comp;
++ struct completion lun_shutdown_comp;
+ struct percpu_ref lun_ref;
+ struct list_head lun_dev_link;
+ struct hlist_node link;
--- /dev/null
+From 4ab18701c66552944188dbcd0ce0012729baab84 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Tue, 3 Jan 2017 09:37:34 -0800
+Subject: xtensa: move parse_tag_fdt out of #ifdef CONFIG_BLK_DEV_INITRD
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 4ab18701c66552944188dbcd0ce0012729baab84 upstream.
+
+FDT tag parsing is not related to whether BLK_DEV_INITRD is configured
+or not, so move it out of the corresponding #ifdef/#endif block.
+This fixes passing an external FDT to a kernel configured without
+BLK_DEV_INITRD support.
+
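+After the move, the two tag parsers sit in independent blocks,
+roughly as follows (sketch of the resulting layout):
+
+	#ifdef CONFIG_BLK_DEV_INITRD
+	/* parse_tag_initrd() and its __tagtable() entry */
+	#endif /* CONFIG_BLK_DEV_INITRD */
+
+	#ifdef CONFIG_OF
+	/* parse_tag_fdt() and its __tagtable() entry */
+	#endif /* CONFIG_OF */
+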
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/setup.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -133,6 +133,8 @@ static int __init parse_tag_initrd(const
+
+ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
+
++#endif /* CONFIG_BLK_DEV_INITRD */
++
+ #ifdef CONFIG_OF
+
+ static int __init parse_tag_fdt(const bp_tag_t *tag)
+@@ -145,8 +147,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+ #endif /* CONFIG_OF */
+
+-#endif /* CONFIG_BLK_DEV_INITRD */
+-
+ static int __init parse_tag_cmdline(const bp_tag_t* tag)
+ {
+ strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);