--- /dev/null
+From 22f6b4d34fcf039c63a94e7670e0da24f8575a5a Mon Sep 17 00:00:00 2001
+From: Jann Horn <jann@thejh.net>
+Date: Fri, 16 Sep 2016 00:31:22 +0200
+Subject: aio: mark AIO pseudo-fs noexec
+
+From: Jann Horn <jann@thejh.net>
+
+commit 22f6b4d34fcf039c63a94e7670e0da24f8575a5a upstream.
+
+This ensures that do_mmap() won't implicitly make AIO memory mappings
+executable if the READ_IMPLIES_EXEC personality flag is set. Such
+behavior is problematic because the security_mmap_file LSM hook doesn't
+catch this case, potentially permitting an attacker to bypass a W^X
+policy enforced by SELinux.
+
+I have tested the patch on my machine.
+
+To test the behavior, compile and run this:
+
+ #define _GNU_SOURCE
+ #include <unistd.h>
+ #include <sys/personality.h>
+ #include <linux/aio_abi.h>
+ #include <err.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <sys/syscall.h>
+
+ int main(void) {
+ personality(READ_IMPLIES_EXEC);
+ aio_context_t ctx = 0;
+ if (syscall(__NR_io_setup, 1, &ctx))
+ err(1, "io_setup");
+
+ char cmd[1000];
+ sprintf(cmd, "cat /proc/%d/maps | grep -F '/[aio]'",
+ (int)getpid());
+ system(cmd);
+ return 0;
+ }
+
+In the output, "rw-s" is good, "rwxs" is bad.
+
+Signed-off-by: Jann Horn <jann@thejh.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct f
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+- return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
++ struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
++ AIO_RING_MAGIC);
++
++ if (!IS_ERR(root))
++ root->d_sb->s_iflags |= SB_I_NOEXEC;
++ return root;
+ }
+
+ /* aio_setup
--- /dev/null
+From 237e15dfd5d651868726111c3a9d828bec700490 Mon Sep 17 00:00:00 2001
+From: Ashok Raj Nagarajan <arnagara@qti.qualcomm.com>
+Date: Fri, 19 Aug 2016 13:37:37 +0300
+Subject: ath10k: fix get rx_status from htt context
+
+From: Ashok Raj Nagarajan <arnagara@qti.qualcomm.com>
+
+commit 237e15dfd5d651868726111c3a9d828bec700490 upstream.
+
+When handling an AMSDU on the rx path, get the rx_status from the htt
+context. Without this fix, we are seeing warnings like the following when
+running DBDC traffic.
+
+WARNING: CPU: 0 PID: 0 at net/mac80211/rx.c:4105 ieee80211_rx_napi+0x88/0x7d8 [mac80211]()
+
+[ 1715.878248] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 3.18.21 #1
+[ 1715.878273] [<c001d3f4>] (unwind_backtrace) from [<c001a4b0>] (show_stack+0x10/0x14)
+[ 1715.878293] [<c001a4b0>] (show_stack) from [<c01bee64>] (dump_stack+0x70/0xbc)
+[ 1715.878315] [<c01bee64>] (dump_stack) from [<c002a61c>] (warn_slowpath_common+0x64/0x88)
+[ 1715.878339] [<c002a61c>] (warn_slowpath_common) from [<c002a6d0>] (warn_slowpath_null+0x18/0x20)
+[ 1715.878395] [<c002a6d0>] (warn_slowpath_null) from [<bf4caa98>] (ieee80211_rx_napi+0x88/0x7d8 [mac80211])
+[ 1715.878474] [<bf4caa98>] (ieee80211_rx_napi [mac80211]) from [<bf568658>] (ath10k_htt_t2h_msg_handler+0xb48/0xbfc [ath10k_core])
+[ 1715.878535] [<bf568658>] (ath10k_htt_t2h_msg_handler [ath10k_core]) from [<bf568708>] (ath10k_htt_t2h_msg_handler+0xbf8/0xbfc [ath10k_core])
+[ 1715.878597] [<bf568708>] (ath10k_htt_t2h_msg_handler [ath10k_core]) from [<bf569160>] (ath10k_htt_txrx_compl_task+0xa54/0x1170 [ath10k_core])
+[ 1715.878639] [<bf569160>] (ath10k_htt_txrx_compl_task [ath10k_core]) from [<c002db14>] (tasklet_action+0xb4/0x130)
+[ 1715.878659] [<c002db14>] (tasklet_action) from [<c002d110>] (__do_softirq+0xe0/0x210)
+[ 1715.878678] [<c002d110>] (__do_softirq) from [<c002d4b4>] (irq_exit+0x84/0xe0)
+[ 1715.878700] [<c002d4b4>] (irq_exit) from [<c005a544>] (__handle_domain_irq+0x98/0xd0)
+[ 1715.878722] [<c005a544>] (__handle_domain_irq) from [<c00085f4>] (gic_handle_irq+0x38/0x5c)
+[ 1715.878741] [<c00085f4>] (gic_handle_irq) from [<c0009680>] (__irq_svc+0x40/0x74)
+[ 1715.878753] Exception stack(0xc05f9f50 to 0xc05f9f98)
+[ 1715.878767] 9f40: ffffffed 00000000 00399e1e c000a220
+[ 1715.878786] 9f60: 00000000 c05f6780 c05f8000 00000000 c05f5db8 ffffffed c05f8000 c04d1980
+[ 1715.878802] 9f80: 00000000 c05f9f98 c0018110 c0018114 60000013 ffffffff
+[ 1715.878822] [<c0009680>] (__irq_svc) from [<c0018114>] (arch_cpu_idle+0x2c/0x50)
+[ 1715.878844] [<c0018114>] (arch_cpu_idle) from [<c00530d4>] (cpu_startup_entry+0x108/0x234)
+[ 1715.878866] [<c00530d4>] (cpu_startup_entry) from [<c05c7be0>] (start_kernel+0x33c/0x3b8)
+[ 1715.878879] ---[ end trace 6d5e1cc0fef8ed6a ]---
+[ 1715.878899] ------------[ cut here ]------------
+
+Fixes: 18235664e7f9 ("ath10k: cleanup amsdu processing for rx indication")
+Signed-off-by: Ashok Raj Nagarajan <arnagara@qti.qualcomm.com>
+Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath10k/htt_rx.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -1524,7 +1524,7 @@ static void ath10k_htt_rx_h_filter(struc
+ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
+ {
+ struct ath10k *ar = htt->ar;
+- static struct ieee80211_rx_status rx_status;
++ struct ieee80211_rx_status *rx_status = &htt->rx_status;
+ struct sk_buff_head amsdu;
+ int ret;
+
+@@ -1548,11 +1548,11 @@ static int ath10k_htt_rx_handle_amsdu(st
+ return ret;
+ }
+
+- ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
++ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+- ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
+- ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
+- ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
++ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
++ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
++ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+
+ return 0;
+ }
--- /dev/null
+From d9f179877e50ae2681fe7b0b83e0d9f63b6165ad Mon Sep 17 00:00:00 2001
+From: Marek Lindner <mareklindner@neomailbox.ch>
+Date: Mon, 2 May 2016 21:58:50 +0800
+Subject: batman-adv: remove unused callback from batadv_algo_ops struct
+
+From: Marek Lindner <mareklindner@neomailbox.ch>
+
+commit d9f179877e50ae2681fe7b0b83e0d9f63b6165ad upstream.
+
+Signed-off-by: Marek Lindner <mareklindner@neomailbox.ch>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/batman-adv/originator.c | 5 -----
+ net/batman-adv/types.h | 3 ---
+ 2 files changed, 8 deletions(-)
+
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -251,10 +251,8 @@ static void batadv_neigh_node_release(st
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+- struct batadv_algo_ops *bao;
+
+ neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
+- bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
+
+ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ &neigh_node->ifinfo_list, list) {
+@@ -263,9 +261,6 @@ static void batadv_neigh_node_release(st
+
+ batadv_hardif_neigh_put(neigh_node->hardif_neigh);
+
+- if (bao->bat_neigh_free)
+- bao->bat_neigh_free(neigh_node);
+-
+ batadv_hardif_put(neigh_node->if_incoming);
+
+ kfree_rcu(neigh_node, rcu);
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -1284,8 +1284,6 @@ struct batadv_forw_packet {
+ * better than neigh2 for their respective outgoing interface from the metric
+ * prospective
+ * @bat_neigh_print: print the single hop neighbor list (optional)
+- * @bat_neigh_free: free the resources allocated by the routing algorithm for a
+- * neigh_node object
+ * @bat_orig_print: print the originator table (optional)
+ * @bat_orig_free: free the resources allocated by the routing algorithm for an
+ * orig_node object
+@@ -1316,7 +1314,6 @@ struct batadv_algo_ops {
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2);
+ void (*bat_neigh_print)(struct batadv_priv *priv, struct seq_file *seq);
+- void (*bat_neigh_free)(struct batadv_neigh_node *neigh);
+ /* orig_node handling API */
+ void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq,
+ struct batadv_hard_iface *hard_iface);
--- /dev/null
+From 0f5aa88a7bb28b73253fb42b3df8202142769f39 Mon Sep 17 00:00:00 2001
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Date: Sun, 28 Aug 2016 18:47:12 +0200
+Subject: ceph: do not modify fi->frag in need_reset_readdir()
+
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+
+commit 0f5aa88a7bb28b73253fb42b3df8202142769f39 upstream.
+
+Commit f3c4ebe65ea1 ("ceph: using hash value to compose dentry offset")
+modified "if (fpos_frag(new_pos) != fi->frag)" to "if (fi->frag |=
+fpos_frag(new_pos))" in need_reset_readdir(), thus replacing a
+comparison operator with an assignment one.
+
+This looks like a typo which is reported by clang when building the
+kernel with some warning flags:
+
+ fs/ceph/dir.c:600:22: error: using the result of an assignment as a
+ condition without parentheses [-Werror,-Wparentheses]
+ } else if (fi->frag |= fpos_frag(new_pos)) {
+ ~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~
+ fs/ceph/dir.c:600:22: note: place parentheses around the assignment
+ to silence this warning
+ } else if (fi->frag |= fpos_frag(new_pos)) {
+ ^
+ ( )
+ fs/ceph/dir.c:600:22: note: use '!=' to turn this compound
+ assignment into an inequality comparison
+ } else if (fi->frag |= fpos_frag(new_pos)) {
+ ^~
+ !=
+
+Fixes: f3c4ebe65ea1 ("ceph: using hash value to compose dentry offset")
+Signed-off-by: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -597,7 +597,7 @@ static bool need_reset_readdir(struct ce
+ if (is_hash_order(new_pos)) {
+ /* no need to reset last_name for a forward seek when
+ * dentries are sotred in hash order */
+- } else if (fi->frag |= fpos_frag(new_pos)) {
++ } else if (fi->frag != fpos_frag(new_pos)) {
+ return true;
+ }
+ rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
--- /dev/null
+From 7efb367320f56fc4d549875b6f3a6940018ef2e5 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 30 Aug 2016 16:20:55 -0400
+Subject: dm log writes: fix bug with too large bios
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 7efb367320f56fc4d549875b6f3a6940018ef2e5 upstream.
+
+bio_alloc() can allocate a bio with at most BIO_MAX_PAGES (256) vector
+entries. However, the incoming bio may have more vector entries if it
+was allocated by other means. For example, bcache submits bios with
+more than BIO_MAX_PAGES entries. This results in bio_alloc() failure.
+
+To avoid the failure, change the code so that it allocates bio with at
+most BIO_MAX_PAGES entries. If the incoming bio has more entries,
+bio_add_page() will fail and a new bio will be allocated - the code that
+handles bio_add_page() failure already exists in the dm-log-writes
+target.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-log-writes.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -259,7 +259,7 @@ static int log_one_block(struct log_writ
+ sector++;
+
+ atomic_inc(&lc->io_blocks);
+- bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
++ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+@@ -280,7 +280,7 @@ static int log_one_block(struct log_writ
+ if (ret != block->vecs[i].bv_len) {
+ atomic_inc(&lc->io_blocks);
+ submit_bio(WRITE, bio);
+- bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
++ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
--- /dev/null
+From 68c6bcdd8bd00394c234b915ab9b97c74104130c Mon Sep 17 00:00:00 2001
+From: Erez Shitrit <erezsh@mellanox.com>
+Date: Sun, 28 Aug 2016 10:58:30 +0300
+Subject: IB/core: Fix use after free in send_leave function
+
+From: Erez Shitrit <erezsh@mellanox.com>
+
+commit 68c6bcdd8bd00394c234b915ab9b97c74104130c upstream.
+
+The function send_leave sets the member group->query_id
+(group->query_id = ret) after calling the sa_query, but leave_handler
+can be executed before that assignment and might delete the group
+object, resulting in memory corruption.
+
+Additionally, this patch gets rid of group->query_id variable which is
+not used.
+
+Fixes: faec2f7b96b5 ('IB/sa: Track multicast join/leave requests')
+Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/multicast.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -118,7 +118,6 @@ struct mcast_group {
+ atomic_t refcount;
+ enum mcast_group_state state;
+ struct ib_sa_query *query;
+- int query_id;
+ u16 pkey_index;
+ u8 leave_state;
+ int retries;
+@@ -352,11 +351,7 @@ static int send_join(struct mcast_group
+ member->multicast.comp_mask,
+ 3000, GFP_KERNEL, join_handler, group,
+ &group->query);
+- if (ret >= 0) {
+- group->query_id = ret;
+- ret = 0;
+- }
+- return ret;
++ return (ret > 0) ? 0 : ret;
+ }
+
+ static int send_leave(struct mcast_group *group, u8 leave_state)
+@@ -376,11 +371,7 @@ static int send_leave(struct mcast_group
+ IB_SA_MCMEMBER_REC_JOIN_STATE,
+ 3000, GFP_KERNEL, leave_handler,
+ group, &group->query);
+- if (ret >= 0) {
+- group->query_id = ret;
+- ret = 0;
+- }
+- return ret;
++ return (ret > 0) ? 0 : ret;
+ }
+
+ static void join_group(struct mcast_group *group, struct mcast_member *member,
--- /dev/null
+From 344bacca8cd811809fc33a249f2738ab757d327f Mon Sep 17 00:00:00 2001
+From: Alex Vesker <valex@mellanox.com>
+Date: Mon, 12 Sep 2016 09:55:28 +0300
+Subject: IB/ipoib: Don't allow MC joins during light MC flush
+
+From: Alex Vesker <valex@mellanox.com>
+
+commit 344bacca8cd811809fc33a249f2738ab757d327f upstream.
+
+This fix solves a race between light flush and on the fly joins.
+Light flush doesn't set the device to down and doesn't unset the
+IPOIB_OPER_UP flag; this means that if a MC join is in progress while
+flushing and the QP was attached to the BC MGID, we can get a mismatch
+when re-attaching a QP to the BC MGID.
+
+The light flush would set the broadcast group to NULL causing an on
+the fly join to rejoin and reattach to the BC MCG as well as adding
+the BC MGID to the multicast list. The flush process would later on
+remove the BC MGID and detach it from the QP. On the next flush
+the BC MGID is present in the multicast list but not found when trying
+to detach it because of the previous double attach and single detach.
+
+[18332.714265] ------------[ cut here ]------------
+[18332.717775] WARNING: CPU: 6 PID: 3767 at drivers/infiniband/core/verbs.c:280 ib_dealloc_pd+0xff/0x120 [ib_core]
+...
+[18332.775198] Hardware name: Red Hat KVM, BIOS Bochs 01/01/2011
+[18332.779411] 0000000000000000 ffff8800b50dfbb0 ffffffff813fed47 0000000000000000
+[18332.784960] 0000000000000000 ffff8800b50dfbf0 ffffffff8109add1 0000011832f58300
+[18332.790547] ffff880226a596c0 ffff880032482000 ffff880032482830 ffff880226a59280
+[18332.796199] Call Trace:
+[18332.798015] [<ffffffff813fed47>] dump_stack+0x63/0x8c
+[18332.801831] [<ffffffff8109add1>] __warn+0xd1/0xf0
+[18332.805403] [<ffffffff8109aebd>] warn_slowpath_null+0x1d/0x20
+[18332.809706] [<ffffffffa025d90f>] ib_dealloc_pd+0xff/0x120 [ib_core]
+[18332.814384] [<ffffffffa04f3d7c>] ipoib_transport_dev_cleanup+0xfc/0x1d0 [ib_ipoib]
+[18332.820031] [<ffffffffa04ed648>] ipoib_ib_dev_cleanup+0x98/0x110 [ib_ipoib]
+[18332.825220] [<ffffffffa04e62c8>] ipoib_dev_cleanup+0x2d8/0x550 [ib_ipoib]
+[18332.830290] [<ffffffffa04e656f>] ipoib_uninit+0x2f/0x40 [ib_ipoib]
+[18332.834911] [<ffffffff81772a8a>] rollback_registered_many+0x1aa/0x2c0
+[18332.839741] [<ffffffff81772bd1>] rollback_registered+0x31/0x40
+[18332.844091] [<ffffffff81773b18>] unregister_netdevice_queue+0x48/0x80
+[18332.848880] [<ffffffffa04f489b>] ipoib_vlan_delete+0x1fb/0x290 [ib_ipoib]
+[18332.853848] [<ffffffffa04df1cd>] delete_child+0x7d/0xf0 [ib_ipoib]
+[18332.858474] [<ffffffff81520c08>] dev_attr_store+0x18/0x30
+[18332.862510] [<ffffffff8127fe4a>] sysfs_kf_write+0x3a/0x50
+[18332.866349] [<ffffffff8127f4e0>] kernfs_fop_write+0x120/0x170
+[18332.870471] [<ffffffff81207198>] __vfs_write+0x28/0xe0
+[18332.874152] [<ffffffff810e09bf>] ? percpu_down_read+0x1f/0x50
+[18332.878274] [<ffffffff81208062>] vfs_write+0xa2/0x1a0
+[18332.881896] [<ffffffff812093a6>] SyS_write+0x46/0xa0
+[18332.885632] [<ffffffff810039b7>] do_syscall_64+0x57/0xb0
+[18332.889709] [<ffffffff81883321>] entry_SYSCALL64_slow_path+0x25/0x25
+[18332.894727] ---[ end trace 09ebbe31f831ef17 ]---
+
+Fixes: ee1e2c82c245 ("IPoIB: Refresh paths instead of flushing them on SM change events")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct
+ }
+
+ if (level == IPOIB_FLUSH_LIGHT) {
++ int oper_up;
+ ipoib_mark_paths_invalid(dev);
++ /* Set IPoIB operation as down to prevent races between:
++ * the flush flow which leaves MCG and on the fly joins
++ * which can happen during that time. mcast restart task
++ * should deal with join requests we missed.
++ */
++ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_mcast_dev_flush(dev);
++ if (oper_up)
++ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_flush_ah(dev);
+ }
+
--- /dev/null
+From 546481c2816ea3c061ee9d5658eb48070f69212e Mon Sep 17 00:00:00 2001
+From: Erez Shitrit <erezsh@mellanox.com>
+Date: Sun, 28 Aug 2016 10:58:31 +0300
+Subject: IB/ipoib: Fix memory corruption in ipoib cm mode connect flow
+
+From: Erez Shitrit <erezsh@mellanox.com>
+
+commit 546481c2816ea3c061ee9d5658eb48070f69212e upstream.
+
+When a new CM connection is being requested, the ipoib driver copies
+data from the path pointer in the CM/tx object. The path object might
+be invalid at that point, and memory corruption will happen later when
+the CM driver tries to use that data.
+
+The next scenario demonstrates it:
+ neigh_add_path --> ipoib_cm_create_tx -->
+ queue_work (pointer to path is in the cm/tx struct)
+ #while the work is still in the queue,
+ #the port goes down and causes the ipoib_flush_paths:
+ ipoib_flush_paths --> path_free --> kfree(path)
+ #at this point the work scheduled starts.
+ ipoib_cm_tx_start --> copy from the (invalid)path pointer:
+ (memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);)
+ -> memory corruption.
+
+To fix that the driver now starts the CM/tx connection only if that
+specific path exists in the general paths database.
+This check is protected with the relevant locks, and uses the gid from
+the neigh member in the CM/tx object which is valid according to the ref
+count that was taken by the CM/tx.
+
+Fixes: 839fcaba35 ('IPoIB: Connected mode experimental support')
+Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib.h | 1 +
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c | 16 ++++++++++++++++
+ drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +-
+ 3 files changed, 18 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev,
+ struct ipoib_ah *address, u32 qpn);
+ void ipoib_reap_ah(struct work_struct *work);
+
++struct ipoib_path *__path_find(struct net_device *dev, void *gid);
+ void ipoib_mark_paths_invalid(struct net_device *dev);
+ void ipoib_flush_paths(struct net_device *dev);
+ int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm
+ }
+ }
+
++#define QPN_AND_OPTIONS_OFFSET 4
++
+ static void ipoib_cm_tx_start(struct work_struct *work)
+ {
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct wor
+ struct ipoib_neigh *neigh;
+ struct ipoib_cm_tx *p;
+ unsigned long flags;
++ struct ipoib_path *path;
+ int ret;
+
+ struct ib_sa_path_rec pathrec;
+@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct wor
+ p = list_entry(priv->cm.start_list.next, typeof(*p), list);
+ list_del_init(&p->list);
+ neigh = p->neigh;
++
+ qpn = IPOIB_QPN(neigh->daddr);
++ /*
++ * As long as the search is with these 2 locks,
++ * path existence indicates its validity.
++ */
++ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
++ if (!path) {
++ pr_info("%s ignore not valid path %pI6\n",
++ __func__,
++ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
++ goto free_neigh;
++ }
+ memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct wor
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ret) {
++free_neigh:
+ neigh = p->neigh;
+ if (neigh) {
+ neigh->cm = NULL;
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *de
+ return -EINVAL;
+ }
+
+-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
++struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct rb_node *n = priv->path_tree.rb_node;
--- /dev/null
+From baa0be7026e2f7d1d40bfd45909044169e9e3c68 Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Mon, 12 Sep 2016 19:16:19 +0300
+Subject: IB/mlx4: Fix code indentation in QP1 MAD flow
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+commit baa0be7026e2f7d1d40bfd45909044169e9e3c68 upstream.
+
+The indentation in the QP1 GRH flow in procedure build_mlx_header is
+really confusing. Fix it, in preparation for a commit which touches
+this code.
+
+Fixes: 1ffeb2eb8be9 ("IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support")
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx4/qp.c | 36 +++++++++++++++++++-----------------
+ 1 file changed, 19 insertions(+), 17 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2492,24 +2492,26 @@ static int build_mlx_header(struct mlx4_
+ sqp->ud_header.grh.flow_label =
+ ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+ sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+- if (is_eth)
++ if (is_eth) {
+ memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
+- else {
+- if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+- /* When multi-function is enabled, the ib_core gid
+- * indexes don't necessarily match the hw ones, so
+- * we must use our own cache */
+- sqp->ud_header.grh.source_gid.global.subnet_prefix =
+- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+- subnet_prefix;
+- sqp->ud_header.grh.source_gid.global.interface_id =
+- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+- guid_cache[ah->av.ib.gid_index];
+- } else
+- ib_get_cached_gid(ib_dev,
+- be32_to_cpu(ah->av.ib.port_pd) >> 24,
+- ah->av.ib.gid_index,
+- &sqp->ud_header.grh.source_gid, NULL);
++ } else {
++ if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
++ /* When multi-function is enabled, the ib_core gid
++ * indexes don't necessarily match the hw ones, so
++ * we must use our own cache
++ */
++ sqp->ud_header.grh.source_gid.global.subnet_prefix =
++ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
++ subnet_prefix;
++ sqp->ud_header.grh.source_gid.global.interface_id =
++ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
++ guid_cache[ah->av.ib.gid_index];
++ } else {
++ ib_get_cached_gid(ib_dev,
++ be32_to_cpu(ah->av.ib.port_pd) >> 24,
++ ah->av.ib.gid_index,
++ &sqp->ud_header.grh.source_gid, NULL);
++ }
+ }
+ memcpy(sqp->ud_header.grh.destination_gid.raw,
+ ah->av.ib.dgid, 16);
--- /dev/null
+From e5ac40cd66c2f3cd11bc5edc658f012661b16347 Mon Sep 17 00:00:00 2001
+From: Alex Vesker <valex@mellanox.com>
+Date: Mon, 12 Sep 2016 19:16:18 +0300
+Subject: IB/mlx4: Fix incorrect MC join state bit-masking on SR-IOV
+
+From: Alex Vesker <valex@mellanox.com>
+
+commit e5ac40cd66c2f3cd11bc5edc658f012661b16347 upstream.
+
+Because of an incorrect bit-masking done on the join state bits, when
+handling a join request we failed to detect a difference between the
+group join state and the request join state when joining as send only
+full member (0x8). This caused the MC join request not to be sent.
+This issue is relevant only when SRIOV is enabled and SM supports
+send only full member.
+
+This fix separates scope bits and join states bits a nibble each.
+
+Fixes: b9c5d6a64358 ('IB/mlx4: Add multicast group (MCG) paravirtualization for SR-IOV')
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx4/mcg.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_g
+ if (!group->members[i])
+ leave_state |= (1 << i);
+
+- return leave_state & (group->rec.scope_join_state & 7);
++ return leave_state & (group->rec.scope_join_state & 0xf);
+ }
+
+ static int join_group(struct mcast_group *group, int slave, u8 join_mask)
+@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(
+ } else
+ mcg_warn_group(group, "DRIVER BUG\n");
+ } else if (group->state == MCAST_LEAVE_SENT) {
+- if (group->rec.scope_join_state & 7)
+- group->rec.scope_join_state &= 0xf8;
++ if (group->rec.scope_join_state & 0xf)
++ group->rec.scope_join_state &= 0xf0;
+ group->state = MCAST_IDLE;
+ mutex_unlock(&group->lock);
+ if (release_group(group, 1))
+@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast
+ static int handle_join_req(struct mcast_group *group, u8 join_mask,
+ struct mcast_req *req)
+ {
+- u8 group_join_state = group->rec.scope_join_state & 7;
++ u8 group_join_state = group->rec.scope_join_state & 0xf;
+ int ref = 0;
+ u16 status;
+ struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(str
+ u8 cur_join_state;
+
+ resp_join_state = ((struct ib_sa_mcmember_data *)
+- group->response_sa_mad.data)->scope_join_state & 7;
+- cur_join_state = group->rec.scope_join_state & 7;
++ group->response_sa_mad.data)->scope_join_state & 0xf;
++ cur_join_state = group->rec.scope_join_state & 0xf;
+
+ if (method == IB_MGMT_METHOD_GET_RESP) {
+ /* successfull join */
+@@ -710,7 +710,7 @@ process_requests:
+ req = list_first_entry(&group->pending_list, struct mcast_req,
+ group_list);
+ sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+- req_join_state = sa_data->scope_join_state & 0x7;
++ req_join_state = sa_data->scope_join_state & 0xf;
+
+ /* For a leave request, we will immediately answer the VF, and
+ * update our internal counters. The actual leave will be sent
--- /dev/null
+From 8ec07bf8a8b57d6c58927a16a0a22c0115cf2855 Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Mon, 12 Sep 2016 19:16:20 +0300
+Subject: IB/mlx4: Use correct subnet-prefix in QP1 mads under SR-IOV
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+commit 8ec07bf8a8b57d6c58927a16a0a22c0115cf2855 upstream.
+
+When sending QP1 MAD packets which use a GRH, the source GID
+(which consists of the 64-bit subnet prefix, and the 64 bit port GUID)
+must be included in the packet GRH.
+
+For SR-IOV, a GID cache is used, since the source GID needs to be the
+slave's source GID, and not the Hypervisor's GID. This cache also
+included a subnet_prefix. Unfortunately, the subnet_prefix field in
+the cache was never initialized (to the default subnet prefix 0xfe80::0).
+As a result, this field remained all zeroes. Therefore, when SR-IOV
+was active, all QP1 packets which included a GRH had a source GID
+subnet prefix of all-zeroes.
+
+However, the subnet-prefix should initially be 0xfe80::0 (the default
+subnet prefix). In addition, if OpenSM modifies a port's subnet prefix,
+the new subnet prefix must be used in the GRH when sending QP1 packets.
+To fix this we now initialize the subnet prefix in the SR-IOV GID cache
+to the default subnet prefix. We update the cached value if/when OpenSM
+modifies the port's subnet prefix. We take this cached value when sending
+QP1 packets when SR-IOV is active.
+
+Note that the value is stored as an atomic64. This eliminates any need
+for locking when the subnet prefix is being updated.
+
+Note also that we depend on the FW generating the "port management change"
+event for tracking subnet-prefix changes performed by OpenSM. If running
+early FW (before 2.9.4630), subnet prefix changes will not be tracked (but
+the default subnet prefix still will be stored in the cache; therefore
+users who do not modify the subnet prefix will not have a problem).
+IF there is a need for such tracking also for early FW, we will add that
+capability in a subsequent patch.
+
+Fixes: 1ffeb2eb8be9 ("IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support")
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx4/mad.c | 23 +++++++++++++++++++++++
+ drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 +-
+ drivers/infiniband/hw/mlx4/qp.c | 5 +++--
+ 3 files changed, 27 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struc
+
+ /* Generate GUID changed event */
+ if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
++ if (mlx4_is_master(dev->dev)) {
++ union ib_gid gid;
++ int err = 0;
++
++ if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
++ err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
++ else
++ gid.global.subnet_prefix =
++ eqe->event.port_mgmt_change.params.port_info.gid_prefix;
++ if (err) {
++ pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
++ port, err);
++ } else {
++ pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
++ port,
++ (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
++ be64_to_cpu(gid.global.subnet_prefix));
++ atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
++ be64_to_cpu(gid.global.subnet_prefix));
++ }
++ }
+ mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+ /*if master, notify all slaves*/
+ if (mlx4_is_master(dev->dev))
+@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_de
+ if (err)
+ goto demux_err;
+ dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
++ atomic64_set(&dev->sriov.demux[i].subnet_prefix,
++ be64_to_cpu(gid.global.subnet_prefix));
+ err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
+ &dev->sriov.sqps[i]);
+ if (err)
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
+ struct workqueue_struct *wq;
+ struct workqueue_struct *ud_wq;
+ spinlock_t ud_lock;
+- __be64 subnet_prefix;
++ atomic64_t subnet_prefix;
+ __be64 guid_cache[128];
+ struct mlx4_ib_dev *dev;
+ /* the following lock protects both mcg_table and mcg_mgid0_list */
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2501,8 +2501,9 @@ static int build_mlx_header(struct mlx4_
+ * we must use our own cache
+ */
+ sqp->ud_header.grh.source_gid.global.subnet_prefix =
+- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+- subnet_prefix;
++ cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
++ demux[sqp->qp.port - 1].
++ subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global.interface_id =
+ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+ guid_cache[ah->av.ib.gid_index];
--- /dev/null
+From 7fae6655a0c897875bd34501ec092232b526d3e4 Mon Sep 17 00:00:00 2001
+From: Noa Osherovich <noaos@mellanox.com>
+Date: Mon, 12 Sep 2016 19:16:23 +0300
+Subject: IB/mlx5: Enable MAD_IFC commands for IB ports only
+
+From: Noa Osherovich <noaos@mellanox.com>
+
+commit 7fae6655a0c897875bd34501ec092232b526d3e4 upstream.
+
+MAD_IFC command is supported only for physical functions (PF)
+and when physical port is IB. The proposed fix enforces it.
+
+Fixes: d603c809ef91 ("IB/mlx5: Fix decision on using MAD_IFC")
+Reported-by: David Chang <dchang@suse.com>
+Signed-off-by: Noa Osherovich <noaos@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -287,7 +287,9 @@ __be16 mlx5_get_roce_udp_sport(struct ml
+
+ static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+ {
+- return !MLX5_CAP_GEN(dev->mdev, ib_virt);
++ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
++ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
++ return 0;
+ }
+
+ enum {
--- /dev/null
+From ee3da804ad1b1dd4c766199a6e8443542b0aaaef Mon Sep 17 00:00:00 2001
+From: Maor Gottlieb <maorg@mellanox.com>
+Date: Mon, 12 Sep 2016 19:16:24 +0300
+Subject: IB/mlx5: Set source mac address in FTE
+
+From: Maor Gottlieb <maorg@mellanox.com>
+
+commit ee3da804ad1b1dd4c766199a6e8443542b0aaaef upstream.
+
+Set the source mac address in the FTE when L2 specification
+is provided.
+
+Fixes: 038d2ef87572 ('IB/mlx5: Add flow steering support')
+Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1277,6 +1277,13 @@ static int parse_flow_attr(u32 *match_c,
+ dmac_47_16),
+ ib_spec->eth.val.dst_mac);
+
++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
++ smac_47_16),
++ ib_spec->eth.mask.src_mac);
++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
++ smac_47_16),
++ ib_spec->eth.val.src_mac);
++
+ if (ib_spec->eth.mask.vlan_tag) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ vlan_tag, 1);
--- /dev/null
+From 1bc8da4e143c0fd8807e061a66d91d5972601ab1 Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Thu, 4 Aug 2016 17:22:16 +0200
+Subject: regmap: rbtree: Avoid overlapping nodes
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit 1bc8da4e143c0fd8807e061a66d91d5972601ab1 upstream.
+
+When searching for a suitable node that should be used for inserting a new
+register, which does not fall within the range of any existing node, we do
+not only look for nodes which are directly adjacent to the new register, but
+for nodes within a certain proximity. This is done to avoid creating lots
+of small nodes with just a few registers spacing in between, which would
+increase memory usage as well as tree traversal time.
+
+This means there might be multiple node candidates which fall within the
+proximity range of the new register. If we choose the first node we
+encounter, under certain register insertion patterns it is possible to end
+up with overlapping ranges. This will break order in the rbtree and can
+cause the cached register value to become corrupted.
+
+E.g. take the simplified example where the proximity range is 2 and the
+register insertion sequence is 1, 4, 2, 3, 5.
+ * Insert of register 1 creates a new node, this is the root of the rbtree
+ * Insert of register 4 creates a new node, which is inserted to the right
+ of the root.
+ * Insert of register 2 gets inserted to the first node
+ * Insert of register 3 gets inserted to the first node
+ * Insert of register 5 also gets inserted into the first node since
+ this is the first node encountered and it is within the proximity range.
+ Now there are two overlapping nodes.
+
+To avoid this always choose the node that is closest to the new register.
+This will ensure that nodes will not overlap. The tree traversal is still
+done as a binary search, we just don't stop at the first node found. So the
+complexity of the algorithm stays within the same order.
+
+Ideally if a new register is in the range of two adjacent blocks those
+blocks should be merged, but that is a much more invasive change and left
+for later.
+
+The issue was initially introduced in commit 472fdec7380c ("regmap: rbtree:
+Reduce number of nodes, take 2"), but became much more exposed by commit
+6399aea629b0 ("regmap: rbtree: When adding a reg do a bsearch for target
+node") which changed the order in which nodes are looked-up.
+
+Fixes: 6399aea629b0 ("regmap: rbtree: When adding a reg do a bsearch for target node")
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/regmap/regcache-rbtree.c | 38 +++++++++++++++++++++++++---------
+ 1 file changed, 28 insertions(+), 10 deletions(-)
+
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct
+ unsigned int new_base_reg, new_top_reg;
+ unsigned int min, max;
+ unsigned int max_dist;
++ unsigned int dist, best_dist = UINT_MAX;
+
+ max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
+ map->cache_word_size;
+@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct
+ &base_reg, &top_reg);
+
+ if (base_reg <= max && top_reg >= min) {
+- new_base_reg = min(reg, base_reg);
+- new_top_reg = max(reg, top_reg);
+- } else {
+- if (max < base_reg)
+- node = node->rb_left;
++ if (reg < base_reg)
++ dist = base_reg - reg;
++ else if (reg > top_reg)
++ dist = reg - top_reg;
+ else
+- node = node->rb_right;
+-
+- continue;
++ dist = 0;
++ if (dist < best_dist) {
++ rbnode = rbnode_tmp;
++ best_dist = dist;
++ new_base_reg = min(reg, base_reg);
++ new_top_reg = max(reg, top_reg);
++ }
+ }
+
+- ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
++ /*
++ * Keep looking, we want to choose the closest block,
++ * otherwise we might end up creating overlapping
++ * blocks, which breaks the rbtree.
++ */
++ if (reg < base_reg)
++ node = node->rb_left;
++ else if (reg > top_reg)
++ node = node->rb_right;
++ else
++ break;
++ }
++
++ if (rbnode) {
++ ret = regcache_rbtree_insert_to_block(map, rbnode,
+ new_base_reg,
+ new_top_reg, reg,
+ value);
+ if (ret)
+ return ret;
+- rbtree_ctx->cached_rbnode = rbnode_tmp;
++ rbtree_ctx->cached_rbnode = rbnode;
+ return 0;
+ }
+
--- /dev/null
+From 835831c57e9b0cccc24e96a812542875471d75b5 Mon Sep 17 00:00:00 2001
+From: Johannes Thumshirn <jthumshirn@suse.de>
+Date: Wed, 17 Aug 2016 11:46:17 +0200
+Subject: scsi: ses: use scsi_is_sas_rphy instead of is_sas_attached
+
+From: Johannes Thumshirn <jthumshirn@suse.de>
+
+commit 835831c57e9b0cccc24e96a812542875471d75b5 upstream.
+
+Use scsi_is_sas_rphy() instead of is_sas_attached() to decide whether we
+should obtain the SAS address from a scsi device or not. This will
+prevent us from tripping on the BUG_ON() in sas_sdev_to_rdev() if the
+rphy isn't attached to the SAS transport class, like it is with hpsa's
+logical devices.
+
+Fixes: 3f8d6f2a0 ('ses: fix discovery of SATA devices in SAS enclosures')
+Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ses.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struc
+
+ ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+
+- if (is_sas_attached(sdev))
++ if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+ efd.addr = sas_get_address(sdev);
+
+ if (efd.addr) {
lib-test_hash.c-fix-warning-in-preprocessor-symbol-evaluation.patch
dmaengine-at_xdmac-fix-to-pass-correct-device-identity-to-free_irq.patch
kvm-nvmx-postpone-vmcs-changes-on-msr_ia32_apicbase-write.patch
+ceph-do-not-modify-fi-frag-in-need_reset_readdir.patch
+ib-ipoib-fix-memory-corruption-in-ipoib-cm-mode-connect-flow.patch
+ath10k-fix-get-rx_status-from-htt-context.patch
+ib-core-fix-use-after-free-in-send_leave-function.patch
+regmap-rbtree-avoid-overlapping-nodes.patch
+scsi-ses-use-scsi_is_sas_rphy-instead-of-is_sas_attached.patch
+ib-ipoib-don-t-allow-mc-joins-during-light-mc-flush.patch
+ib-mlx4-fix-incorrect-mc-join-state-bit-masking-on-sr-iov.patch
+ib-mlx4-fix-code-indentation-in-qp1-mad-flow.patch
+ib-mlx4-use-correct-subnet-prefix-in-qp1-mads-under-sr-iov.patch
+ib-mlx5-enable-mad_ifc-commands-for-ib-ports-only.patch
+ib-mlx5-set-source-mac-address-in-fte.patch
+batman-adv-remove-unused-callback-from-batadv_algo_ops-struct.patch
+tpm_crb-drop-struct-resource-res-from-struct-crb_priv.patch
+tpm_crb-fix-mapping-of-the-buffers.patch
+aio-mark-aio-pseudo-fs-noexec.patch
+dm-log-writes-fix-bug-with-too-large-bios.patch
--- /dev/null
+From 14ddfbf488a0223b19abf7e4634e6e676a91a12d Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Date: Tue, 15 Mar 2016 21:41:40 +0200
+Subject: tpm_crb: drop struct resource res from struct crb_priv
+
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+
+commit 14ddfbf488a0223b19abf7e4634e6e676a91a12d upstream.
+
+The iomem resource is needed only temporarily so it is better to pass
+it on instead of storing it permanently. Named the variable as io_res
+so that the code better documents itself.
+
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Reviewed-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_crb.c | 29 ++++++++++++++++-------------
+ 1 file changed, 16 insertions(+), 13 deletions(-)
+
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -77,7 +77,6 @@ enum crb_flags {
+
+ struct crb_priv {
+ unsigned int flags;
+- struct resource res;
+ void __iomem *iobase;
+ struct crb_control_area __iomem *cca;
+ u8 __iomem *cmd;
+@@ -224,19 +223,19 @@ static int crb_init(struct acpi_device *
+
+ static int crb_check_resource(struct acpi_resource *ares, void *data)
+ {
+- struct crb_priv *priv = data;
++ struct resource *io_res = data;
+ struct resource res;
+
+ if (acpi_dev_resource_memory(ares, &res)) {
+- priv->res = res;
+- priv->res.name = NULL;
++ *io_res = res;
++ io_res->name = NULL;
+ }
+
+ return 1;
+ }
+
+ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+- u64 start, u32 size)
++ struct resource *io_res, u64 start, u32 size)
+ {
+ struct resource new_res = {
+ .start = start,
+@@ -248,50 +247,54 @@ static void __iomem *crb_map_res(struct
+ if (start != new_res.start)
+ return (void __iomem *) ERR_PTR(-EINVAL);
+
+- if (!resource_contains(&priv->res, &new_res))
++ if (!resource_contains(io_res, &new_res))
+ return devm_ioremap_resource(dev, &new_res);
+
+- return priv->iobase + (new_res.start - priv->res.start);
++ return priv->iobase + (new_res.start - io_res->start);
+ }
+
+ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+ {
+ struct list_head resources;
++ struct resource io_res;
+ struct device *dev = &device->dev;
+ u64 pa;
+ int ret;
+
+ INIT_LIST_HEAD(&resources);
+ ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+- priv);
++ &io_res);
+ if (ret < 0)
+ return ret;
+ acpi_dev_free_resource_list(&resources);
+
+- if (resource_type(&priv->res) != IORESOURCE_MEM) {
++ if (resource_type(&io_res) != IORESOURCE_MEM) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ }
+
+- priv->iobase = devm_ioremap_resource(dev, &priv->res);
++ priv->iobase = devm_ioremap_resource(dev, &io_res);
+ if (IS_ERR(priv->iobase))
+ return PTR_ERR(priv->iobase);
+
+- priv->cca = crb_map_res(dev, priv, buf->control_address, 0x1000);
++ priv->cca = crb_map_res(dev, priv, &io_res, buf->control_address,
++ 0x1000);
+ if (IS_ERR(priv->cca))
+ return PTR_ERR(priv->cca);
+
+ pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
+ (u64) ioread32(&priv->cca->cmd_pa_low);
+- priv->cmd = crb_map_res(dev, priv, pa, ioread32(&priv->cca->cmd_size));
++ priv->cmd = crb_map_res(dev, priv, &io_res, pa,
++ ioread32(&priv->cca->cmd_size));
+ if (IS_ERR(priv->cmd))
+ return PTR_ERR(priv->cmd);
+
+ memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
+ pa = le64_to_cpu(pa);
+- priv->rsp = crb_map_res(dev, priv, pa, ioread32(&priv->cca->rsp_size));
++ priv->rsp = crb_map_res(dev, priv, &io_res, pa,
++ ioread32(&priv->cca->rsp_size));
+ return PTR_ERR_OR_ZERO(priv->rsp);
+ }
+
--- /dev/null
+From 422eac3f7deae34dbaffd08e03e27f37a5394a56 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Date: Tue, 19 Apr 2016 12:54:18 +0300
+Subject: tpm_crb: fix mapping of the buffers
+
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+
+commit 422eac3f7deae34dbaffd08e03e27f37a5394a56 upstream.
+
+On my Lenovo x250 the following situation occurs:
+
+[18697.813871] tpm_crb MSFT0101:00: can't request region for resource
+[mem 0xacdff080-0xacdfffff]
+
+The mapping of the control area overlaps the mapping of the command
+buffer. The control area is mapped over page, which is not right. It
+should mapped over sizeof(struct crb_control_area).
+
+Fixing this issue unmasks another issue. Command and response buffers
+can overlap and they do interleave on this machine. According to the PTP
+specification the overlapping means that they are mapped to the same
+buffer.
+
+The commit has also been tested on a Haswell NUC where things worked before
+applying this fix so that both code paths for response buffer
+initialization are tested.
+
+Cc: stable@vger.kernel.org
+Fixes: 1bd047be37d9 ("tpm_crb: Use devm_ioremap_resource")
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_crb.c | 39 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 28 insertions(+), 11 deletions(-)
+
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -259,7 +259,10 @@ static int crb_map_io(struct acpi_device
+ struct list_head resources;
+ struct resource io_res;
+ struct device *dev = &device->dev;
+- u64 pa;
++ u64 cmd_pa;
++ u32 cmd_size;
++ u64 rsp_pa;
++ u32 rsp_size;
+ int ret;
+
+ INIT_LIST_HEAD(&resources);
+@@ -280,22 +283,36 @@ static int crb_map_io(struct acpi_device
+ return PTR_ERR(priv->iobase);
+
+ priv->cca = crb_map_res(dev, priv, &io_res, buf->control_address,
+- 0x1000);
++ sizeof(struct crb_control_area));
+ if (IS_ERR(priv->cca))
+ return PTR_ERR(priv->cca);
+
+- pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
+- (u64) ioread32(&priv->cca->cmd_pa_low);
+- priv->cmd = crb_map_res(dev, priv, &io_res, pa,
+- ioread32(&priv->cca->cmd_size));
++ cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
++ (u64) ioread32(&priv->cca->cmd_pa_low);
++ cmd_size = ioread32(&priv->cca->cmd_size);
++ priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+ if (IS_ERR(priv->cmd))
+ return PTR_ERR(priv->cmd);
+
+- memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
+- pa = le64_to_cpu(pa);
+- priv->rsp = crb_map_res(dev, priv, &io_res, pa,
+- ioread32(&priv->cca->rsp_size));
+- return PTR_ERR_OR_ZERO(priv->rsp);
++ memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
++ rsp_pa = le64_to_cpu(rsp_pa);
++ rsp_size = ioread32(&priv->cca->rsp_size);
++
++ if (cmd_pa != rsp_pa) {
++ priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
++ return PTR_ERR_OR_ZERO(priv->rsp);
++ }
++
++ /* According to the PTP specification, overlapping command and response
++ * buffer sizes must be identical.
++ */
++ if (cmd_size != rsp_size) {
++ dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
++ return -EINVAL;
++ }
++
++ priv->rsp = priv->cmd;
++ return 0;
+ }
+
+ static int crb_acpi_add(struct acpi_device *device)