--- /dev/null
+From 22f6b4d34fcf039c63a94e7670e0da24f8575a5a Mon Sep 17 00:00:00 2001
+From: Jann Horn <jann@thejh.net>
+Date: Fri, 16 Sep 2016 00:31:22 +0200
+Subject: aio: mark AIO pseudo-fs noexec
+
+From: Jann Horn <jann@thejh.net>
+
+commit 22f6b4d34fcf039c63a94e7670e0da24f8575a5a upstream.
+
+This ensures that do_mmap() won't implicitly make AIO memory mappings
+executable if the READ_IMPLIES_EXEC personality flag is set. Such
+behavior is problematic because the security_mmap_file LSM hook doesn't
+catch this case, potentially permitting an attacker to bypass a W^X
+policy enforced by SELinux.
+
+I have tested the patch on my machine.
+
+To test the behavior, compile and run this:
+
+ #define _GNU_SOURCE
+ #include <unistd.h>
+ #include <sys/personality.h>
+ #include <linux/aio_abi.h>
+ #include <err.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <sys/syscall.h>
+
+ int main(void) {
+     personality(READ_IMPLIES_EXEC);
+     aio_context_t ctx = 0;
+     if (syscall(__NR_io_setup, 1, &ctx))
+         err(1, "io_setup");
+
+     char cmd[1000];
+     sprintf(cmd, "cat /proc/%d/maps | grep -F '/[aio]'",
+             (int)getpid());
+     system(cmd);
+     return 0;
+ }
+
+In the output, "rw-s" is good, "rwxs" is bad.
+
+Signed-off-by: Jann Horn <jann@thejh.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct f
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+- return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
++ struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
++ AIO_RING_MAGIC);
++
++ if (!IS_ERR(root))
++ root->d_sb->s_iflags |= SB_I_NOEXEC;
++ return root;
+ }
+
+ /* aio_setup
--- /dev/null
+From d9f179877e50ae2681fe7b0b83e0d9f63b6165ad Mon Sep 17 00:00:00 2001
+From: Marek Lindner <mareklindner@neomailbox.ch>
+Date: Mon, 2 May 2016 21:58:50 +0800
+Subject: batman-adv: remove unused callback from batadv_algo_ops struct
+
+From: Marek Lindner <mareklindner@neomailbox.ch>
+
+commit d9f179877e50ae2681fe7b0b83e0d9f63b6165ad upstream.
+
+Reported-by: Lars Bußmann <ffsoest@kill-you.net>
+Signed-off-by: Marek Lindner <mareklindner@neomailbox.ch>
+[sven@narfation.org: rewritten commit message to make clear that it is a
+ bugfix for a user-reported crash]
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/batman-adv/originator.c | 6 ------
+ net/batman-adv/types.h | 3 ---
+ 2 files changed, 9 deletions(-)
+
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -197,18 +197,12 @@ static void batadv_neigh_node_release(st
+ {
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+- struct batadv_algo_ops *bao;
+-
+- bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
+
+ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ &neigh_node->ifinfo_list, list) {
+ batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+ }
+
+- if (bao->bat_neigh_free)
+- bao->bat_neigh_free(neigh_node);
+-
+ batadv_hardif_free_ref(neigh_node->if_incoming);
+
+ kfree_rcu(neigh_node, rcu);
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -1136,8 +1136,6 @@ struct batadv_forw_packet {
+ * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or better
+ * than neigh2 for their respective outgoing interface from the metric
+ * prospective
+- * @bat_neigh_free: free the resources allocated by the routing algorithm for a
+- * neigh_node object
+ * @bat_orig_print: print the originator table (optional)
+ * @bat_orig_free: free the resources allocated by the routing algorithm for an
+ * orig_node object
+@@ -1165,7 +1163,6 @@ struct batadv_algo_ops {
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2);
+- void (*bat_neigh_free)(struct batadv_neigh_node *neigh);
+ /* orig_node handling API */
+ void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq,
+ struct batadv_hard_iface *hard_iface);
--- /dev/null
+From 0f4c7a138dfefb0ebdbaf56e3ba2acd2958a6605 Mon Sep 17 00:00:00 2001
+From: Loc Ho <lho@apm.com>
+Date: Mon, 29 Feb 2016 14:15:43 -0700
+Subject: clk: xgene: Add missing parenthesis when clearing divider value
+
+From: Loc Ho <lho@apm.com>
+
+commit 0f4c7a138dfefb0ebdbaf56e3ba2acd2958a6605 upstream.
+
+In the initial fix for a non-zero divider shift value, the parentheses
+around the shifted mask were missing, so the bitwise NOT was applied
+before the shift. This patch adds the required parentheses; without
+them, the bits below the divider field may be cleared unintentionally.
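+
+As a rough user-space illustration (not part of this patch, and using a
+hypothetical 3-bit divider field at shift 4), the snippet below prints
+what is left of an all-ones register after applying the old and the new
+mask expressions, showing how the missing parentheses also clear the
+bits below the divider field:
+
+    #include <stdio.h>
+    #include <inttypes.h>
+
+    int main(void)
+    {
+        uint32_t width = 3, shift = 4;   /* hypothetical field layout */
+        uint32_t data  = 0xffffffff;     /* register with all bits set */
+
+        /* old: the NOT is applied before the shift, so bits [shift-1:0]
+         * below the field are cleared as well
+         */
+        uint32_t bad  = data & (~((1u << width) - 1) << shift);
+        /* new: the NOT is applied to the shifted field mask only */
+        uint32_t good = data & ~(((1u << width) - 1) << shift);
+
+        printf("old expression leaves: 0x%08" PRIx32 "\n", bad);   /* 0xffffff80 */
+        printf("new expression leaves: 0x%08" PRIx32 "\n", good);  /* 0xffffff8f */
+        return 0;
+    }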
+
+Signed-off-by: Loc Ho <lho@apm.com>
+Acked-by: Toan Le <toanle@apm.com>
+Fixes: 1382ea631ddd ("clk: xgene: Fix divider with non-zero shift value")
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/clk-xgene.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/clk/clk-xgene.c
++++ b/drivers/clk/clk-xgene.c
+@@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk
+ /* Set new divider */
+ data = xgene_clk_read(pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+- data &= ~((1 << pclk->param.reg_divider_width) - 1)
+- << pclk->param.reg_divider_shift;
++ data &= ~(((1 << pclk->param.reg_divider_width) - 1)
++ << pclk->param.reg_divider_shift);
+ data |= divider;
+ xgene_clk_write(data, pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
--- /dev/null
+From 7efb367320f56fc4d549875b6f3a6940018ef2e5 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 30 Aug 2016 16:20:55 -0400
+Subject: dm log writes: fix bug with too large bios
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 7efb367320f56fc4d549875b6f3a6940018ef2e5 upstream.
+
+bio_alloc() can allocate a bio with at most BIO_MAX_PAGES (256) vector
+entries. However, the incoming bio may have more vector entries if it
+was allocated by other means. For example, bcache submits bios with
+more than BIO_MAX_PAGES entries. This results in bio_alloc() failure.
+
+To avoid the failure, change the code so that it allocates bio with at
+most BIO_MAX_PAGES entries. If the incoming bio has more entries,
+bio_add_page() will fail and a new bio will be allocated - the code that
+handles bio_add_page() failure already exists in the dm-log-writes
+target.
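+
+As a rough user-space sketch of that pattern (toy names, not the actual
+dm-log-writes code): cap each batch at a fixed maximum, and whenever an
+entry no longer fits, submit what was collected so far and allocate a
+new batch sized for the remaining entries:
+
+    #include <stdio.h>
+
+    #define MAX_PER_BATCH 256            /* stands in for BIO_MAX_PAGES */
+
+    static int batch_cap, batch_used;
+
+    static void batch_alloc(int want)    /* bio_alloc() stand-in */
+    {
+        batch_cap  = want < MAX_PER_BATCH ? want : MAX_PER_BATCH;
+        batch_used = 0;
+    }
+
+    static int batch_add(void)           /* bio_add_page() stand-in:
+                                          * returns 0 when the batch is full */
+    {
+        if (batch_used == batch_cap)
+            return 0;
+        batch_used++;
+        return 1;
+    }
+
+    int main(void)
+    {
+        int vec_cnt = 1000;              /* more entries than one batch holds */
+        int submitted = 0;
+
+        batch_alloc(vec_cnt);
+        for (int i = 0; i < vec_cnt; i++) {
+            if (!batch_add()) {          /* full: submit, start a new batch */
+                submitted++;
+                batch_alloc(vec_cnt - i);
+                batch_add();
+            }
+        }
+        submitted++;                     /* submit the final, partial batch */
+        printf("%d entries sent in %d batches\n", vec_cnt, submitted);
+        return 0;
+    }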
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-log-writes.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -259,7 +259,7 @@ static int log_one_block(struct log_writ
+ sector++;
+
+ atomic_inc(&lc->io_blocks);
+- bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
++ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+@@ -280,7 +280,7 @@ static int log_one_block(struct log_writ
+ if (ret != block->vecs[i].bv_len) {
+ atomic_inc(&lc->io_blocks);
+ submit_bio(WRITE, bio);
+- bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
++ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
--- /dev/null
+From 68c6bcdd8bd00394c234b915ab9b97c74104130c Mon Sep 17 00:00:00 2001
+From: Erez Shitrit <erezsh@mellanox.com>
+Date: Sun, 28 Aug 2016 10:58:30 +0300
+Subject: IB/core: Fix use after free in send_leave function
+
+From: Erez Shitrit <erezsh@mellanox.com>
+
+commit 68c6bcdd8bd00394c234b915ab9b97c74104130c upstream.
+
+The function send_leave() sets the member group->query_id
+(group->query_id = ret) after calling the sa_query, but leave_handler
+can be executed before that assignment and might delete the group
+object, so the assignment then corrupts freed memory.
+
+Additionally, this patch gets rid of the group->query_id variable,
+which is not used.
+
+Fixes: faec2f7b96b5 ('IB/sa: Track multicast join/leave requests')
+Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/multicast.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -106,7 +106,6 @@ struct mcast_group {
+ atomic_t refcount;
+ enum mcast_group_state state;
+ struct ib_sa_query *query;
+- int query_id;
+ u16 pkey_index;
+ u8 leave_state;
+ int retries;
+@@ -339,11 +338,7 @@ static int send_join(struct mcast_group
+ member->multicast.comp_mask,
+ 3000, GFP_KERNEL, join_handler, group,
+ &group->query);
+- if (ret >= 0) {
+- group->query_id = ret;
+- ret = 0;
+- }
+- return ret;
++ return (ret > 0) ? 0 : ret;
+ }
+
+ static int send_leave(struct mcast_group *group, u8 leave_state)
+@@ -363,11 +358,7 @@ static int send_leave(struct mcast_group
+ IB_SA_MCMEMBER_REC_JOIN_STATE,
+ 3000, GFP_KERNEL, leave_handler,
+ group, &group->query);
+- if (ret >= 0) {
+- group->query_id = ret;
+- ret = 0;
+- }
+- return ret;
++ return (ret > 0) ? 0 : ret;
+ }
+
+ static void join_group(struct mcast_group *group, struct mcast_member *member,
--- /dev/null
+From 344bacca8cd811809fc33a249f2738ab757d327f Mon Sep 17 00:00:00 2001
+From: Alex Vesker <valex@mellanox.com>
+Date: Mon, 12 Sep 2016 09:55:28 +0300
+Subject: IB/ipoib: Don't allow MC joins during light MC flush
+
+From: Alex Vesker <valex@mellanox.com>
+
+commit 344bacca8cd811809fc33a249f2738ab757d327f upstream.
+
+This fix solves a race between a light flush and on-the-fly joins.
+A light flush doesn't set the device down or clear the IPOIB_OPER_UP
+flag, which means that if an MC join is in progress while we are
+flushing and the QP was attached to the BC MGID, we can get a mismatch
+when re-attaching a QP to the BC MGID.
+
+The light flush would set the broadcast group to NULL, causing an
+on-the-fly join to rejoin and reattach to the BC MCG, as well as add
+the BC MGID to the multicast list. The flush process would later
+remove the BC MGID and detach it from the QP. On the next flush the
+BC MGID is present in the multicast list but is not found when trying
+to detach it, because of the previous double attach and single detach.
+
+[18332.714265] ------------[ cut here ]------------
+[18332.717775] WARNING: CPU: 6 PID: 3767 at drivers/infiniband/core/verbs.c:280 ib_dealloc_pd+0xff/0x120 [ib_core]
+...
+[18332.775198] Hardware name: Red Hat KVM, BIOS Bochs 01/01/2011
+[18332.779411] 0000000000000000 ffff8800b50dfbb0 ffffffff813fed47 0000000000000000
+[18332.784960] 0000000000000000 ffff8800b50dfbf0 ffffffff8109add1 0000011832f58300
+[18332.790547] ffff880226a596c0 ffff880032482000 ffff880032482830 ffff880226a59280
+[18332.796199] Call Trace:
+[18332.798015] [<ffffffff813fed47>] dump_stack+0x63/0x8c
+[18332.801831] [<ffffffff8109add1>] __warn+0xd1/0xf0
+[18332.805403] [<ffffffff8109aebd>] warn_slowpath_null+0x1d/0x20
+[18332.809706] [<ffffffffa025d90f>] ib_dealloc_pd+0xff/0x120 [ib_core]
+[18332.814384] [<ffffffffa04f3d7c>] ipoib_transport_dev_cleanup+0xfc/0x1d0 [ib_ipoib]
+[18332.820031] [<ffffffffa04ed648>] ipoib_ib_dev_cleanup+0x98/0x110 [ib_ipoib]
+[18332.825220] [<ffffffffa04e62c8>] ipoib_dev_cleanup+0x2d8/0x550 [ib_ipoib]
+[18332.830290] [<ffffffffa04e656f>] ipoib_uninit+0x2f/0x40 [ib_ipoib]
+[18332.834911] [<ffffffff81772a8a>] rollback_registered_many+0x1aa/0x2c0
+[18332.839741] [<ffffffff81772bd1>] rollback_registered+0x31/0x40
+[18332.844091] [<ffffffff81773b18>] unregister_netdevice_queue+0x48/0x80
+[18332.848880] [<ffffffffa04f489b>] ipoib_vlan_delete+0x1fb/0x290 [ib_ipoib]
+[18332.853848] [<ffffffffa04df1cd>] delete_child+0x7d/0xf0 [ib_ipoib]
+[18332.858474] [<ffffffff81520c08>] dev_attr_store+0x18/0x30
+[18332.862510] [<ffffffff8127fe4a>] sysfs_kf_write+0x3a/0x50
+[18332.866349] [<ffffffff8127f4e0>] kernfs_fop_write+0x120/0x170
+[18332.870471] [<ffffffff81207198>] __vfs_write+0x28/0xe0
+[18332.874152] [<ffffffff810e09bf>] ? percpu_down_read+0x1f/0x50
+[18332.878274] [<ffffffff81208062>] vfs_write+0xa2/0x1a0
+[18332.881896] [<ffffffff812093a6>] SyS_write+0x46/0xa0
+[18332.885632] [<ffffffff810039b7>] do_syscall_64+0x57/0xb0
+[18332.889709] [<ffffffff81883321>] entry_SYSCALL64_slow_path+0x25/0x25
+[18332.894727] ---[ end trace 09ebbe31f831ef17 ]---
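+
+The heart of the fix is a "test-and-clear the flag, flush, restore the
+flag if it was set" sequence around IPOIB_FLAG_OPER_UP. A rough
+user-space analogue of that pattern (invented names, not the ipoib
+code):
+
+    #include <stdatomic.h>
+    #include <stdio.h>
+
+    #define FLAG_OPER_UP (1u << 0)
+
+    static atomic_uint flags = FLAG_OPER_UP;
+
+    static int try_join(void)
+    {
+        /* joins are refused while the flag is clear */
+        return atomic_load(&flags) & FLAG_OPER_UP;
+    }
+
+    int main(void)
+    {
+        /* test_and_clear_bit() analogue: clear the bit, remember old state */
+        unsigned int was_up =
+            atomic_fetch_and(&flags, ~FLAG_OPER_UP) & FLAG_OPER_UP;
+
+        printf("join during flush allowed? %d\n", try_join());  /* 0 */
+
+        /* ... the multicast flush would run here ... */
+
+        if (was_up)                      /* set_bit() analogue */
+            atomic_fetch_or(&flags, FLAG_OPER_UP);
+
+        printf("join after flush allowed? %d\n", try_join());   /* 1 */
+        return 0;
+    }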
+
+Fixes: ee1e2c82c245 ("IPoIB: Refresh paths instead of flushing them on SM change events")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1028,8 +1028,17 @@ static void __ipoib_ib_dev_flush(struct
+ }
+
+ if (level == IPOIB_FLUSH_LIGHT) {
++ int oper_up;
+ ipoib_mark_paths_invalid(dev);
++ /* Set IPoIB operation as down to prevent races between:
++ * the flush flow which leaves MCG and on the fly joins
++ * which can happen during that time. mcast restart task
++ * should deal with join requests we missed.
++ */
++ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_mcast_dev_flush(dev);
++ if (oper_up)
++ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_flush_ah(dev);
+ }
+
--- /dev/null
+From 546481c2816ea3c061ee9d5658eb48070f69212e Mon Sep 17 00:00:00 2001
+From: Erez Shitrit <erezsh@mellanox.com>
+Date: Sun, 28 Aug 2016 10:58:31 +0300
+Subject: IB/ipoib: Fix memory corruption in ipoib cm mode connect flow
+
+From: Erez Shitrit <erezsh@mellanox.com>
+
+commit 546481c2816ea3c061ee9d5658eb48070f69212e upstream.
+
+When a new CM connection is being requested, the ipoib driver copies
+data from the path pointer in the CM/tx object. The path object might
+be invalid at that point, and memory corruption will happen later when
+the CM driver tries to use that data.
+
+The following scenario demonstrates it:
+ neigh_add_path --> ipoib_cm_create_tx -->
+ queue_work (pointer to path is in the cm/tx struct)
+ #while the work is still in the queue,
+ #the port goes down and causes the ipoib_flush_paths:
+ ipoib_flush_paths --> path_free --> kfree(path)
+ #at this point the work scheduled starts.
+ ipoib_cm_tx_start --> copy from the (invalid)path pointer:
+ (memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);)
+ -> memory corruption.
+
+To fix that, the driver now starts the CM/tx connection only if that
+specific path exists in the general path database.
+This check is protected by the relevant locks, and uses the GID from
+the neigh member in the CM/tx object, which is valid according to the
+reference count taken by the CM/tx.
+
+Fixes: 839fcaba35 ('IPoIB: Connected mode experimental support')
+Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib.h | 1 +
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c | 16 ++++++++++++++++
+ drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +-
+ 3 files changed, 18 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -472,6 +472,7 @@ void ipoib_send(struct net_device *dev,
+ struct ipoib_ah *address, u32 qpn);
+ void ipoib_reap_ah(struct work_struct *work);
+
++struct ipoib_path *__path_find(struct net_device *dev, void *gid);
+ void ipoib_mark_paths_invalid(struct net_device *dev);
+ void ipoib_flush_paths(struct net_device *dev);
+ struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1299,6 +1299,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm
+ }
+ }
+
++#define QPN_AND_OPTIONS_OFFSET 4
++
+ static void ipoib_cm_tx_start(struct work_struct *work)
+ {
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+@@ -1307,6 +1309,7 @@ static void ipoib_cm_tx_start(struct wor
+ struct ipoib_neigh *neigh;
+ struct ipoib_cm_tx *p;
+ unsigned long flags;
++ struct ipoib_path *path;
+ int ret;
+
+ struct ib_sa_path_rec pathrec;
+@@ -1319,7 +1322,19 @@ static void ipoib_cm_tx_start(struct wor
+ p = list_entry(priv->cm.start_list.next, typeof(*p), list);
+ list_del_init(&p->list);
+ neigh = p->neigh;
++
+ qpn = IPOIB_QPN(neigh->daddr);
++ /*
++ * As long as the search is with these 2 locks,
++ * path existence indicates its validity.
++ */
++ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
++ if (!path) {
++ pr_info("%s ignore not valid path %pI6\n",
++ __func__,
++ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
++ goto free_neigh;
++ }
+ memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1331,6 +1346,7 @@ static void ipoib_cm_tx_start(struct wor
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ret) {
++free_neigh:
+ neigh = p->neigh;
+ if (neigh) {
+ neigh->cm = NULL;
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -481,7 +481,7 @@ int ipoib_set_mode(struct net_device *de
+ return -EINVAL;
+ }
+
+-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
++struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct rb_node *n = priv->path_tree.rb_node;
--- /dev/null
+From baa0be7026e2f7d1d40bfd45909044169e9e3c68 Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Mon, 12 Sep 2016 19:16:19 +0300
+Subject: IB/mlx4: Fix code indentation in QP1 MAD flow
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+commit baa0be7026e2f7d1d40bfd45909044169e9e3c68 upstream.
+
+The indentation in the QP1 GRH flow in procedure build_mlx_header is
+really confusing. Fix it, in preparation for a commit which touches
+this code.
+
+Fixes: 1ffeb2eb8be9 ("IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support")
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx4/qp.c | 36 +++++++++++++++++++-----------------
+ 1 file changed, 19 insertions(+), 17 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2331,24 +2331,26 @@ static int build_mlx_header(struct mlx4_
+ sqp->ud_header.grh.flow_label =
+ ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+ sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+- if (is_eth)
++ if (is_eth) {
+ memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
+- else {
+- if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+- /* When multi-function is enabled, the ib_core gid
+- * indexes don't necessarily match the hw ones, so
+- * we must use our own cache */
+- sqp->ud_header.grh.source_gid.global.subnet_prefix =
+- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+- subnet_prefix;
+- sqp->ud_header.grh.source_gid.global.interface_id =
+- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+- guid_cache[ah->av.ib.gid_index];
+- } else
+- ib_get_cached_gid(ib_dev,
+- be32_to_cpu(ah->av.ib.port_pd) >> 24,
+- ah->av.ib.gid_index,
+- &sqp->ud_header.grh.source_gid, NULL);
++ } else {
++ if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
++ /* When multi-function is enabled, the ib_core gid
++ * indexes don't necessarily match the hw ones, so
++ * we must use our own cache
++ */
++ sqp->ud_header.grh.source_gid.global.subnet_prefix =
++ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
++ subnet_prefix;
++ sqp->ud_header.grh.source_gid.global.interface_id =
++ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
++ guid_cache[ah->av.ib.gid_index];
++ } else {
++ ib_get_cached_gid(ib_dev,
++ be32_to_cpu(ah->av.ib.port_pd) >> 24,
++ ah->av.ib.gid_index,
++ &sqp->ud_header.grh.source_gid, NULL);
++ }
+ }
+ memcpy(sqp->ud_header.grh.destination_gid.raw,
+ ah->av.ib.dgid, 16);
--- /dev/null
+From e5ac40cd66c2f3cd11bc5edc658f012661b16347 Mon Sep 17 00:00:00 2001
+From: Alex Vesker <valex@mellanox.com>
+Date: Mon, 12 Sep 2016 19:16:18 +0300
+Subject: IB/mlx4: Fix incorrect MC join state bit-masking on SR-IOV
+
+From: Alex Vesker <valex@mellanox.com>
+
+commit e5ac40cd66c2f3cd11bc5edc658f012661b16347 upstream.
+
+Because of incorrect bit-masking of the join state bits, when handling
+a join request we failed to detect a difference between the group join
+state and the request join state when joining as a send-only full
+member (0x8). This caused the MC join request not to be sent.
+This issue is relevant only when SR-IOV is enabled and the SM supports
+send-only full member.
+
+This fix gives the scope bits and the join state bits a full nibble
+each.
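+
+A quick user-space illustration of why a 0x7 mask hides the send-only
+full member bit while a full-nibble mask exposes it (the scope value
+here is arbitrary):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        unsigned char scope_join_state = 0x28; /* scope in the high nibble,
+                                                * send-only full member (0x8)
+                                                * in the low nibble */
+        unsigned char group_join_state = 0x00; /* group not joined yet */
+
+        unsigned char old_req = scope_join_state & 0x7; /* drops bit 0x8 */
+        unsigned char new_req = scope_join_state & 0xf; /* keeps bit 0x8 */
+
+        printf("0x7 mask: request 0x%x vs group 0x%x -> join %s\n",
+               old_req, group_join_state,
+               (old_req & ~group_join_state) ? "sent" : "not sent");
+        printf("0xf mask: request 0x%x vs group 0x%x -> join %s\n",
+               new_req, group_join_state,
+               (new_req & ~group_join_state) ? "sent" : "not sent");
+        return 0;
+    }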
+
+Fixes: b9c5d6a64358 ('IB/mlx4: Add multicast group (MCG) paravirtualization for SR-IOV')
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx4/mcg.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_g
+ if (!group->members[i])
+ leave_state |= (1 << i);
+
+- return leave_state & (group->rec.scope_join_state & 7);
++ return leave_state & (group->rec.scope_join_state & 0xf);
+ }
+
+ static int join_group(struct mcast_group *group, int slave, u8 join_mask)
+@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(
+ } else
+ mcg_warn_group(group, "DRIVER BUG\n");
+ } else if (group->state == MCAST_LEAVE_SENT) {
+- if (group->rec.scope_join_state & 7)
+- group->rec.scope_join_state &= 0xf8;
++ if (group->rec.scope_join_state & 0xf)
++ group->rec.scope_join_state &= 0xf0;
+ group->state = MCAST_IDLE;
+ mutex_unlock(&group->lock);
+ if (release_group(group, 1))
+@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast
+ static int handle_join_req(struct mcast_group *group, u8 join_mask,
+ struct mcast_req *req)
+ {
+- u8 group_join_state = group->rec.scope_join_state & 7;
++ u8 group_join_state = group->rec.scope_join_state & 0xf;
+ int ref = 0;
+ u16 status;
+ struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(str
+ u8 cur_join_state;
+
+ resp_join_state = ((struct ib_sa_mcmember_data *)
+- group->response_sa_mad.data)->scope_join_state & 7;
+- cur_join_state = group->rec.scope_join_state & 7;
++ group->response_sa_mad.data)->scope_join_state & 0xf;
++ cur_join_state = group->rec.scope_join_state & 0xf;
+
+ if (method == IB_MGMT_METHOD_GET_RESP) {
+ /* successfull join */
+@@ -710,7 +710,7 @@ process_requests:
+ req = list_first_entry(&group->pending_list, struct mcast_req,
+ group_list);
+ sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+- req_join_state = sa_data->scope_join_state & 0x7;
++ req_join_state = sa_data->scope_join_state & 0xf;
+
+ /* For a leave request, we will immediately answer the VF, and
+ * update our internal counters. The actual leave will be sent
--- /dev/null
+From 8ec07bf8a8b57d6c58927a16a0a22c0115cf2855 Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Mon, 12 Sep 2016 19:16:20 +0300
+Subject: IB/mlx4: Use correct subnet-prefix in QP1 mads under SR-IOV
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+commit 8ec07bf8a8b57d6c58927a16a0a22c0115cf2855 upstream.
+
+When sending QP1 MAD packets which use a GRH, the source GID
+(which consists of the 64-bit subnet prefix, and the 64 bit port GUID)
+must be included in the packet GRH.
+
+For SR-IOV, a GID cache is used, since the source GID needs to be the
+slave's source GID, and not the Hypervisor's GID. This cache also
+included a subnet_prefix. Unfortunately, the subnet_prefix field in
+the cache was never initialized (to the default subnet prefix 0xfe80::0).
+As a result, this field remained all zeroes. Therefore, when SR-IOV
+was active, all QP1 packets which included a GRH had a source GID
+subnet prefix of all-zeroes.
+
+However, the subnet-prefix should initially be 0xfe80::0 (the default
+subnet prefix). In addition, if OpenSM modifies a port's subnet prefix,
+the new subnet prefix must be used in the GRH when sending QP1 packets.
+To fix this we now initialize the subnet prefix in the SR-IOV GID cache
+to the default subnet prefix. We update the cached value if/when OpenSM
+modifies the port's subnet prefix. We take this cached value when sending
+QP1 packets when SR-IOV is active.
+
+Note that the value is stored as an atomic64. This eliminates any need
+for locking when the subnet prefix is being updated.
+
+Note also that we depend on the FW generating the "port management change"
+event for tracking subnet-prefix changes performed by OpenSM. If running
+early FW (before 2.9.4630), subnet prefix changes will not be tracked (but
+the default subnet prefix still will be stored in the cache; therefore
+users who do not modify the subnet prefix will not have a problem).
+If there is a need for such tracking also for early FW, we will add that
+capability in a subsequent patch.
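+
+A rough user-space sketch of the caching scheme described above (glibc
+byte-order helpers, invented names; the real code uses the kernel's
+atomic64_t): the prefix is cached in CPU byte order inside an atomic
+64-bit variable so no lock is needed, and it is converted back to
+big-endian only when the GRH is built:
+
+    #define _DEFAULT_SOURCE
+    #include <endian.h>      /* htobe64()/be64toh() */
+    #include <inttypes.h>
+    #include <stdatomic.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* cached per-port prefix, kept in CPU order */
+    static _Atomic uint64_t cached_subnet_prefix;
+
+    /* port management change event side */
+    static void on_prefix_change(uint64_t gid_prefix_be)
+    {
+        atomic_store(&cached_subnet_prefix, be64toh(gid_prefix_be));
+    }
+
+    /* QP1 send side: value to copy into the GRH source GID */
+    static uint64_t grh_source_prefix_be(void)
+    {
+        return htobe64(atomic_load(&cached_subnet_prefix));
+    }
+
+    int main(void)
+    {
+        /* the PortInfo MAD carries the prefix in network byte order */
+        on_prefix_change(htobe64(0xfe80000000000000ULL));
+
+        printf("cached prefix (CPU order):   0x%016" PRIx64 "\n",
+               (uint64_t)atomic_load(&cached_subnet_prefix));
+        printf("GRH field (big-endian bits): 0x%016" PRIx64 "\n",
+               grh_source_prefix_be());
+        return 0;
+    }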
+
+Fixes: 1ffeb2eb8be9 ("IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support")
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx4/mad.c | 23 +++++++++++++++++++++++
+ drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 +-
+ drivers/infiniband/hw/mlx4/qp.c | 5 +++--
+ 3 files changed, 27 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1080,6 +1080,27 @@ void handle_port_mgmt_change_event(struc
+
+ /* Generate GUID changed event */
+ if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
++ if (mlx4_is_master(dev->dev)) {
++ union ib_gid gid;
++ int err = 0;
++
++ if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
++ err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
++ else
++ gid.global.subnet_prefix =
++ eqe->event.port_mgmt_change.params.port_info.gid_prefix;
++ if (err) {
++ pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
++ port, err);
++ } else {
++ pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
++ port,
++ (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
++ be64_to_cpu(gid.global.subnet_prefix));
++ atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
++ be64_to_cpu(gid.global.subnet_prefix));
++ }
++ }
+ mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+ /*if master, notify all slaves*/
+ if (mlx4_is_master(dev->dev))
+@@ -2154,6 +2175,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_de
+ if (err)
+ goto demux_err;
+ dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
++ atomic64_set(&dev->sriov.demux[i].subnet_prefix,
++ be64_to_cpu(gid.global.subnet_prefix));
+ err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
+ &dev->sriov.sqps[i]);
+ if (err)
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -441,7 +441,7 @@ struct mlx4_ib_demux_ctx {
+ struct workqueue_struct *wq;
+ struct workqueue_struct *ud_wq;
+ spinlock_t ud_lock;
+- __be64 subnet_prefix;
++ atomic64_t subnet_prefix;
+ __be64 guid_cache[128];
+ struct mlx4_ib_dev *dev;
+ /* the following lock protects both mcg_table and mcg_mgid0_list */
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2340,8 +2340,9 @@ static int build_mlx_header(struct mlx4_
+ * we must use our own cache
+ */
+ sqp->ud_header.grh.source_gid.global.subnet_prefix =
+- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+- subnet_prefix;
++ cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
++ demux[sqp->qp.port - 1].
++ subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global.interface_id =
+ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+ guid_cache[ah->av.ib.gid_index];
kernel-fork-fix-clone_child_cleartid-regression-in-nscd.patch
dmaengine-at_xdmac-fix-to-pass-correct-device-identity-to-free_irq.patch
kvm-nvmx-postpone-vmcs-changes-on-msr_ia32_apicbase-write.patch
+ib-ipoib-fix-memory-corruption-in-ipoib-cm-mode-connect-flow.patch
+ib-core-fix-use-after-free-in-send_leave-function.patch
+ib-ipoib-don-t-allow-mc-joins-during-light-mc-flush.patch
+ib-mlx4-fix-incorrect-mc-join-state-bit-masking-on-sr-iov.patch
+ib-mlx4-fix-code-indentation-in-qp1-mad-flow.patch
+ib-mlx4-use-correct-subnet-prefix-in-qp1-mads-under-sr-iov.patch
+batman-adv-remove-unused-callback-from-batadv_algo_ops-struct.patch
+aio-mark-aio-pseudo-fs-noexec.patch
+clk-xgene-add-missing-parenthesis-when-clearing-divider-value.patch
+dm-log-writes-fix-bug-with-too-large-bios.patch