--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Tue, 21 Jun 2016 14:24:33 +0530
+Subject: ARCv2: smp-boot: wake_flag polling by non-Masters needs to be uncached
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+
+[ Upstream commit 78f824d4312a8944f5340c6b161bba3bf2c81096 ]
+
+This is needed on HS38 cores so that the IO-Coherency aperture is set
+up properly.
+
+The polling could perturb the caches and the coherency fabric, which
+could be wrong in the small window when the Master is setting up the
+IOC aperture etc. in arc_cache_init().
+
+We do it only for ARCv2-based builds so as not to affect the EZChip
+ARCompact-based platform.
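+
+As a rough illustration of the handshake (a userspace model, not the
+kernel code), C11 atomics below stand in for the uncached
+arc_read_uncached_32()/arc_write_uncached_32() accessors; the thread
+setup, the three-CPU count and the Master's explicit wait for the ack
+are simplifications:
+
+ #include <pthread.h>
+ #include <stdatomic.h>
+ #include <stdio.h>
+
+ static atomic_int wake_flag;
+
+ static void *secondary(void *arg)
+ {
+     int cpu = (int)(long)arg;
+
+     /* arc_platform_smp_wait_to_boot(): spin until kicked by the Master */
+     while (atomic_load(&wake_flag) != cpu)
+         ;
+     atomic_store(&wake_flag, 0);    /* ack, allowing the next kick */
+     printf("cpu %d woken\n", cpu);
+     return NULL;
+ }
+
+ int main(void)
+ {
+     pthread_t t[3];
+
+     for (long cpu = 1; cpu <= 3; cpu++)
+         pthread_create(&t[cpu - 1], NULL, secondary, (void *)cpu);
+
+     /* arc_default_smp_cpu_kick(): the Master hands each CPU its id */
+     for (int cpu = 1; cpu <= 3; cpu++) {
+         atomic_store(&wake_flag, cpu);
+         while (atomic_load(&wake_flag) != 0)    /* wait for the ack */
+             ;
+     }
+     for (int i = 0; i < 3; i++)
+         pthread_join(t[i], NULL);
+     return 0;
+ }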
+
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arc/kernel/smp.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/arch/arc/kernel/smp.c
++++ b/arch/arc/kernel/smp.c
+@@ -90,10 +90,23 @@ void __init smp_cpus_done(unsigned int m
+ */
+ static volatile int wake_flag;
+
++#ifdef CONFIG_ISA_ARCOMPACT
++
++#define __boot_read(f) f
++#define __boot_write(f, v) f = v
++
++#else
++
++#define __boot_read(f) arc_read_uncached_32(&f)
++#define __boot_write(f, v) arc_write_uncached_32(&f, v)
++
++#endif
++
+ static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
+ {
+ BUG_ON(cpu == 0);
+- wake_flag = cpu;
++
++ __boot_write(wake_flag, cpu);
+ }
+
+ void arc_platform_smp_wait_to_boot(int cpu)
+@@ -102,10 +115,10 @@ void arc_platform_smp_wait_to_boot(int c
+ if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+ return;
+
+- while (wake_flag != cpu)
++ while (__boot_read(wake_flag) != cpu)
+ ;
+
+- wake_flag = 0;
++ __boot_write(wake_flag, 0);
+ }
+
+ const char *arc_platform_smp_cpuinfo(void)
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 24 Jan 2017 00:51:32 +0100
+Subject: netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+
+[ Upstream commit 35d0ac9070ef619e3bf44324375878a1c540387b ]
+
+If the element exists and no NLM_F_EXCL is specified, do not bump
+set->nelems, otherwise we leak one set element slot. This problem is
+amplified if the set is full, since the abort path always decrements
+the counter for the -ENFILE case too, giving one spare extra slot.
+
+Fix this by moving set->nelems update to nft_add_set_elem() after
+successful element insertion. Moreover, remove the element if the set is
+full so there is no need to rely on the abort path to undo things
+anymore.
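+
+A self-contained userspace sketch of the fixed ordering follows; the
+array-backed insert()/remove_elem() are hypothetical stand-ins for
+set->ops->insert()/remove(), and the CAS loop plays the role of
+atomic_add_unless():
+
+ #include <errno.h>
+ #include <stdatomic.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+
+ #define SET_SIZE 4
+
+ static atomic_uint nelems;
+ static bool present[64];
+
+ static int insert(int key)          /* stand-in for set->ops->insert() */
+ {
+     if (present[key])
+         return -EEXIST;
+     present[key] = true;
+     return 0;
+ }
+
+ static void remove_elem(int key)    /* stand-in for set->ops->remove() */
+ {
+     present[key] = false;
+ }
+
+ static int add_elem(int key)
+ {
+     int err = insert(key);
+
+     if (err)    /* existing element, no NLM_F_EXCL: nelems untouched */
+         return err;
+
+     /* atomic_add_unless(&nelems, 1, SET_SIZE) analogue */
+     unsigned int old = atomic_load(&nelems);
+     do {
+         if (old >= SET_SIZE) {
+             remove_elem(key);   /* undo insertion, no abort path needed */
+             return -ENFILE;
+         }
+     } while (!atomic_compare_exchange_weak(&nelems, &old, old + 1));
+     return 0;
+ }
+
+ int main(void)
+ {
+     for (int key = 0; key < 6; key++)
+         printf("add %d -> %d\n", key, add_elem(key));
+     printf("re-add 0 -> %d, nelems = %u\n",
+            add_elem(0), atomic_load(&nelems));
+     return 0;
+ }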
+
+Fixes: c016c7e45ddf ("netfilter: nf_tables: honor NLM_F_EXCL flag in set element insertion")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3637,10 +3637,18 @@ static int nft_add_set_elem(struct nft_c
+ goto err5;
+ }
+
++ if (set->size &&
++ !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
++ err = -ENFILE;
++ goto err6;
++ }
++
+ nft_trans_elem(trans) = elem;
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ return 0;
+
++err6:
++ set->ops->remove(set, &elem);
+ err5:
+ kfree(trans);
+ err4:
+@@ -3687,15 +3695,9 @@ static int nf_tables_newsetelem(struct n
+ return -EBUSY;
+
+ nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+- if (set->size &&
+- !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
+- return -ENFILE;
+-
+ err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
+- if (err < 0) {
+- atomic_dec(&set->nelems);
++ if (err < 0)
+ break;
+- }
+ }
+ return err;
+ }
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Liping Zhang <zlpnobody@gmail.com>
+Date: Sun, 22 Jan 2017 22:10:32 +0800
+Subject: netfilter: nft_log: restrict the log prefix length to 127
+
+From: Liping Zhang <zlpnobody@gmail.com>
+
+
+[ Upstream commit 5ce6b04ce96896e8a79e6f60740ced911eaac7a4 ]
+
+First, the log prefix will be truncated to NF_LOG_PREFIXLEN-1, i.e. 127,
+at nf_log_packet(), so the extra part is useless.
+
+Second, after adding a log rule with a very long prefix, we will fail
+to dump the nft rules that follow this _special_ one, even though they
+actually do exist. For example:
+ # name_65000=$(printf "%0.sQ" {1..65000})
+ # nft add rule filter output log prefix "$name_65000"
+ # nft add rule filter output counter
+ # nft add rule filter output counter
+ # nft list chain filter output
+ table ip filter {
+ chain output {
+ type filter hook output priority 0; policy accept;
+ }
+ }
+
+So now, restrict the log prefix length to NF_LOG_PREFIXLEN-1.
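+
+For illustration, a userspace model of the length check that the
+tightened policy makes the netlink core perform; struct str_policy and
+validate_prefix() are simplified stand-ins, not the real nla_policy
+machinery:
+
+ #include <errno.h>
+ #include <stdio.h>
+ #include <string.h>
+
+ #define NF_LOG_PREFIXLEN 128
+
+ struct str_policy {
+     size_t max_len;                 /* like nla_policy.len for NLA_STRING */
+ };
+
+ static const struct str_policy log_prefix_policy = {
+     .max_len = NF_LOG_PREFIXLEN - 1,    /* leave room for the NUL */
+ };
+
+ static int validate_prefix(const char *prefix)
+ {
+     if (strlen(prefix) > log_prefix_policy.max_len)
+         return -EINVAL;             /* nla_parse() would reject this */
+     return 0;
+ }
+
+ int main(void)
+ {
+     static char big[65001];
+
+     memset(big, 'Q', sizeof(big) - 1);
+     printf("short prefix: %d\n", validate_prefix("nft-log: "));
+     printf("65000-char prefix: %d\n", validate_prefix(big));
+     return 0;
+ }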
+
+Fixes: 96518518cc41 ("netfilter: add nftables")
+Signed-off-by: Liping Zhang <zlpnobody@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/netfilter/nf_log.h | 2 ++
+ net/netfilter/nf_log.c | 1 -
+ net/netfilter/nft_log.c | 3 ++-
+ 3 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/uapi/linux/netfilter/nf_log.h
++++ b/include/uapi/linux/netfilter/nf_log.h
+@@ -9,4 +9,6 @@
+ #define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
+ #define NF_LOG_MASK 0x2f
+
++#define NF_LOG_PREFIXLEN 128
++
+ #endif /* _NETFILTER_NF_LOG_H */
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -13,7 +13,6 @@
+ /* Internal logging interface, which relies on the real
+ LOG target modules */
+
+-#define NF_LOG_PREFIXLEN 128
+ #define NFLOGGER_NAME_LEN 64
+
+ static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
+--- a/net/netfilter/nft_log.c
++++ b/net/netfilter/nft_log.c
+@@ -38,7 +38,8 @@ static void nft_log_eval(const struct nf
+
+ static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
+ [NFTA_LOG_GROUP] = { .type = NLA_U16 },
+- [NFTA_LOG_PREFIX] = { .type = NLA_STRING },
++ [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
++ .len = NF_LOG_PREFIXLEN - 1 },
+ [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
+ [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
+ [NFTA_LOG_LEVEL] = { .type = NLA_U32 },
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+Date: Tue, 24 Jan 2017 13:51:43 +0200
+Subject: RDMA/qedr: Dispatch port active event from qedr_add
+
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+
+
+[ Upstream commit f449c7a2d822c2d81b5bcb2c50eec80796766726 ]
+
+Relying on qede to trigger qedr on startup is problematic. When both
+are probing, if qedr loads slowly, qede can assume qedr is missing and
+not trigger it. This patch adds triggering from qedr as well, and
+protects against the resulting race via an atomic bit.
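+
+A minimal userspace model of the guard, assuming a C11 atomic_flag in
+place of test_and_set_bit() on QEDR_ENET_STATE_BIT; the two callers
+mirror qedr_add() and the QEDE_UP notification path:
+
+ #include <pthread.h>
+ #include <stdatomic.h>
+ #include <stdio.h>
+
+ static atomic_flag enet_state = ATOMIC_FLAG_INIT;
+
+ static void *dispatch_port_active(void *who)
+ {
+     /* test_and_set_bit() analogue: only the first caller wins */
+     if (!atomic_flag_test_and_set(&enet_state))
+         printf("PORT_ACTIVE dispatched by %s\n", (const char *)who);
+     return NULL;
+ }
+
+ int main(void)
+ {
+     pthread_t t;
+
+     pthread_create(&t, NULL, dispatch_port_active, (void *)"qedr_add");
+     dispatch_port_active((void *)"qedr_open");  /* QEDE_UP path */
+     pthread_join(t, NULL);
+     return 0;                   /* exactly one PORT_ACTIVE line prints */
+ }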
+
+Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qedr/main.c | 20 ++++++++++++++------
+ drivers/infiniband/hw/qedr/qedr.h | 5 +++++
+ 2 files changed, 19 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -792,6 +792,9 @@ static struct qedr_dev *qedr_add(struct
+ if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+ goto sysfs_err;
+
++ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
++ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
++
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+ return dev;
+
+@@ -824,11 +827,10 @@ static void qedr_remove(struct qedr_dev
+ ib_dealloc_device(&dev->ibdev);
+ }
+
+-static int qedr_close(struct qedr_dev *dev)
++static void qedr_close(struct qedr_dev *dev)
+ {
+- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+-
+- return 0;
++ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
++ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
+ }
+
+ static void qedr_shutdown(struct qedr_dev *dev)
+@@ -837,6 +839,12 @@ static void qedr_shutdown(struct qedr_de
+ qedr_remove(dev);
+ }
+
++static void qedr_open(struct qedr_dev *dev)
++{
++ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
++ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
++}
++
+ static void qedr_mac_address_change(struct qedr_dev *dev)
+ {
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+@@ -863,7 +871,7 @@ static void qedr_mac_address_change(stru
+
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+- qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
++ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
+
+ if (rc)
+ DP_ERR(dev, "Error updating mac filter\n");
+@@ -877,7 +885,7 @@ static void qedr_notify(struct qedr_dev
+ {
+ switch (event) {
+ case QEDE_UP:
+- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
++ qedr_open(dev);
+ break;
+ case QEDE_DOWN:
+ qedr_close(dev);
+--- a/drivers/infiniband/hw/qedr/qedr.h
++++ b/drivers/infiniband/hw/qedr/qedr.h
+@@ -113,6 +113,8 @@ struct qedr_device_attr {
+ struct qed_rdma_events events;
+ };
+
++#define QEDR_ENET_STATE_BIT (0)
++
+ struct qedr_dev {
+ struct ib_device ibdev;
+ struct qed_dev *cdev;
+@@ -153,6 +155,8 @@ struct qedr_dev {
+ struct qedr_cq *gsi_sqcq;
+ struct qedr_cq *gsi_rqcq;
+ struct qedr_qp *gsi_qp;
++
++ unsigned long enet_state;
+ };
+
+ #define QEDR_MAX_SQ_PBL (0x8000)
+@@ -188,6 +192,7 @@ struct qedr_dev {
+ #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
+
+ #define QEDR_MAX_PORT (1)
++#define QEDR_PORT (1)
+
+ #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+Date: Tue, 24 Jan 2017 13:50:38 +0200
+Subject: RDMA/qedr: Don't reset QP when queues aren't flushed
+
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+
+
+[ Upstream commit 933e6dcaa0f65eb2f624ad760274020874a1f35e ]
+
+Fail QP state transition from error to reset if SQ/RQ are not empty
+and still in the process of flushing out the queued work entries.
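+
+The emptiness test reduces to comparing producer and consumer indices;
+a compilable sketch, with field names borrowed from the diff below:
+
+ #include <errno.h>
+ #include <stdint.h>
+
+ struct ring { uint16_t prod, cons; };
+ struct qp { struct ring sq, rq; };
+
+ static int err_to_reset(const struct qp *qp)
+ {
+     if (qp->rq.prod != qp->rq.cons || qp->sq.prod != qp->sq.cons)
+         return -EINVAL;     /* still flushing: refuse the transition */
+     return 0;
+ }
+
+ int main(void)
+ {
+     struct qp qp = { .sq = { 3, 1 } };  /* two unflushed SQ entries */
+
+     return err_to_reset(&qp) ? 1 : 0;   /* exits 1: transition refused */
+ }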
+
+Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1729,6 +1729,14 @@ static int qedr_update_qp_state(struct q
+ /* ERR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RESET:
++ if ((qp->rq.prod != qp->rq.cons) ||
++ (qp->sq.prod != qp->sq.cons)) {
++ DP_NOTICE(dev,
++ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
++ qp->rq.prod, qp->rq.cons, qp->sq.prod,
++ qp->sq.cons);
++ status = -EINVAL;
++ }
+ break;
+ default:
+ status = -EINVAL;
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+Date: Tue, 24 Jan 2017 13:50:37 +0200
+Subject: RDMA/qedr: Don't spam dmesg if QP is in error state
+
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+
+
+[ Upstream commit c78c31496111f497b4a03f955c100091185da8b6 ]
+
+It is normal to flush CQEs if the QP is in error state. Hence there's no
+use in printing a message per CQE to dmesg.
+
+Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -3238,9 +3238,10 @@ static int qedr_poll_cq_req(struct qedr_
+ IB_WC_SUCCESS, 0);
+ break;
+ case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+- DP_ERR(dev,
+- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+- cq->icid, qp->icid);
++ if (qp->state != QED_ROCE_QP_STATE_ERR)
++ DP_ERR(dev,
++ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
++ cq->icid, qp->icid);
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_WR_FLUSH_ERR, 0);
+ break;
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+Date: Tue, 24 Jan 2017 13:51:42 +0200
+Subject: RDMA/qedr: Fix and simplify memory leak in PD alloc
+
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+
+
+[ Upstream commit 9c1e0228ab35e52d30abf4b5629c28350833fbcb ]
+
+Free the PD if no internal resources were available. Move userspace
+code under the relevant 'if'.
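+
+A userspace sketch of the resulting unwind order; fw_alloc_pd(),
+fw_dealloc_pd() and copy_to_user_stub() are hypothetical stand-ins for
+the rdma ops and ib_copy_to_udata():
+
+ #include <stdlib.h>
+
+ struct pd { unsigned int pd_id; };
+
+ static int fw_alloc_pd(unsigned int *id) { *id = 7; return 0; }
+ static void fw_dealloc_pd(unsigned int id) { (void)id; }
+ static int copy_to_user_stub(unsigned int id) { (void)id; return 0; }
+
+ static struct pd *alloc_pd(int have_udata)
+ {
+     struct pd *pd = malloc(sizeof(*pd));
+     int rc;
+
+     if (!pd)
+         return NULL;
+
+     rc = fw_alloc_pd(&pd->pd_id);       /* now checked, not ignored */
+     if (rc)
+         goto err;
+
+     if (have_udata) {
+         rc = copy_to_user_stub(pd->pd_id);
+         if (rc) {
+             fw_dealloc_pd(pd->pd_id);   /* undo the fw allocation */
+             goto err;
+         }
+     }
+     return pd;
+
+ err:
+     free(pd);                           /* the leak the patch fixes */
+     return NULL;
+ }
+
+ int main(void)
+ {
+     free(alloc_pd(1));
+     return 0;
+ }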
+
+Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_de
+ struct ib_ucontext *context, struct ib_udata *udata)
+ {
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+- struct qedr_ucontext *uctx = NULL;
+- struct qedr_alloc_pd_uresp uresp;
+ struct qedr_pd *pd;
+ u16 pd_id;
+ int rc;
+@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_de
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+- dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
++ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
++ if (rc)
++ goto err;
+
+- uresp.pd_id = pd_id;
+ pd->pd_id = pd_id;
+
+ if (udata && context) {
++ struct qedr_alloc_pd_uresp uresp;
++
++ uresp.pd_id = pd_id;
++
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+- if (rc)
++ if (rc) {
+ DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+- uctx = get_qedr_ucontext(context);
+- uctx->pd = pd;
+- pd->uctx = uctx;
++ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
++ goto err;
++ }
++
++ pd->uctx = get_qedr_ucontext(context);
++ pd->uctx->pd = pd;
+ }
+
+ return &pd->ibpd;
++
++err:
++ kfree(pd);
++ return ERR_PTR(rc);
+ }
+
+ int qedr_dealloc_pd(struct ib_pd *ibpd)
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+Date: Tue, 24 Jan 2017 13:50:35 +0200
+Subject: RDMA/qedr: Return max inline data in QP query result
+
+From: Ram Amrani <Ram.Amrani@Cavium.com>
+
+
+[ Upstream commit 59e8970b3798e4cbe575ed9cf4d53098760a2a86 ]
+
+Return the maximum supported amount of inline data, not the QP's
+currently configured inline data size, when filling out the results
+of a query QP call.
+
+Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -2032,7 +2032,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+- qp_attr->cap.max_inline_data = qp->max_inline_data;
++ qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+ qp_init_attr->cap = qp_attr->cap;
+
+ memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
kernel-watchdog-prevent-false-hardlockup-on-overloaded-system.patch
vhost-vsock-handle-vhost_vq_init_access-error.patch
arc-smp-boot-decouple-non-masters-waiting-api-from-jump-to-entry-point.patch
+arcv2-smp-boot-wake_flag-polling-by-non-masters-needs-to-be-uncached.patch
+tipc-ignore-requests-when-the-connection-state-is-not-connected.patch
+tipc-fix-connection-refcount-error.patch
+tipc-add-subscription-refcount-to-avoid-invalid-delete.patch
+tipc-fix-nametbl_lock-soft-lockup-at-node-link-events.patch
+netfilter-nf_tables-fix-set-nelems-counting-with-no-nlm_f_excl.patch
+netfilter-nft_log-restrict-the-log-prefix-length-to-127.patch
+rdma-qedr-dispatch-port-active-event-from-qedr_add.patch
+rdma-qedr-fix-and-simplify-memory-leak-in-pd-alloc.patch
+rdma-qedr-don-t-reset-qp-when-queues-aren-t-flushed.patch
+rdma-qedr-don-t-spam-dmesg-if-qp-is-in-error-state.patch
+rdma-qedr-return-max-inline-data-in-qp-query-result.patch
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Date: Tue, 24 Jan 2017 13:00:44 +0100
+Subject: tipc: add subscription refcount to avoid invalid delete
+
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+
+
+[ Upstream commit d094c4d5f5c7e1b225e94227ca3f007be3adc4e8 ]
+
+Until now, the subscribers keep track of the subscriptions using
+a reference count at the subscriber level. At subscription cancel or
+subscriber delete, we delete the subscription only if the timer
+was pending for the subscription. This approach is incorrect as:
+1. del_timer() is not SMP safe: on CPU0 the check for a pending
+ timer may return true while CPU1 schedules the timer callback,
+ thereby deleting the subscription. Thus when CPU0 is scheduled,
+ it deletes an invalid subscription.
+2. We export tipc_subscrp_report_overlap(), which accesses the
+ subscription pointer multiple times. Meanwhile the subscription
+ timer can expire, thereby freeing the subscription, and we might
+ continue to access the subscription pointer, leading to memory
+ violations.
+
+In this commit, we introduce a subscription refcount to avoid deleting
+an invalid subscription.
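+
+The fix follows the classic get/put lifetime rule; below is a
+userspace model in which a C11 atomic plays the role of the kref and
+the release work is reduced to a free():
+
+ #include <stdatomic.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ struct subscription {
+     atomic_int kref;
+     /* ... name sequence, timer, subscriber back-pointer ... */
+ };
+
+ static void subscrp_get(struct subscription *sub)
+ {
+     atomic_fetch_add(&sub->kref, 1);
+ }
+
+ static void subscrp_put(struct subscription *sub)
+ {
+     if (atomic_fetch_sub(&sub->kref, 1) == 1) {     /* last reference */
+         printf("releasing subscription\n");
+         free(sub);
+     }
+ }
+
+ static void report_overlap(struct subscription *sub)
+ {
+     subscrp_get(sub);   /* pin while we dereference it repeatedly */
+     /* ... convert seq, check overlap, send event ... */
+     subscrp_put(sub);
+ }
+
+ int main(void)
+ {
+     struct subscription *sub = calloc(1, sizeof(*sub));
+
+     if (!sub)
+         return 1;
+     atomic_init(&sub->kref, 1);     /* creation reference (kref_init) */
+     report_overlap(sub);            /* survives a concurrent timeout */
+     subscrp_put(sub);               /* timeout/cancel drops the last ref */
+     return 0;
+ }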
+
+Reported-and-Tested-by: John Thompson <thompa.atl@gmail.com>
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/subscr.c | 124 ++++++++++++++++++++++++++++++------------------------
+ net/tipc/subscr.h | 1
+ 2 files changed, 71 insertions(+), 54 deletions(-)
+
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -54,6 +54,8 @@ struct tipc_subscriber {
+
+ static void tipc_subscrp_delete(struct tipc_subscription *sub);
+ static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
++static void tipc_subscrp_put(struct tipc_subscription *subscription);
++static void tipc_subscrp_get(struct tipc_subscription *subscription);
+
+ /**
+ * htohl - convert value to endianness used by destination
+@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct
+ {
+ struct tipc_name_seq seq;
+
++ tipc_subscrp_get(sub);
+ tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
+ if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
+ return;
+@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct
+
+ tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
+ node);
++ tipc_subscrp_put(sub);
+ }
+
+ static void tipc_subscrp_timeout(unsigned long data)
+ {
+ struct tipc_subscription *sub = (struct tipc_subscription *)data;
+- struct tipc_subscriber *subscriber = sub->subscriber;
+
+ /* Notify subscriber of timeout */
+ tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
+ TIPC_SUBSCR_TIMEOUT, 0, 0);
+
+- spin_lock_bh(&subscriber->lock);
+- tipc_subscrp_delete(sub);
+- spin_unlock_bh(&subscriber->lock);
+-
+- tipc_subscrb_put(subscriber);
++ tipc_subscrp_put(sub);
+ }
+
+ static void tipc_subscrb_kref_release(struct kref *kref)
+ {
+- struct tipc_subscriber *subcriber = container_of(kref,
+- struct tipc_subscriber, kref);
+-
+- kfree(subcriber);
++ kfree(container_of(kref,struct tipc_subscriber, kref));
+ }
+
+ static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
+@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc
+ kref_get(&subscriber->kref);
+ }
+
++static void tipc_subscrp_kref_release(struct kref *kref)
++{
++ struct tipc_subscription *sub = container_of(kref,
++ struct tipc_subscription,
++ kref);
++ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
++ struct tipc_subscriber *subscriber = sub->subscriber;
++
++ spin_lock_bh(&subscriber->lock);
++ tipc_nametbl_unsubscribe(sub);
++ list_del(&sub->subscrp_list);
++ atomic_dec(&tn->subscription_count);
++ spin_unlock_bh(&subscriber->lock);
++ kfree(sub);
++ tipc_subscrb_put(subscriber);
++}
++
++static void tipc_subscrp_put(struct tipc_subscription *subscription)
++{
++ kref_put(&subscription->kref, tipc_subscrp_kref_release);
++}
++
++static void tipc_subscrp_get(struct tipc_subscription *subscription)
++{
++ kref_get(&subscription->kref);
++}
++
++/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
++ * subscriptions for a given subscriber.
++ */
++static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
++ struct tipc_subscr *s)
++{
++ struct list_head *subscription_list = &subscriber->subscrp_list;
++ struct tipc_subscription *sub, *temp;
++
++ spin_lock_bh(&subscriber->lock);
++ list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
++ if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
++ continue;
++
++ tipc_subscrp_get(sub);
++ spin_unlock_bh(&subscriber->lock);
++ tipc_subscrp_delete(sub);
++ tipc_subscrp_put(sub);
++ spin_lock_bh(&subscriber->lock);
++
++ if (s)
++ break;
++ }
++ spin_unlock_bh(&subscriber->lock);
++}
++
+ static struct tipc_subscriber *tipc_subscrb_create(int conid)
+ {
+ struct tipc_subscriber *subscriber;
+@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subs
+ pr_warn("Subscriber rejected, no memory\n");
+ return NULL;
+ }
+- kref_init(&subscriber->kref);
+ INIT_LIST_HEAD(&subscriber->subscrp_list);
++ kref_init(&subscriber->kref);
+ subscriber->conid = conid;
+ spin_lock_init(&subscriber->lock);
+
+@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subs
+
+ static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
+ {
+- struct tipc_subscription *sub, *temp;
+- u32 timeout;
+-
+- spin_lock_bh(&subscriber->lock);
+- /* Destroy any existing subscriptions for subscriber */
+- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+- subscrp_list) {
+- timeout = htohl(sub->evt.s.timeout, sub->swap);
+- if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
+- tipc_subscrp_delete(sub);
+- tipc_subscrb_put(subscriber);
+- }
+- }
+- spin_unlock_bh(&subscriber->lock);
+-
++ tipc_subscrb_subscrp_delete(subscriber, NULL);
+ tipc_subscrb_put(subscriber);
+ }
+
+ static void tipc_subscrp_delete(struct tipc_subscription *sub)
+ {
+- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
++ u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
+
+- tipc_nametbl_unsubscribe(sub);
+- list_del(&sub->subscrp_list);
+- kfree(sub);
+- atomic_dec(&tn->subscription_count);
++ if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
++ tipc_subscrp_put(sub);
+ }
+
+ static void tipc_subscrp_cancel(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber)
+ {
+- struct tipc_subscription *sub, *temp;
+- u32 timeout;
+-
+- spin_lock_bh(&subscriber->lock);
+- /* Find first matching subscription, exit if not found */
+- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+- subscrp_list) {
+- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
+- timeout = htohl(sub->evt.s.timeout, sub->swap);
+- if ((timeout == TIPC_WAIT_FOREVER) ||
+- del_timer(&sub->timer)) {
+- tipc_subscrp_delete(sub);
+- tipc_subscrb_put(subscriber);
+- }
+- break;
+- }
+- }
+- spin_unlock_bh(&subscriber->lock);
++ tipc_subscrb_subscrp_delete(subscriber, s);
+ }
+
+ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
+@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_su
+ sub->swap = swap;
+ memcpy(&sub->evt.s, s, sizeof(*s));
+ atomic_inc(&tn->subscription_count);
++ kref_init(&sub->kref);
+ return sub;
+ }
+
+@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struc
+
+ spin_lock_bh(&subscriber->lock);
+ list_add(&sub->subscrp_list, &subscriber->subscrp_list);
+- tipc_subscrb_get(subscriber);
+ sub->subscriber = subscriber;
+ tipc_nametbl_subscribe(sub);
++ tipc_subscrb_get(subscriber);
+ spin_unlock_bh(&subscriber->lock);
+
++ setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
+ timeout = htohl(sub->evt.s.timeout, swap);
+- if (timeout == TIPC_WAIT_FOREVER)
+- return;
+
+- setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
+- mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
++ if (timeout != TIPC_WAIT_FOREVER)
++ mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ }
+
+ /* Handle one termination request for the subscriber */
+--- a/net/tipc/subscr.h
++++ b/net/tipc/subscr.h
+@@ -57,6 +57,7 @@ struct tipc_subscriber;
+ * @evt: template for events generated by subscription
+ */
+ struct tipc_subscription {
++ struct kref kref;
+ struct tipc_subscriber *subscriber;
+ struct net *net;
+ struct timer_list timer;
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Date: Tue, 24 Jan 2017 13:00:45 +0100
+Subject: tipc: fix connection refcount error
+
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+
+
+[ Upstream commit fc0adfc8fd18b61b6f7a3f28b429e134d6f3a008 ]
+
+Until now, the generic server framework maintains the connection
+ids per subscriber in the server's conn_idr. At tipc_close_conn, we
+remove the connection id from the server list, but the connection is
+valid until we call the refcount cleanup. Hence we have a window
+where the server allocates the same connection id to a new subscriber,
+leading to an inconsistent reference count. We hit another refcount
+warning when we grab the refcount in tipc_conn_lookup() for connections
+whose CF_CONNECTED flag is not set. This usually occurs at shutdown,
+when we stop the topology server and withdraw the TIPC_CFG_SRV
+publication, thereby triggering a withdraw message to subscribers.
+
+In this commit, we:
+1. remove the connection from the server list at refcount cleanup.
+2. grab the refcount for a connection only if CF_CONNECTED is set.
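+
+A userspace model of point 2, with the idr reduced to a single slot
+and the flags word to one atomic boolean; conn_lookup() hands out a
+reference only while CF_CONNECTED is still set:
+
+ #include <stdatomic.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+
+ struct conn {
+     atomic_int kref;
+     atomic_bool connected;      /* CF_CONNECTED */
+ };
+
+ static struct conn *conn_lookup(struct conn *slot)
+ {
+     if (slot && atomic_load(&slot->connected)) {
+         atomic_fetch_add(&slot->kref, 1);   /* conn_get() */
+         return slot;
+     }
+     return NULL;    /* closing: never hand out a new reference */
+ }
+
+ int main(void)
+ {
+     struct conn c;
+
+     atomic_init(&c.kref, 1);
+     atomic_init(&c.connected, true);
+     printf("lookup while connected: %p\n", (void *)conn_lookup(&c));
+     atomic_store(&c.connected, false);      /* tipc_close_conn() */
+     printf("lookup after close: %p\n", (void *)conn_lookup(&c));
+     return 0;
+ }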
+
+Tested-by: John Thompson <thompa.atl@gmail.com>
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/server.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -91,7 +91,8 @@ static void tipc_sock_release(struct tip
+ static void tipc_conn_kref_release(struct kref *kref)
+ {
+ struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+- struct sockaddr_tipc *saddr = con->server->saddr;
++ struct tipc_server *s = con->server;
++ struct sockaddr_tipc *saddr = s->saddr;
+ struct socket *sock = con->sock;
+ struct sock *sk;
+
+@@ -106,6 +107,11 @@ static void tipc_conn_kref_release(struc
+ tipc_sock_release(con);
+ sock_release(sock);
+ con->sock = NULL;
++
++ spin_lock_bh(&s->idr_lock);
++ idr_remove(&s->conn_idr, con->conid);
++ s->idr_in_use--;
++ spin_unlock_bh(&s->idr_lock);
+ }
+
+ tipc_clean_outqueues(con);
+@@ -128,8 +134,10 @@ static struct tipc_conn *tipc_conn_looku
+
+ spin_lock_bh(&s->idr_lock);
+ con = idr_find(&s->conn_idr, conid);
+- if (con)
++ if (con && test_bit(CF_CONNECTED, &con->flags))
+ conn_get(con);
++ else
++ con = NULL;
+ spin_unlock_bh(&s->idr_lock);
+ return con;
+ }
+@@ -198,15 +206,8 @@ static void tipc_sock_release(struct tip
+
+ static void tipc_close_conn(struct tipc_conn *con)
+ {
+- struct tipc_server *s = con->server;
+-
+ if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+
+- spin_lock_bh(&s->idr_lock);
+- idr_remove(&s->conn_idr, con->conid);
+- s->idr_in_use--;
+- spin_unlock_bh(&s->idr_lock);
+-
+ /* We shouldn't flush pending works as we may be in the
+ * thread. In fact the races with pending rx/tx work structs
+ * are harmless for us here as we have already deleted this
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Date: Tue, 24 Jan 2017 13:00:43 +0100
+Subject: tipc: fix nametbl_lock soft lockup at node/link events
+
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+
+
+[ Upstream commit 93f955aad4bacee5acebad141d1a03cd51f27b4e ]
+
+We trigger a soft lockup as we grab nametbl_lock twice if the node
+has a pending node up/down or link up/down event while:
+- we process an incoming named message in tipc_named_rcv() and
+ perform a tipc_update_nametbl().
+- we have pending backlog items in the name distributor queue
+ during a nametable update using tipc_nametbl_publish() or
+ tipc_nametbl_withdraw().
+
+The following are the call chain associated:
+tipc_named_rcv() Grabs nametbl_lock
+ tipc_update_nametbl() (publish/withdraw)
+ tipc_node_subscribe()/unsubscribe()
+ tipc_node_write_unlock()
+ << lockup occurs if an outstanding node/link event
+ exists, as we grab nametbl_lock again >>
+
+tipc_nametbl_withdraw() Grabs nametbl_lock
+ tipc_named_process_backlog()
+ tipc_update_nametbl()
+ << rest as above >>
+
+The function tipc_node_write_unlock(), in addition to releasing the
+lock, processes the outstanding node/link up/down events. To do this,
+we need to grab nametbl_lock again, leading to the lockup.
+
+In this commit we fix the soft lockup by introducing a fast variant of
+node_unlock(), where we just release the lock. We adapt
+node_subscribe()/node_unsubscribe() to use the fast variant.
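+
+Schematically (a sketch, not the TIPC code), with a pthread rwlock
+standing in for n->lock: the fast variant only drops the lock, while
+the regular variant additionally delivers deferred events, which is
+the step that re-acquires nametbl_lock:
+
+ #include <pthread.h>
+ #include <stdio.h>
+
+ static pthread_rwlock_t node_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+ static void node_write_unlock_fast(void)
+ {
+     pthread_rwlock_unlock(&node_lock);  /* no event processing */
+ }
+
+ static void node_write_unlock(void)
+ {
+     /* snapshot pending node/link events under the lock ... */
+     pthread_rwlock_unlock(&node_lock);
+     /* ... then deliver them, which may take nametbl_lock */
+     printf("delivering deferred node/link events\n");
+ }
+
+ int main(void)
+ {
+     pthread_rwlock_wrlock(&node_lock);
+     /* list_add_tail(subscr, &n->publ_list) happens here */
+     node_write_unlock_fast();   /* safe while nametbl_lock is held */
+
+     pthread_rwlock_wrlock(&node_lock);
+     node_write_unlock();        /* only safe without nametbl_lock */
+     return 0;
+ }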
+
+Reported-and-Tested-by: John Thompson <thompa.atl@gmail.com>
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/node.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct
+ write_lock_bh(&n->lock);
+ }
+
++static void tipc_node_write_unlock_fast(struct tipc_node *n)
++{
++ write_unlock_bh(&n->lock);
++}
++
+ static void tipc_node_write_unlock(struct tipc_node *n)
+ {
+ struct net *net = n->net;
+@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net
+ }
+ tipc_node_write_lock(n);
+ list_add_tail(subscr, &n->publ_list);
+- tipc_node_write_unlock(n);
++ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+ }
+
+@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *n
+ }
+ tipc_node_write_lock(n);
+ list_del_init(subscr);
+- tipc_node_write_unlock(n);
++ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+ }
+
--- /dev/null
+From foo@baz Thu Jun 15 16:35:05 CEST 2017
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Date: Tue, 24 Jan 2017 13:00:47 +0100
+Subject: tipc: ignore requests when the connection state is not CONNECTED
+
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+
+
+[ Upstream commit 4c887aa65d38633885010277f3482400681be719 ]
+
+In tipc_conn_sendmsg(), we first queue the request to the outqueue and
+only afterwards check the connection state. If the connection is not
+connected, we should not queue this message at all.
+
+In this commit, we reject the messages if the connection state is
+not CF_CONNECTED.
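+
+A userspace model of the reordered check, with the outqueue reduced to
+a counter: the connection state is tested before anything is queued:
+
+ #include <stdatomic.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+
+ static atomic_bool connected;
+ static int queued;
+
+ static int conn_sendmsg(const char *msg)
+ {
+     if (!atomic_load(&connected))   /* check first ... */
+         return 0;                   /* drop, as the patch does */
+     queued++;                       /* ... queue only afterwards */
+     printf("queued: %s\n", msg);
+     return 0;
+ }
+
+ int main(void)
+ {
+     atomic_init(&connected, true);
+     conn_sendmsg("event A");
+     atomic_store(&connected, false);    /* tipc_close_conn() */
+     conn_sendmsg("event B");            /* rejected, never queued */
+     printf("outqueue depth: %d\n", queued);
+     return 0;
+ }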
+
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Tested-by: John Thompson <thompa.atl@gmail.com>
+Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/server.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -458,6 +458,11 @@ int tipc_conn_sendmsg(struct tipc_server
+ if (!con)
+ return -EINVAL;
+
++ if (!test_bit(CF_CONNECTED, &con->flags)) {
++ conn_put(con);
++ return 0;
++ }
++
+ e = tipc_alloc_entry(data, len);
+ if (!e) {
+ conn_put(con);
+@@ -471,12 +476,8 @@ int tipc_conn_sendmsg(struct tipc_server
+ list_add_tail(&e->list, &con->outqueue);
+ spin_unlock_bh(&con->outqueue_lock);
+
+- if (test_bit(CF_CONNECTED, &con->flags)) {
+- if (!queue_work(s->send_wq, &con->swork))
+- conn_put(con);
+- } else {
++ if (!queue_work(s->send_wq, &con->swork))
+ conn_put(con);
+- }
+ return 0;
+ }
+
+@@ -500,7 +501,7 @@ static void tipc_send_to_sock(struct tip
+ int ret;
+
+ spin_lock_bh(&con->outqueue_lock);
+- while (1) {
++ while (test_bit(CF_CONNECTED, &con->flags)) {
+ e = list_entry(con->outqueue.next, struct outqueue_entry,
+ list);
+ if ((struct list_head *) e == &con->outqueue)