--- /dev/null
+From c58a584f05e35d1d4342923cd7aac07d9c3d3d16 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Fri, 5 Oct 2018 12:48:48 -0700
+Subject: ARC: clone syscall to set up r25 as thread pointer
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit c58a584f05e35d1d4342923cd7aac07d9c3d3d16 upstream.
+
+Per ARC TLS ABI, r25 is designated TP (thread pointer register).
+However so far kernel didn't do any special treatment, like setting up
+usermode r25, even for CLONE_SETTLS. We instead relied on libc runtime
+to do this, in say clone libc wrapper [1]. This was deliberate to keep
+kernel ABI agnostic (userspace could potentially change TP, specially
+for different ARC ISA say ARCompact vs. ARCv2 with different spare
+registers etc)
+
+However userspace setting up r25, after clone syscall opens a race, if
+child is not scheduled and gets a signal instead. It starts off in
+userspace not in clone but in a signal handler and anything TP specific
+there such as pthread_self() fails which showed up with uClibc
+testsuite nptl/tst-kill6 [2]
+
+Fix this by having kernel populate r25 to TP value. So this locks in
+ABI, but it was not going to change anyways, and fwiw is same for both
+ARCompact (arc700 core) and ARCv2 (HS3x cores)
+
+[1] https://cgit.uclibc-ng.org/cgi/cgit/uclibc-ng.git/tree/libc/sysdeps/linux/arc/clone.S
+[2] https://github.com/wbx-github/uclibc-ng-test/blob/master/test/nptl/tst-kill6.c
+
+Fixes: ARC STAR 9001378481
+Cc: stable@vger.kernel.org
+Reported-by: Nikita Sobolev <sobolev@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/kernel/process.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/arch/arc/kernel/process.c
++++ b/arch/arc/kernel/process.c
+@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flag
+ task_thread_info(current)->thr_ptr;
+ }
+
++
++ /*
++ * setup usermode thread pointer #1:
++ * when child is picked by scheduler, __switch_to() uses @c_callee to
++ * populate usermode callee regs: this works (despite being in a kernel
++ * function) since special return path for child @ret_from_fork()
++ * ensures those regs are not clobbered all the way to RTIE to usermode
++ */
++ c_callee->r25 = task_thread_info(p)->thr_ptr;
++
++#ifdef CONFIG_ARC_CURR_IN_REG
++ /*
++ * setup usermode thread pointer #2:
++ * however for this special use of r25 in kernel, __switch_to() sets
++ * r25 for kernel needs and only in the final return path is usermode
++ * r25 setup, from pt_regs->user_r25. So set that up as well
++ */
++ c_regs->user_r25 = c_callee->r25;
++#endif
++
+ return 0;
+ }
+
--- /dev/null
+From add92a817e60e308a419693413a38d9d1e663aff Mon Sep 17 00:00:00 2001
+From: Harsh Jain <harsh@chelsio.com>
+Date: Wed, 19 Sep 2018 22:42:16 +0530
+Subject: crypto: chelsio - Fix memory corruption in DMA Mapped buffers.
+
+From: Harsh Jain <harsh@chelsio.com>
+
+commit add92a817e60e308a419693413a38d9d1e663aff upstream.
+
+Update PCI Id in "cpl_rx_phys_dsgl" header. In case pci_chan_id and
+tx_chan_id are not derived from same queue, H/W can send request
+completion indication before completing DMA Transfer.
+
+Herbert, it would be good if this fix can be merged to the stable tree.
+For the 4.14 kernel, it requires some updates to avoid merge conflicts.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/chelsio/chcr_algo.c | 41 +++++++++++++++++++++++------------
+ drivers/crypto/chelsio/chcr_crypto.h | 2 +
+ 2 files changed, 29 insertions(+), 14 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -384,7 +384,8 @@ static inline int is_hmac(struct crypto_
+
+ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+- struct phys_sge_parm *sg_param)
++ struct phys_sge_parm *sg_param,
++ int pci_chan_id)
+ {
+ struct phys_sge_pairs *to;
+ unsigned int len = 0, left_size = sg_param->obsize;
+@@ -402,6 +403,7 @@ static void write_phys_cpl(struct cpl_rx
+ phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
+ phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
+ phys_cpl->rss_hdr_int.hash_val = 0;
++ phys_cpl->rss_hdr_int.channel = pci_chan_id;
+ to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
+ sizeof(struct cpl_rx_phys_dsgl));
+ for (i = 0; nents && left_size; to++) {
+@@ -418,7 +420,8 @@ static void write_phys_cpl(struct cpl_rx
+ static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+- struct phys_sge_parm *sg_param)
++ struct phys_sge_parm *sg_param,
++ int pci_chan_id)
+ {
+ if (!sg || !sg_param->nents)
+ return -EINVAL;
+@@ -428,7 +431,7 @@ static inline int map_writesg_phys_cpl(s
+ pr_err("CHCR : DMA mapping failed\n");
+ return -EINVAL;
+ }
+- write_phys_cpl(phys_cpl, sg, sg_param);
++ write_phys_cpl(phys_cpl, sg, sg_param, pci_chan_id);
+ return 0;
+ }
+
+@@ -608,7 +611,7 @@ static inline void create_wreq(struct ch
+ is_iv ? iv_loc : IV_NOP, !!lcb,
+ ctx->tx_qidx);
+
+- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
++ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
+ qid);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
+@@ -698,7 +701,8 @@ static struct sk_buff *create_cipher_wr(
+ sg_param.obsize = wrparam->bytes;
+ sg_param.qid = wrparam->qid;
+ error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+- reqctx->dst, &sg_param);
++ reqctx->dst, &sg_param,
++ ctx->pci_chan_id);
+ if (error)
+ goto map_fail1;
+
+@@ -1228,16 +1232,23 @@ static int chcr_device_init(struct chcr_
+ adap->vres.ncrypto_fc);
+ rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+ txq_perchan = ntxq / u_ctx->lldi.nchan;
+- rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
+- rxq_idx += id % rxq_perchan;
+- txq_idx = ctx->dev->tx_channel_id * txq_perchan;
+- txq_idx += id % txq_perchan;
+ spin_lock(&ctx->dev->lock_chcr_dev);
+- ctx->rx_qidx = rxq_idx;
+- ctx->tx_qidx = txq_idx;
++ ctx->tx_chan_id = ctx->dev->tx_channel_id;
+ ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+ ctx->dev->rx_channel_id = 0;
+ spin_unlock(&ctx->dev->lock_chcr_dev);
++ rxq_idx = ctx->tx_chan_id * rxq_perchan;
++ rxq_idx += id % rxq_perchan;
++ txq_idx = ctx->tx_chan_id * txq_perchan;
++ txq_idx += id % txq_perchan;
++ ctx->rx_qidx = rxq_idx;
++ ctx->tx_qidx = txq_idx;
++ /* Channel Id used by SGE to forward packet to Host.
++ * Same value should be used in cpl_fw6_pld RSS_CH field
++ * by FW. Driver programs PCI channel ID to be used in fw
++ * at the time of queue allocation with value "pi->tx_chan"
++ */
++ ctx->pci_chan_id = txq_idx / txq_perchan;
+ }
+ out:
+ return err;
+@@ -2066,7 +2077,8 @@ static struct sk_buff *create_authenc_wr
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+- reqctx->dst, &sg_param);
++ reqctx->dst, &sg_param,
++ ctx->pci_chan_id);
+ if (error)
+ goto dstmap_fail;
+
+@@ -2389,7 +2401,7 @@ static struct sk_buff *create_aead_ccm_w
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+- reqctx->dst, &sg_param);
++ reqctx->dst, &sg_param, ctx->pci_chan_id);
+ if (error)
+ goto dstmap_fail;
+
+@@ -2545,7 +2557,8 @@ static struct sk_buff *create_gcm_wr(str
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+- reqctx->dst, &sg_param);
++ reqctx->dst, &sg_param,
++ ctx->pci_chan_id);
+ if (error)
+ goto dstmap_fail;
+
+--- a/drivers/crypto/chelsio/chcr_crypto.h
++++ b/drivers/crypto/chelsio/chcr_crypto.h
+@@ -222,6 +222,8 @@ struct chcr_context {
+ struct chcr_dev *dev;
+ unsigned char tx_qidx;
+ unsigned char rx_qidx;
++ unsigned char tx_chan_id;
++ unsigned char pci_chan_id;
+ struct __crypto_ctx crypto_ctx[0];
+ };
+