--- /dev/null
+From fee6a8923ae0d318a7f7950c6c6c28a96cea099b Mon Sep 17 00:00:00 2001
+From: Stephane Grosjean <s.grosjean@peak-system.com>
+Date: Fri, 5 Jul 2019 15:32:16 +0200
+Subject: can: peak_usb: fix potential double kfree_skb()
+
+From: Stephane Grosjean <s.grosjean@peak-system.com>
+
+commit fee6a8923ae0d318a7f7950c6c6c28a96cea099b upstream.
+
+When closing the CAN device while tx skbs are inflight, echo skb could
+be released twice. By calling close_candev() before unlinking all
+pending tx urbs, the internal echo_skb[] array is fully and
+correctly cleared before the USB write callback and, therefore,
+can_get_echo_skb() are called, for each aborted URB.
+
+Fixes: bb4785551f64 ("can: usb: PEAK-System Technik USB adapters driver core")
+Signed-off-by: Stephane Grosjean <s.grosjean@peak-system.com>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/usb/peak_usb/pcan_usb_core.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -594,16 +594,16 @@ static int peak_usb_ndo_stop(struct net_
+ dev->state &= ~PCAN_USB_STATE_STARTED;
+ netif_stop_queue(netdev);
+
++ close_candev(netdev);
++
++ dev->can.state = CAN_STATE_STOPPED;
++
+ /* unlink all pending urbs and free used memory */
+ peak_usb_unlink_all_urbs(dev);
+
+ if (dev->adapter->dev_stop)
+ dev->adapter->dev_stop(dev);
+
+- close_candev(netdev);
+-
+- dev->can.state = CAN_STATE_STOPPED;
+-
+ /* can set bus off now */
+ if (dev->adapter->dev_set_bus) {
+ int err = dev->adapter->dev_set_bus(dev, 0);
--- /dev/null
+From d4b890aec4bea7334ca2ca56fd3b12fb48a00cd1 Mon Sep 17 00:00:00 2001
+From: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+Date: Wed, 26 Jun 2019 16:08:48 +0300
+Subject: can: rcar_canfd: fix possible IRQ storm on high load
+
+From: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+
+commit d4b890aec4bea7334ca2ca56fd3b12fb48a00cd1 upstream.
+
+We have observed rcar_canfd driver entering IRQ storm under high load,
+with following scenario:
+- rcar_canfd_global_interrupt() is entered due to Rx available,
+- napi_schedule_prep() is called, and sets NAPIF_STATE_SCHED in state
+- Rx fifo interrupts are masked,
+- rcar_canfd_global_interrupt() is entered again, this time due to
+ error interrupt (e.g. due to overflow),
+- since scheduled napi poller has not yet executed, condition for calling
+ napi_schedule_prep() from rcar_canfd_global_interrupt() remains true,
+ thus napi_schedule_prep() gets called and sets NAPIF_STATE_MISSED flag
+ in state,
+- later, napi poller function rcar_canfd_rx_poll() gets executed, and
+ calls napi_complete_done(),
+- due to NAPIF_STATE_MISSED flag in state, this call does not clear
+ NAPIF_STATE_SCHED flag from state,
+- on return from napi_complete_done(), rcar_canfd_rx_poll() unmasks Rx
+  interrupts,
+- Rx interrupt happens, rcar_canfd_global_interrupt() gets called
+ and calls napi_schedule_prep(),
+- since NAPIF_STATE_SCHED is set in state at this time, this call
+ returns false,
+- due to that false return, rcar_canfd_global_interrupt() returns
+ without masking Rx interrupt
+- and this results into IRQ storm: unmasked Rx interrupt happens again
+ and again is misprocessed in the same way.
+
+This patch fixes that scenario by unmasking Rx interrupts only when
+napi_complete_done() returns true, which means it has cleared
+NAPIF_STATE_SCHED in state.
+
+Fixes: dd3bd23eb438 ("can: rcar_canfd: Add Renesas R-Car CAN FD driver")
+Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/rcar/rcar_canfd.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct nap
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+- napi_complete_done(napi, num_pkts);
+- /* Enable Rx FIFO interrupts */
+- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+- RCANFD_RFCC_RFIE);
++ if (napi_complete_done(napi, num_pkts)) {
++ /* Enable Rx FIFO interrupts */
++ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
++ RCANFD_RFCC_RFIE);
++ }
+ }
+ return num_pkts;
+ }
--- /dev/null
+From 9f00baf74e4b6f79a3a3dfab44fb7bb2e797b551 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Tue, 30 Jul 2019 16:05:24 +0000
+Subject: crypto: ccp - Add support for valid authsize values less than 16
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit 9f00baf74e4b6f79a3a3dfab44fb7bb2e797b551 upstream.
+
+AES GCM encryption allows for authsize values of 4, 8, and 12-16 bytes.
+Validate the requested authsize, and retain it to save in the request
+context.
+
+Fixes: 36cf515b9bbe2 ("crypto: ccp - Enable support for AES GCM on v5 CCPs")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-crypto-aes-galois.c | 14 ++++++++++++++
+ drivers/crypto/ccp/ccp-ops.c | 26 +++++++++++++++++++++-----
+ include/linux/ccp.h | 2 ++
+ 3 files changed, 37 insertions(+), 5 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -63,6 +63,19 @@ static int ccp_aes_gcm_setkey(struct cry
+ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+ {
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -109,6 +122,7 @@ static int ccp_aes_gcm_crypt(struct aead
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
++ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = encrypt;
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct cc
+
+ unsigned long long *final;
+ unsigned int dm_offset;
++ unsigned int authsize;
+ unsigned int jobid;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+@@ -646,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ if (!aes->key) /* Gotta have a key SGL */
+ return -EINVAL;
+
++ /* Zero defaults to 16 bytes, the maximum size */
++ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ /* First, decompose the source buffer into AAD & PT,
+ * and the destination buffer into AAD, CT & tag, or
+ * the input into CT & tag.
+@@ -660,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+ } else {
+ /* Input length for decryption includes tag */
+- ilen = aes->src_len - AES_BLOCK_SIZE;
++ ilen = aes->src_len - authsize;
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
+@@ -841,19 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct cc
+
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ /* Put the ciphered tag after the ciphertext. */
+- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ } else {
+ /* Does this ciphered tag match the input? */
+- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
++ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ if (ret)
+ goto e_tag;
+
+ ret = crypto_memneq(tag.address, final_wa.address,
+- AES_BLOCK_SIZE) ? -EBADMSG : 0;
++ authsize) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -173,6 +173,8 @@ struct ccp_aes_engine {
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+
++ u32 authsize;
++
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
--- /dev/null
+From b698a9f4c5c52317db486b069190c7e3d2b97e7e Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Wed, 7 Mar 2018 11:31:14 -0600
+Subject: crypto: ccp - Validate buffer lengths for copy operations
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit b698a9f4c5c52317db486b069190c7e3d2b97e7e upstream.
+
+The CCP driver copies data between scatter/gather lists and DMA buffers.
+The length of the requested copy operation must be checked against
+the available destination buffer length.
+
+Reported-by: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-ops.c | 108 +++++++++++++++++++++++++++++++------------
+ 1 file changed, 78 insertions(+), 30 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct c
+ return 0;
+ }
+
+-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+- struct scatterlist *sg, unsigned int sg_offset,
+- unsigned int len)
++static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
++ struct scatterlist *sg, unsigned int sg_offset,
++ unsigned int len)
+ {
+ WARN_ON(!wa->address);
+
++ if (len > (wa->length - wa_offset))
++ return -EINVAL;
++
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 0);
++ return 0;
+ }
+
+ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struc
+ unsigned int len)
+ {
+ u8 *p, *q;
++ int rc;
+
+- ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++ rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++ if (rc)
++ return rc;
+
+ p = wa->address + wa_offset;
+ q = p + len - 1;
+@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct c
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct c
+ goto e_key;
+
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct c
+ goto e_src;
+ }
+
+- ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+- aes->cmac_key_len);
++ ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
++ aes->cmac_key_len);
++ if (ret)
++ goto e_src;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -669,7 +682,9 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -688,7 +703,9 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ goto e_key;
+
+ dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -779,7 +796,9 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ goto e_dst;
+ }
+
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_dst;
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -829,7 +848,9 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+- ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ if (ret)
++ goto e_tag;
+
+ ret = crypto_memneq(tag.address, final_wa.address,
+ AES_BLOCK_SIZE) ? -EBADMSG : 0;
+@@ -924,7 +945,9 @@ static int ccp_run_aes_cmd(struct ccp_cm
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -945,7 +968,9 @@ static int ccp_run_aes_cmd(struct ccp_cm
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ /* Load the AES context - convert to LE */
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -1123,8 +1148,12 @@ static int ccp_run_xts_aes_cmd(struct cc
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ if (ret)
++ goto e_key;
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+@@ -1133,9 +1162,13 @@ static int ccp_run_xts_aes_cmd(struct cc
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+- ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+- xts->key_len);
++ ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
++ xts->key_len, xts->key_len);
++ if (ret)
++ goto e_key;
+ }
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -1154,7 +1187,9 @@ static int ccp_run_xts_aes_cmd(struct cc
+ if (ret)
+ goto e_key;
+
+- ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++ ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+@@ -1297,12 +1332,18 @@ static int ccp_run_des3_cmd(struct ccp_c
+ dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+
+ len_singlekey = des3->key_len / 3;
+- ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+- des3->key, 0, len_singlekey);
+- ccp_set_dm_area(&key, dm_offset + len_singlekey,
+- des3->key, len_singlekey, len_singlekey);
+- ccp_set_dm_area(&key, dm_offset,
+- des3->key, 2 * len_singlekey, len_singlekey);
++ ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
++ des3->key, 0, len_singlekey);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
++ des3->key, len_singlekey, len_singlekey);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset,
++ des3->key, 2 * len_singlekey, len_singlekey);
++ if (ret)
++ goto e_key;
+
+ /* Copy the key to the SB */
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+@@ -1330,7 +1371,10 @@ static int ccp_run_des3_cmd(struct ccp_c
+
+ /* Load the context into the LSB */
+ dm_offset = CCP_SB_BYTES - des3->iv_len;
+- ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
++ des3->iv_len);
++ if (ret)
++ goto e_ctx;
+
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+@@ -1614,8 +1658,10 @@ static int ccp_run_sha_cmd(struct ccp_cm
+ }
+ } else {
+ /* Restore the context */
+- ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+- sb_count * CCP_SB_BYTES);
++ ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
++ sb_count * CCP_SB_BYTES);
++ if (ret)
++ goto e_ctx;
+ }
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+@@ -1937,7 +1983,9 @@ static int ccp_run_passthru_cmd(struct c
+ if (ret)
+ return ret;
+
+- ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++ ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++ if (ret)
++ goto e_mask;
+ ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
--- /dev/null
+From 3f8fd02b1bf1d7ba964485a56f2f4b53ae88c167 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 19 Jul 2019 20:46:52 +0200
+Subject: mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 3f8fd02b1bf1d7ba964485a56f2f4b53ae88c167 upstream.
+
+On x86-32 with PTI enabled, parts of the kernel page-tables are not shared
+between processes. This can cause mappings in the vmalloc/ioremap area to
+persist in some page-tables after the region is unmapped and released.
+
+When the region is re-used the processes with the old mappings do not fault
+in the new mappings but still access the old ones.
+
+This causes undefined behavior, in reality often data corruption, kernel
+oopses and panics and even spontaneous reboots.
+
+Fix this problem by actively syncing unmaps in the vmalloc/ioremap area to
+all page-tables in the system before the regions can be re-used.
+
+References: https://bugzilla.suse.com/show_bug.cgi?id=1118689
+Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20190719184652.11391-4-joro@8bytes.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmalloc.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1766,6 +1766,12 @@ void *__vmalloc_node_range(unsigned long
+ return NULL;
+
+ /*
++ * First make sure the mappings are removed from all page-tables
++ * before they are freed.
++ */
++ vmalloc_sync_all();
++
++ /*
+ * In this function, newly allocated vm_struct has VM_UNINITIALIZED
+ * flag. It means that vm_struct is not fully initialized.
+ * Now, it is fully initialized, so remove this flag here.
+@@ -2314,6 +2320,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
+ /*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
++ *
++ * The purpose of this function is to make sure the vmalloc area
++ * mappings are identical in all page-tables in the system.
+ */
+ void __weak vmalloc_sync_all(void)
+ {
--- /dev/null
+From b9c0a64901d5bdec6eafd38d1dc8fa0e2974fccb Mon Sep 17 00:00:00 2001
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Wed, 24 Jul 2019 14:27:03 +0200
+Subject: perf annotate: Fix s390 gap between kernel end and module start
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+commit b9c0a64901d5bdec6eafd38d1dc8fa0e2974fccb upstream.
+
+During execution of command 'perf top' the error message:
+
+ Not enough memory for annotating '__irf_end' symbol!)
+
+is emitted from this call sequence:
+ __cmd_top
+ perf_top__mmap_read
+ perf_top__mmap_read_idx
+ perf_event__process_sample
+ hist_entry_iter__add
+ hist_iter__top_callback
+ perf_top__record_precise_ip
+ hist_entry__inc_addr_samples
+ symbol__inc_addr_samples
+ symbol__get_annotation
+ symbol__alloc_hist
+
+In this function the size of symbol __irf_end is calculated. The size of
+a symbol is the difference between its start and end address.
+
+When the symbol was read the first time, its start and end was set to:
+
+ symbol__new: __irf_end 0xe954d0-0xe954d0
+
+which is correct and maps with /proc/kallsyms:
+
+ root@s8360046:~/linux-4.15.0/tools/perf# fgrep _irf_end /proc/kallsyms
+ 0000000000e954d0 t __irf_end
+ root@s8360046:~/linux-4.15.0/tools/perf#
+
+In function symbol__alloc_hist() the end of symbol __irf_end is
+
+ symbol__alloc_hist sym:__irf_end start:0xe954d0 end:0x3ff80045a8
+
+which is identical with the first module entry in /proc/kallsyms
+
+This results in a symbol size of __irf_end for histogram analyses of
+70334140059072 bytes and a malloc() for this requested size fails.
+
+The root cause of this is function
+ __dso__load_kallsyms()
+ +-> symbols__fixup_end()
+
+Function symbols__fixup_end() enlarges the last symbol in the kallsyms
+map:
+
+ # fgrep __irf_end /proc/kallsyms
+ 0000000000e954d0 t __irf_end
+ #
+
+to the start address of the first module:
+ # cat /proc/kallsyms | sort | egrep ' [tT] '
+ ....
+ 0000000000e952d0 T __security_initcall_end
+ 0000000000e954d0 T __initramfs_size
+ 0000000000e954d0 t __irf_end
+ 000003ff800045a8 T fc_get_event_number [scsi_transport_fc]
+ 000003ff800045d0 t store_fc_vport_disable [scsi_transport_fc]
+ 000003ff800046a8 T scsi_is_fc_rport [scsi_transport_fc]
+ 000003ff800046d0 t fc_target_setup [scsi_transport_fc]
+
+On s390 the kernel is located around memory address 0x200, 0x10000 or
+0x100000, depending on linux version. Modules however start somewhere
+around 0x3ff xxxx xxxx.
+
+This is different than x86 and produces a large gap for which histogram
+allocation fails.
+
+Fix this by detecting the kernel's last symbol and do no adjustment for
+it. Introduce a weak function and handle s390 specifics.
+
+Reported-by: Klaus Theurich <klaus.theurich@de.ibm.com>
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Hendrik Brueckner <brueckner@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20190724122703.3996-2-tmricht@linux.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/arch/s390/util/machine.c | 17 +++++++++++++++++
+ tools/perf/util/symbol.c | 7 ++++++-
+ tools/perf/util/symbol.h | 1 +
+ 3 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -6,6 +6,7 @@
+ #include "machine.h"
+ #include "api/fs/fs.h"
+ #include "debug.h"
++#include "symbol.h"
+
+ int arch__fix_module_text_start(u64 *start, const char *name)
+ {
+@@ -21,3 +22,19 @@ int arch__fix_module_text_start(u64 *sta
+
+ return 0;
+ }
++
++/* On s390 kernel text segment start is located at very low memory addresses,
++ * for example 0x10000. Modules are located at very high memory addresses,
++ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
++ * and beginning of first module's text segment is very big.
++ * Therefore do not fill this gap and do not assign it to the kernel dso map.
++ */
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
++ /* Last kernel symbol mapped to end of page */
++ p->end = roundup(p->end, page_size);
++ else
++ p->end = c->start;
++ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
++}
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -93,6 +93,11 @@ static int prefix_underscores_count(cons
+ return tail - str;
+ }
+
++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++ p->end = c->start;
++}
++
+ const char * __weak arch__normalize_symbol_name(const char *name)
+ {
+ return name;
+@@ -219,7 +224,7 @@ void symbols__fixup_end(struct rb_root *
+ curr = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev->end == prev->start && prev->end != curr->start)
+- prev->end = curr->start;
++ arch__symbols__fixup_end(prev, curr);
+ }
+
+ /* Last entry */
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -351,6 +351,7 @@ const char *arch__normalize_symbol_name(
+ #define SYMBOL_A 0
+ #define SYMBOL_B 1
+
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
+ int arch__compare_symbol_names(const char *namea, const char *nameb);
+ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
+ unsigned int n);
--- /dev/null
+From 3de7ae0b2a1d86dbb23d0cb135150534fdb2e836 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Thu, 8 Aug 2019 09:48:23 +0300
+Subject: perf db-export: Fix thread__exec_comm()
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 3de7ae0b2a1d86dbb23d0cb135150534fdb2e836 upstream.
+
+Threads synthesized from /proc have comms with a start time of zero, and
+not marked as "exec". Currently, there can be 2 such comms. The first is
+created by processing a synthesized fork event and is set to the
+parent's comm string, and the second by processing a synthesized comm
+event set to the thread's current comm string.
+
+In the absence of an "exec" comm, thread__exec_comm() picks the last
+(oldest) comm, which, in the case above, is the parent's comm string.
+For a main thread, that is very probably wrong. Use the second-to-last
+in that case.
+
+This affects only db-export because it is the only user of
+thread__exec_comm().
+
+Example:
+
+ $ sudo perf record -a -o pt-a-sleep-1 -e intel_pt//u -- sleep 1
+ $ sudo chown ahunter pt-a-sleep-1
+
+Before:
+
+ $ perf script -i pt-a-sleep-1 --itrace=bep -s tools/perf/scripts/python/export-to-sqlite.py pt-a-sleep-1.db branches calls
+ $ sqlite3 -header -column pt-a-sleep-1.db 'select * from comm_threads_view'
+ comm_id command thread_id pid tid
+ ---------- ---------- ---------- ---------- ----------
+ 1 swapper 1 0 0
+ 2 rcu_sched 2 10 10
+ 3 kthreadd 3 78 78
+ 5 sudo 4 15180 15180
+ 5 sudo 5 15180 15182
+ 7 kworker/4: 6 10335 10335
+ 8 kthreadd 7 55 55
+ 10 systemd 8 865 865
+ 10 systemd 9 865 875
+ 13 perf 10 15181 15181
+ 15 sleep 10 15181 15181
+ 16 kworker/3: 11 14179 14179
+ 17 kthreadd 12 29376 29376
+ 19 systemd 13 746 746
+ 21 systemd 14 401 401
+ 23 systemd 15 879 879
+ 23 systemd 16 879 945
+ 25 kthreadd 17 556 556
+ 27 kworker/u1 18 14136 14136
+ 28 kworker/u1 19 15021 15021
+ 29 kthreadd 20 509 509
+ 31 systemd 21 836 836
+ 31 systemd 22 836 967
+ 33 systemd 23 1148 1148
+ 33 systemd 24 1148 1163
+ 35 kworker/2: 25 17988 17988
+ 36 kworker/0: 26 13478 13478
+
+After:
+
+ $ perf script -i pt-a-sleep-1 --itrace=bep -s tools/perf/scripts/python/export-to-sqlite.py pt-a-sleep-1b.db branches calls
+ $ sqlite3 -header -column pt-a-sleep-1b.db 'select * from comm_threads_view'
+ comm_id command thread_id pid tid
+ ---------- ---------- ---------- ---------- ----------
+ 1 swapper 1 0 0
+ 2 rcu_sched 2 10 10
+ 3 kswapd0 3 78 78
+ 4 perf 4 15180 15180
+ 4 perf 5 15180 15182
+ 6 kworker/4: 6 10335 10335
+ 7 kcompactd0 7 55 55
+ 8 accounts-d 8 865 865
+ 8 accounts-d 9 865 875
+ 10 perf 10 15181 15181
+ 12 sleep 10 15181 15181
+ 13 kworker/3: 11 14179 14179
+ 14 kworker/1: 12 29376 29376
+ 15 haveged 13 746 746
+ 16 systemd-jo 14 401 401
+ 17 NetworkMan 15 879 879
+ 17 NetworkMan 16 879 945
+ 19 irq/131-iw 17 556 556
+ 20 kworker/u1 18 14136 14136
+ 21 kworker/u1 19 15021 15021
+ 22 kworker/u1 20 509 509
+ 23 thermald 21 836 836
+ 23 thermald 22 836 967
+ 25 unity-sett 23 1148 1148
+ 25 unity-sett 24 1148 1163
+ 27 kworker/2: 25 17988 17988
+ 28 kworker/0: 26 13478 13478
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 65de51f93ebf ("perf tools: Identify which comms are from exec")
+Link: http://lkml.kernel.org/r/20190808064823.14846-1-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/thread.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -160,14 +160,24 @@ struct comm *thread__comm(const struct t
+
+ struct comm *thread__exec_comm(const struct thread *thread)
+ {
+- struct comm *comm, *last = NULL;
++ struct comm *comm, *last = NULL, *second_last = NULL;
+
+ list_for_each_entry(comm, &thread->comm_list, list) {
+ if (comm->exec)
+ return comm;
++ second_last = last;
+ last = comm;
+ }
+
++ /*
++ * 'last' with no start time might be the parent's comm of a synthesized
++ * thread (created by processing a synthesized fork event). For a main
++ * thread, that is very probably wrong. Prefer a later comm to avoid
++ * that case.
++ */
++ if (second_last && !last->start && thread->pid_ == thread->tid)
++ return second_last;
++
+ return last;
+ }
+
--- /dev/null
+From 12a6d2940b5f02b4b9f71ce098e3bb02bc24a9ea Mon Sep 17 00:00:00 2001
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Wed, 24 Jul 2019 14:27:02 +0200
+Subject: perf record: Fix module size on s390
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+commit 12a6d2940b5f02b4b9f71ce098e3bb02bc24a9ea upstream.
+
+On s390 the modules loaded in memory have the text segment located after
+the GOT and Relocation table. This can be seen with this output:
+
+ [root@m35lp76 perf]# fgrep qeth /proc/modules
+ qeth 151552 1 qeth_l2, Live 0x000003ff800b2000
+ ...
+ [root@m35lp76 perf]# cat /sys/module/qeth/sections/.text
+ 0x000003ff800b3990
+ [root@m35lp76 perf]#
+
+There is an offset of 0x1990 bytes. The size of the qeth module is
+151552 bytes (0x25000 in hex).
+
+The location of the GOT/relocation table at the beginning of a module is
+unique to s390.
+
+commit 203d8a4aa6ed ("perf s390: Fix 'start' address of module's map")
+adjusts the start address of a module in the map structures, but does
+not adjust the size of the modules. This leads to overlapping of module
+maps as this example shows:
+
+[root@m35lp76 perf] # ./perf report -D
+ 0 0 0xfb0 [0xa0]: PERF_RECORD_MMAP -1/0: [0x3ff800b3990(0x25000)
+ @ 0]: x /lib/modules/.../qeth.ko.xz
+ 0 0 0x1050 [0xb0]: PERF_RECORD_MMAP -1/0: [0x3ff800d85a0(0x8000)
+ @ 0]: x /lib/modules/.../ip6_tables.ko.xz
+
+The module qeth.ko has an adjusted start address modified to b3990, but
+its size is unchanged and the module ends at 0x3ff800d8990. This end
+address overlaps with the next modules start address of 0x3ff800d85a0.
+
+When the size of the leading GOT/Relocation table stored in the
+beginning of the text segment (0x1990 bytes) is subtracted from module
+qeth end address, there are no overlaps anymore:
+
+ 0x3ff800d8990 - 0x1990 = 0x0x3ff800d7000
+
+which is the same as
+
+ 0x3ff800b2000 + 0x25000 = 0x0x3ff800d7000.
+
+To fix this issue, also adjust the modules size in function
+arch__fix_module_text_start(). Add another function parameter named size
+and reduce the size of the module when the text segment start address is
+changed.
+
+Output after:
+ 0 0 0xfb0 [0xa0]: PERF_RECORD_MMAP -1/0: [0x3ff800b3990(0x23670)
+ @ 0]: x /lib/modules/.../qeth.ko.xz
+ 0 0 0x1050 [0xb0]: PERF_RECORD_MMAP -1/0: [0x3ff800d85a0(0x7a60)
+ @ 0]: x /lib/modules/.../ip6_tables.ko.xz
+
+Reported-by: Stefan Liebler <stli@linux.ibm.com>
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Hendrik Brueckner <brueckner@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Fixes: 203d8a4aa6ed ("perf s390: Fix 'start' address of module's map")
+Link: http://lkml.kernel.org/r/20190724122703.3996-1-tmricht@linux.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/arch/s390/util/machine.c | 14 +++++++++++++-
+ tools/perf/util/machine.c | 3 ++-
+ tools/perf/util/machine.h | 2 +-
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -8,7 +8,7 @@
+ #include "debug.h"
+ #include "symbol.h"
+
+-int arch__fix_module_text_start(u64 *start, const char *name)
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
+ {
+ u64 m_start = *start;
+ char path[PATH_MAX];
+@@ -18,6 +18,18 @@ int arch__fix_module_text_start(u64 *sta
+ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+ pr_debug2("Using module %s start:%#lx\n", path, m_start);
+ *start = m_start;
++ } else {
++ /* Successful read of the modules segment text start address.
++ * Calculate difference between module start address
++ * in memory and module text segment start address.
++ * For example module load address is 0x3ff8011b000
++ * (from /proc/modules) and module text segment start
++ * address is 0x3ff8011b870 (from file above).
++ *
++ * Adjust the module size and subtract the GOT table
++ * size located at the beginning of the module.
++ */
++ *size -= (*start - m_start);
+ }
+
+ return 0;
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1233,6 +1233,7 @@ static int machine__set_modules_path(str
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+ }
+ int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
++ u64 *size __maybe_unused,
+ const char *name __maybe_unused)
+ {
+ return 0;
+@@ -1244,7 +1245,7 @@ static int machine__create_module(void *
+ struct machine *machine = arg;
+ struct map *map;
+
+- if (arch__fix_module_text_start(&start, name) < 0)
++ if (arch__fix_module_text_start(&start, &size, name) < 0)
+ return -1;
+
+ map = machine__findnew_module_map(machine, start, name);
+--- a/tools/perf/util/machine.h
++++ b/tools/perf/util/machine.h
+@@ -213,7 +213,7 @@ struct symbol *machine__find_kernel_func
+
+ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+ const char *filename);
+-int arch__fix_module_text_start(u64 *start, const char *name);
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
+
+ int __machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, bool no_kcore);
loop-set-pf_memalloc_noio-for-the-worker-thread.patch
input-synaptics-enable-rmi-mode-for-hp-spectre-x360.patch
lkdtm-support-llvm-objcopy.patch
+crypto-ccp-validate-buffer-lengths-for-copy-operations.patch
+crypto-ccp-add-support-for-valid-authsize-values-less-than-16.patch
+tcp-clear-sk_send_head-after-purging-the-write-queue.patch
+x86-mm-check-for-pfn-instead-of-page-in-vmalloc_sync_one.patch
+x86-mm-sync-also-unmappings-in-vmalloc_sync_all.patch
+mm-vmalloc-sync-unmappings-in-__purge_vmap_area_lazy.patch
+perf-annotate-fix-s390-gap-between-kernel-end-and-module-start.patch
+perf-db-export-fix-thread__exec_comm.patch
+perf-record-fix-module-size-on-s390.patch
+usb-host-xhci-rcar-fix-timeout-in-xhci_suspend.patch
+usb-yurex-fix-use-after-free-in-yurex_delete.patch
+can-rcar_canfd-fix-possible-irq-storm-on-high-load.patch
+can-peak_usb-fix-potential-double-kfree_skb.patch
--- /dev/null
+From ben@decadent.org.uk Tue Aug 13 20:28:54 2019
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Tue, 13 Aug 2019 12:53:17 +0100
+Subject: tcp: Clear sk_send_head after purging the write queue
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Cc: stable@vger.kernel.org, Denis Andzakovic <denis.andzakovic@pulsesecurity.co.nz>, Salvatore Bonaccorso <carnil@debian.org>, Eric Dumazet <edumazet@google.com>
+Message-ID: <20190813115317.6cgml2mckd3c6u7z@decadent.org.uk>
+Content-Disposition: inline
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+Denis Andzakovic discovered a potential use-after-free in older kernel
+versions, using syzkaller. tcp_write_queue_purge() frees all skbs in
+the TCP write queue and can leave sk->sk_send_head pointing to freed
+memory. tcp_disconnect() clears that pointer after calling
+tcp_write_queue_purge(), but tcp_connect() does not. It is
+(surprisingly) possible to add to the write queue between
+disconnection and reconnection, so this needs to be done in both
+places.
+
+This bug was introduced by backports of commit 7f582b248d0a ("tcp:
+purge write queue in tcp_connect_init()") and does not exist upstream
+because of earlier changes in commit 75c119afe14f ("tcp: implement
+rb-tree based retransmit queue"). The latter is a major change that's
+not suitable for stable.
+
+Reported-by: Denis Andzakovic <denis.andzakovic@pulsesecurity.co.nz>
+Bisected-by: Salvatore Bonaccorso <carnil@debian.org>
+Fixes: 7f582b248d0a ("tcp: purge write queue in tcp_connect_init()")
+Cc: <stable@vger.kernel.org> # before 4.15
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1613,6 +1613,8 @@ static inline void tcp_init_send_head(st
+ sk->sk_send_head = NULL;
+ }
+
++static inline void tcp_init_send_head(struct sock *sk);
++
+ /* write queue abstraction */
+ static inline void tcp_write_queue_purge(struct sock *sk)
+ {
+@@ -1621,6 +1623,7 @@ static inline void tcp_write_queue_purge
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+ while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+ sk_wmem_free_skb(sk, skb);
++ tcp_init_send_head(sk);
+ sk_mem_reclaim(sk);
+ tcp_clear_all_retrans_hints(tcp_sk(sk));
+ tcp_init_send_head(sk);
--- /dev/null
+From 783bda5e41acc71f98336e1a402c180f9748e5dc Mon Sep 17 00:00:00 2001
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Date: Fri, 2 Aug 2019 17:33:35 +0900
+Subject: usb: host: xhci-rcar: Fix timeout in xhci_suspend()
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+commit 783bda5e41acc71f98336e1a402c180f9748e5dc upstream.
+
+When a USB device is connected to the host controller and
+the system enters suspend, the following error happens
+in xhci_suspend():
+
+ xhci-hcd ee000000.usb: WARN: xHC CMD_RUN timeout
+
+Since the firmware/internal CPU control the USBSTS.STS_HALT
+and the process speed is down when the roothub port enters U3,
+long delay for the handshake of STS_HALT is needed in xhci_suspend().
+So, this patch adds to set the XHCI_SLOW_SUSPEND.
+
+Fixes: 435cc1138ec9 ("usb: host: xhci-plat: set resume_quirk() for R-Car controllers")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Link: https://lore.kernel.org/r/1564734815-17964-1-git-send-email-yoshihiro.shimoda.uh@renesas.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-rcar.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -231,10 +231,15 @@ int xhci_rcar_init_quirk(struct usb_hcd
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
++ *
++ * And, since the firmware/internal CPU control the USBSTS.STS_HALT
++ * and the process speed is down when the roothub port enters U3,
++ * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
+ */
+ if (xhci_rcar_is_gen2(hcd->self.controller) ||
+- xhci_rcar_is_gen3(hcd->self.controller))
+- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
++ xhci_rcar_is_gen3(hcd->self.controller)) {
++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
++ }
+
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ return xhci_rcar_download_firmware(hcd);
--- /dev/null
+From fc05481b2fcabaaeccf63e32ac1baab54e5b6963 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Mon, 5 Aug 2019 12:15:28 +0100
+Subject: usb: yurex: Fix use-after-free in yurex_delete
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit fc05481b2fcabaaeccf63e32ac1baab54e5b6963 upstream.
+
+syzbot reported the following crash [0]:
+
+BUG: KASAN: use-after-free in usb_free_coherent+0x79/0x80
+drivers/usb/core/usb.c:928
+Read of size 8 at addr ffff8881b18599c8 by task syz-executor.4/16007
+
+CPU: 0 PID: 16007 Comm: syz-executor.4 Not tainted 5.3.0-rc2+ #23
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0xca/0x13e lib/dump_stack.c:113
+ print_address_description+0x6a/0x32c mm/kasan/report.c:351
+ __kasan_report.cold+0x1a/0x33 mm/kasan/report.c:482
+ kasan_report+0xe/0x12 mm/kasan/common.c:612
+ usb_free_coherent+0x79/0x80 drivers/usb/core/usb.c:928
+ yurex_delete+0x138/0x330 drivers/usb/misc/yurex.c:100
+ kref_put include/linux/kref.h:65 [inline]
+ yurex_release+0x66/0x90 drivers/usb/misc/yurex.c:392
+ __fput+0x2d7/0x840 fs/file_table.c:280
+ task_work_run+0x13f/0x1c0 kernel/task_work.c:113
+ tracehook_notify_resume include/linux/tracehook.h:188 [inline]
+ exit_to_usermode_loop+0x1d2/0x200 arch/x86/entry/common.c:163
+ prepare_exit_to_usermode arch/x86/entry/common.c:194 [inline]
+ syscall_return_slowpath arch/x86/entry/common.c:274 [inline]
+ do_syscall_64+0x45f/0x580 arch/x86/entry/common.c:299
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x413511
+Code: 75 14 b8 03 00 00 00 0f 05 48 3d 01 f0 ff ff 0f 83 04 1b 00 00 c3 48
+83 ec 08 e8 0a fc ff ff 48 89 04 24 b8 03 00 00 00 0f 05 <48> 8b 3c 24 48
+89 c2 e8 53 fc ff ff 48 89 d0 48 83 c4 08 48 3d 01
+RSP: 002b:00007ffc424ea2e0 EFLAGS: 00000293 ORIG_RAX: 0000000000000003
+RAX: 0000000000000000 RBX: 0000000000000007 RCX: 0000000000413511
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000006
+RBP: 0000000000000001 R08: 0000000029a2fc22 R09: 0000000029a2fc26
+R10: 00007ffc424ea3c0 R11: 0000000000000293 R12: 000000000075c9a0
+R13: 000000000075c9a0 R14: 0000000000761938 R15: ffffffffffffffff
+
+Allocated by task 2776:
+ save_stack+0x1b/0x80 mm/kasan/common.c:69
+ set_track mm/kasan/common.c:77 [inline]
+ __kasan_kmalloc mm/kasan/common.c:487 [inline]
+ __kasan_kmalloc.constprop.0+0xbf/0xd0 mm/kasan/common.c:460
+ kmalloc include/linux/slab.h:552 [inline]
+ kzalloc include/linux/slab.h:748 [inline]
+ usb_alloc_dev+0x51/0xf95 drivers/usb/core/usb.c:583
+ hub_port_connect drivers/usb/core/hub.c:5004 [inline]
+ hub_port_connect_change drivers/usb/core/hub.c:5213 [inline]
+ port_event drivers/usb/core/hub.c:5359 [inline]
+ hub_event+0x15c0/0x3640 drivers/usb/core/hub.c:5441
+ process_one_work+0x92b/0x1530 kernel/workqueue.c:2269
+ worker_thread+0x96/0xe20 kernel/workqueue.c:2415
+ kthread+0x318/0x420 kernel/kthread.c:255
+ ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352
+
+Freed by task 16007:
+ save_stack+0x1b/0x80 mm/kasan/common.c:69
+ set_track mm/kasan/common.c:77 [inline]
+ __kasan_slab_free+0x130/0x180 mm/kasan/common.c:449
+ slab_free_hook mm/slub.c:1423 [inline]
+ slab_free_freelist_hook mm/slub.c:1470 [inline]
+ slab_free mm/slub.c:3012 [inline]
+ kfree+0xe4/0x2f0 mm/slub.c:3953
+ device_release+0x71/0x200 drivers/base/core.c:1064
+ kobject_cleanup lib/kobject.c:693 [inline]
+ kobject_release lib/kobject.c:722 [inline]
+ kref_put include/linux/kref.h:65 [inline]
+ kobject_put+0x171/0x280 lib/kobject.c:739
+ put_device+0x1b/0x30 drivers/base/core.c:2213
+ usb_put_dev+0x1f/0x30 drivers/usb/core/usb.c:725
+ yurex_delete+0x40/0x330 drivers/usb/misc/yurex.c:95
+ kref_put include/linux/kref.h:65 [inline]
+ yurex_release+0x66/0x90 drivers/usb/misc/yurex.c:392
+ __fput+0x2d7/0x840 fs/file_table.c:280
+ task_work_run+0x13f/0x1c0 kernel/task_work.c:113
+ tracehook_notify_resume include/linux/tracehook.h:188 [inline]
+ exit_to_usermode_loop+0x1d2/0x200 arch/x86/entry/common.c:163
+ prepare_exit_to_usermode arch/x86/entry/common.c:194 [inline]
+ syscall_return_slowpath arch/x86/entry/common.c:274 [inline]
+ do_syscall_64+0x45f/0x580 arch/x86/entry/common.c:299
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+The buggy address belongs to the object at ffff8881b1859980
+ which belongs to the cache kmalloc-2k of size 2048
+The buggy address is located 72 bytes inside of
+ 2048-byte region [ffff8881b1859980, ffff8881b185a180)
+The buggy address belongs to the page:
+page:ffffea0006c61600 refcount:1 mapcount:0 mapping:ffff8881da00c000
+index:0x0 compound_mapcount: 0
+flags: 0x200000000010200(slab|head)
+raw: 0200000000010200 0000000000000000 0000000100000001 ffff8881da00c000
+raw: 0000000000000000 00000000000f000f 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff8881b1859880: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8881b1859900: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+> ffff8881b1859980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff8881b1859a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8881b1859a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+A quick look at the yurex_delete() shows that we drop the reference
+to the usb_device before releasing any buffers associated with the
+device. Delay the reference drop until we have finished the cleanup.
+
+[0] https://lore.kernel.org/lkml/0000000000003f86d8058f0bd671@google.com/
+
+Fixes: 6bc235a2e24a5e ("USB: add driver for Meywa-Denki & Kayac YUREX")
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tomoki Sekiyama <tomoki.sekiyama@gmail.com>
+Cc: Oliver Neukum <oneukum@suse.com>
+Cc: andreyknvl@google.com
+Cc: gregkh@linuxfoundation.org
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: syzkaller-bugs@googlegroups.com
+Cc: dtor@chromium.org
+Reported-by: syzbot+d1fedb1c1fdb07fca507@syzkaller.appspotmail.com
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: stable <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20190805111528.6758-1-suzuki.poulose@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/misc/yurex.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -96,7 +96,6 @@ static void yurex_delete(struct kref *kr
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+- usb_put_dev(dev->udev);
+ if (dev->cntl_urb) {
+ usb_kill_urb(dev->cntl_urb);
+ kfree(dev->cntl_req);
+@@ -112,6 +111,7 @@ static void yurex_delete(struct kref *kr
+ dev->int_buffer, dev->urb->transfer_dma);
+ usb_free_urb(dev->urb);
+ }
++ usb_put_dev(dev->udev);
+ kfree(dev);
+ }
+
--- /dev/null
+From 51b75b5b563a2637f9d8dc5bd02a31b2ff9e5ea0 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 19 Jul 2019 20:46:50 +0200
+Subject: x86/mm: Check for pfn instead of page in vmalloc_sync_one()
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 51b75b5b563a2637f9d8dc5bd02a31b2ff9e5ea0 upstream.
+
+Do not require a struct page for the mapped memory location because it
+might not exist. This can happen when an ioremapped region is mapped with
+2MB pages.
+
+Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20190719184652.11391-2-joro@8bytes.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/fault.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -266,7 +266,7 @@ static inline pmd_t *vmalloc_sync_one(pg
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
+ else
+- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
+
+ return pmd_k;
+ }
--- /dev/null
+From 8e998fc24de47c55b47a887f6c95ab91acd4a720 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 19 Jul 2019 20:46:51 +0200
+Subject: x86/mm: Sync also unmappings in vmalloc_sync_all()
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 8e998fc24de47c55b47a887f6c95ab91acd4a720 upstream.
+
+With huge-page ioremap areas the unmappings also need to be synced between
+all page-tables. Otherwise it can cause data corruption when a region is
+unmapped and later re-used.
+
+Make the vmalloc_sync_one() function ready to sync unmappings and make sure
+vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
+is found.
+
+Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/fault.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -260,11 +260,12 @@ static inline pmd_t *vmalloc_sync_one(pg
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+- if (!pmd_present(*pmd_k))
+- return NULL;
+
+- if (!pmd_present(*pmd))
++ if (pmd_present(*pmd) != pmd_present(*pmd_k))
+ set_pmd(pmd, *pmd_k);
++
++ if (!pmd_present(*pmd_k))
++ return NULL;
+ else
+ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
+
+@@ -286,17 +287,13 @@ void vmalloc_sync_all(void)
+ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ spinlock_t *pgt_lock;
+- pmd_t *ret;
+
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++ vmalloc_sync_one(page_address(page), address);
+ spin_unlock(pgt_lock);
+-
+- if (!ret)
+- break;
+ }
+ spin_unlock(&pgd_lock);
+ }