--- /dev/null
+From 730766bae3280a25d40ea76a53dc6342e84e6513 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Thu, 20 Jun 2019 16:12:36 -0600
+Subject: coresight: etb10: Do not call smp_processor_id from preemptible
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 730766bae3280a25d40ea76a53dc6342e84e6513 upstream.
+
+During a perf session we try to allocate buffers on the "node" associated
+with the CPU the event is bound to. If it is not bound to a CPU, we
+use the current CPU's node, obtained via smp_processor_id(). However, this is
+unsafe in a preemptible context and can generate splats such as:
+
+ BUG: using smp_processor_id() in preemptible [00000000] code: perf/2544
+
+Use NUMA_NO_NODE hint instead of using the current node for events
+not bound to CPUs.
+
+Fixes: 2997aa4063d97fdb39 ("coresight: etb10: implementing AUX API")
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: stable <stable@vger.kernel.org> # 4.6+
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Link: https://lore.kernel.org/r/20190620221237.3536-5-mathieu.poirier@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwtracing/coresight/coresight-etb10.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/hwtracing/coresight/coresight-etb10.c
++++ b/drivers/hwtracing/coresight/coresight-etb10.c
+@@ -275,9 +275,7 @@ static void *etb_alloc_buffer(struct cor
+ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
+
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
--- /dev/null
+From 024c1fd9dbcc1d8a847f1311f999d35783921b7f Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Thu, 20 Jun 2019 16:12:35 -0600
+Subject: coresight: tmc-etf: Do not call smp_processor_id from preemptible
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 024c1fd9dbcc1d8a847f1311f999d35783921b7f upstream.
+
+During a perf session we try to allocate buffers on the "node" associated
+with the CPU the event is bound to. If it is not bound to a CPU, we
+use the current CPU's node, obtained via smp_processor_id(). However, this is
+unsafe in a preemptible context and can generate splats such as:
+
+ BUG: using smp_processor_id() in preemptible [00000000] code: perf/2544
+ caller is tmc_alloc_etf_buffer+0x5c/0x60
+ CPU: 2 PID: 2544 Comm: perf Not tainted 5.1.0-rc6-147786-g116841e #344
+ Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development Platform, BIOS EDK II Feb 1 2019
+ Call trace:
+ dump_backtrace+0x0/0x150
+ show_stack+0x14/0x20
+ dump_stack+0x9c/0xc4
+ debug_smp_processor_id+0x10c/0x110
+ tmc_alloc_etf_buffer+0x5c/0x60
+ etm_setup_aux+0x1c4/0x230
+ rb_alloc_aux+0x1b8/0x2b8
+ perf_mmap+0x35c/0x478
+ mmap_region+0x34c/0x4f0
+ do_mmap+0x2d8/0x418
+ vm_mmap_pgoff+0xd0/0xf8
+ ksys_mmap_pgoff+0x88/0xf8
+ __arm64_sys_mmap+0x28/0x38
+ el0_svc_handler+0xd8/0x138
+ el0_svc+0x8/0xc
+
+Use NUMA_NO_NODE hint instead of using the current node for events
+not bound to CPUs.
+
+Fixes: 2e499bbc1a929ac ("coresight: tmc: implementing TMC-ETF AUX space API")
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: stable <stable@vger.kernel.org> # 4.7+
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Link: https://lore.kernel.org/r/20190620221237.3536-4-mathieu.poirier@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwtracing/coresight/coresight-tmc-etf.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -304,9 +304,7 @@ static void *tmc_alloc_etf_buffer(struct
+ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
+
+ /* Allocate memory structure for interaction with Perf */
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
--- /dev/null
+From 504582e8e40b90b8f8c58783e2d1e4f6a2b71a3a Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Sat, 5 Oct 2019 11:11:10 +0200
+Subject: crypto: geode-aes - switch to skcipher for cbc(aes) fallback
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 504582e8e40b90b8f8c58783e2d1e4f6a2b71a3a upstream.
+
+Commit 79c65d179a40e145 ("crypto: cbc - Convert to skcipher") updated
+the generic CBC template wrapper from a blkcipher to a skcipher algo,
+to get away from the deprecated blkcipher interface. However, as a side
+effect, drivers that instantiate CBC transforms using the blkcipher as
+a fallback no longer work, since skciphers can wrap blkciphers but not
+the other way around. This broke the geode-aes driver.
+
+So let's fix it by moving to the sync skcipher interface when allocating
+the fallback. At the same time, align with the generic API for ECB and
+CBC by rejecting inputs that are not a multiple of the AES block size.
+
+Fixes: 79c65d179a40e145 ("crypto: cbc - Convert to skcipher")
+Cc: <stable@vger.kernel.org> # v4.20+ ONLY
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Florian Bezdeka <florian@bezdeka.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/geode-aes.c | 57 ++++++++++++++++++++++++++-------------------
+ drivers/crypto/geode-aes.h | 2 -
+ 2 files changed, 35 insertions(+), 24 deletions(-)
+
+--- a/drivers/crypto/geode-aes.c
++++ b/drivers/crypto/geode-aes.c
+@@ -14,6 +14,7 @@
+ #include <linux/spinlock.h>
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
++#include <crypto/skcipher.h>
+
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -170,13 +171,15 @@ static int geode_setkey_blk(struct crypt
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(op->fallback.blk,
++ tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
++ ret = crypto_skcipher_setkey(op->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
++ tfm->crt_flags |= crypto_skcipher_get_flags(op->fallback.blk) &
++ CRYPTO_TFM_RES_MASK;
+ }
+ return ret;
+ }
+@@ -185,33 +188,28 @@ static int fallback_blk_dec(struct blkci
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_decrypt(req);
+ }
++
+ static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_encrypt(req);
+ }
+
+ static void
+@@ -311,6 +309,9 @@ geode_cbc_decrypt(struct blkcipher_desc
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+@@ -343,6 +344,9 @@ geode_cbc_encrypt(struct blkcipher_desc
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+@@ -370,8 +374,9 @@ static int fallback_init_blk(struct cryp
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++ op->fallback.blk = crypto_alloc_skcipher(name, 0,
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(op->fallback.blk)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+@@ -385,7 +390,7 @@ static void fallback_exit_blk(struct cry
+ {
+ struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+- crypto_free_blkcipher(op->fallback.blk);
++ crypto_free_skcipher(op->fallback.blk);
+ op->fallback.blk = NULL;
+ }
+
+@@ -424,6 +429,9 @@ geode_ecb_decrypt(struct blkcipher_desc
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+@@ -454,6 +462,9 @@ geode_ecb_encrypt(struct blkcipher_desc
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+--- a/drivers/crypto/geode-aes.h
++++ b/drivers/crypto/geode-aes.h
+@@ -64,7 +64,7 @@ struct geode_aes_op {
+ u8 *iv;
+
+ union {
+- struct crypto_blkcipher *blk;
++ struct crypto_skcipher *blk;
+ struct crypto_cipher *cip;
+ } fallback;
+ u32 keylen;
--- /dev/null
+From e5e884b42639c74b5b57dc277909915c0aefc8bb Mon Sep 17 00:00:00 2001
+From: Wen Huang <huangwenabc@gmail.com>
+Date: Thu, 28 Nov 2019 18:51:04 +0800
+Subject: libertas: Fix two buffer overflows at parsing bss descriptor
+
+From: Wen Huang <huangwenabc@gmail.com>
+
+commit e5e884b42639c74b5b57dc277909915c0aefc8bb upstream.
+
+add_ie_rates() copies rates from the bss descriptor received from a remote
+AP without checking their length. When a victim connects to a remote
+attacker, this may trigger a buffer overflow.
+lbs_ibss_join_existing() copies rates from the bss descriptor received from
+a remote IBSS node without checking their length. When a victim connects to
+a remote attacker, this may trigger a buffer overflow.
+Fix both by performing the length check before the copy.
+
+This fix addresses CVE-2019-14896 and CVE-2019-14897.
+It also fixes a build warning about mixed declarations and code.
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Wen Huang <huangwenabc@gmail.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/libertas/cfg.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/marvell/libertas/cfg.c
++++ b/drivers/net/wireless/marvell/libertas/cfg.c
+@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int
+ int hw, ap, ap_max = ie[1];
+ u8 hw_rate;
+
++ if (ap_max > MAX_RATES) {
++ lbs_deb_assoc("invalid rates\n");
++ return tlv;
++ }
+ /* Advance past IE header */
+ ie += 2;
+
+@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct
+ struct cmd_ds_802_11_ad_hoc_join cmd;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
+ int ret = 0;
++ int hw, i;
++ u8 rates_max;
++ u8 *rates;
+
+ /* TODO: set preamble based on scan result */
+ ret = lbs_set_radio(priv, preamble, 1);
+@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct
+ if (!rates_eid) {
+ lbs_add_rates(cmd.bss.rates);
+ } else {
+- int hw, i;
+- u8 rates_max = rates_eid[1];
+- u8 *rates = cmd.bss.rates;
++ rates_max = rates_eid[1];
++ if (rates_max > MAX_RATES) {
++ lbs_deb_join("invalid rates");
++ goto out;
++ }
++ rates = cmd.bss.rates;
+ for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
+ u8 hw_rate = lbs_rates[hw].bitrate / 5;
+ for (i = 0; i < rates_max; i++) {
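
The core of both fixes is the same pattern: validate the peer-supplied IE
length against the capacity of the destination array before copying. Below is
a minimal userspace sketch of that check, assuming a MAX_RATES cap and a flat
rates array rather than the driver's real structures:

    #include <stdio.h>
    #include <string.h>

    #define MAX_RATES 14    /* assumed cap, standing in for the driver limit */

    /* ie points at a rates element: ie[0] = id, ie[1] = length, then data. */
    static int copy_rates(unsigned char *dst, size_t dst_len,
                          const unsigned char *ie)
    {
        unsigned char n = ie[1];    /* length is controlled by the remote peer */

        if (n > MAX_RATES || n > dst_len)
            return -1;              /* reject instead of overflowing dst */
        memcpy(dst, ie + 2, n);
        return n;
    }

    int main(void)
    {
        unsigned char rates[MAX_RATES] = { 0 };
        unsigned char bad_ie[40] = { 0x01, 0xff /* bogus length */ };

        printf("copied %d bytes\n", copy_rates(rates, sizeof(rates), bad_ie));
        return 0;
    }
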
--- /dev/null
+From ee8951e56c0f960b9621636603a822811cef3158 Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Date: Sun, 10 Nov 2019 07:27:04 +0100
+Subject: media: v4l2-ioctl.c: zero reserved fields for S/TRY_FMT
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+commit ee8951e56c0f960b9621636603a822811cef3158 upstream.
+
+v4l2_vbi_format, v4l2_sliced_vbi_format and v4l2_sdr_format
+have a reserved array at the end that should be zeroed by drivers
+as per the V4L2 spec. Older drivers often do not do this, so just
+handle this in the core.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/v4l2-core/v4l2-ioctl.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1548,12 +1548,12 @@ static int v4l_s_fmt(const struct v4l2_i
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out))
+@@ -1576,22 +1576,22 @@ static int v4l_s_fmt(const struct v4l2_i
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_meta_cap))
+@@ -1635,12 +1635,12 @@ static int v4l_try_fmt(const struct v4l2
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out))
+@@ -1663,22 +1663,22 @@ static int v4l_try_fmt(const struct v4l2
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_meta_cap))
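
CLEAR_AFTER_FIELD zeroes every byte of the ioctl argument that follows the
named struct member, so reserved fields a driver never touches still reach
userspace as zero. Below is a standalone sketch of the same offsetof/memset
idea, using a made-up struct rather than the real v4l2_format:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct fmt {
        unsigned int type;
        unsigned int buffersize;
        unsigned int reserved[2];    /* must be returned as zero */
    };

    /* Zero everything in *p that lies after member m. */
    #define clear_after_field(p, m)                                          \
        memset((char *)(p) + offsetof(typeof(*(p)), m) + sizeof((p)->m), 0,  \
               sizeof(*(p)) - offsetof(typeof(*(p)), m) - sizeof((p)->m))

    int main(void)
    {
        struct fmt f = {
            .type = 1, .buffersize = 4096, .reserved = { 0xdead, 0xbeef },
        };

        clear_after_field(&f, buffersize);
        printf("%u %u %u %u\n", f.type, f.buffersize,
               f.reserved[0], f.reserved[1]);    /* 1 4096 0 0 */
        return 0;
    }
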
--- /dev/null
+From 865ad2f2201dc18685ba2686f13217f8b3a9c52c Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Add mutual exclusion for accessing shared state
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 865ad2f2201dc18685ba2686f13217f8b3a9c52c upstream.
+
+The netif_stop_queue() call in sonic_send_packet() races with the
+netif_wake_queue() call in sonic_interrupt(). This causes issues
+like "NETDEV WATCHDOG: eth0 (macsonic): transmit queue 0 timed out".
+Fix this by disabling interrupts when accessing tx_skb[] and next_tx.
+Update a comment to clarify the synchronization properties.
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 49 +++++++++++++++++++++++++----------
+ drivers/net/ethernet/natsemi/sonic.h | 1
+ 2 files changed, 36 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -63,6 +63,8 @@ static int sonic_open(struct net_device
+
+ netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
+
++ spin_lock_init(&lp->lock);
++
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (skb == NULL) {
+@@ -205,8 +207,6 @@ static void sonic_tx_timeout(struct net_
+ * wake the tx queue
+ * Concurrently with all of this, the SONIC is potentially writing to
+ * the status flags of the TDs.
+- * Until some mutual exclusion is added, this code will not work with SMP. However,
+- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
+ */
+
+ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
+@@ -214,7 +214,8 @@ static int sonic_send_packet(struct sk_b
+ struct sonic_local *lp = netdev_priv(dev);
+ dma_addr_t laddr;
+ int length;
+- int entry = lp->next_tx;
++ int entry;
++ unsigned long flags;
+
+ netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
+
+@@ -236,6 +237,10 @@ static int sonic_send_packet(struct sk_b
+ return NETDEV_TX_OK;
+ }
+
++ spin_lock_irqsave(&lp->lock, flags);
++
++ entry = lp->next_tx;
++
+ sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
+ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
+@@ -245,10 +250,6 @@ static int sonic_send_packet(struct sk_b
+ sonic_tda_put(dev, entry, SONIC_TD_LINK,
+ sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
+
+- /*
+- * Must set tx_skb[entry] only after clearing status, and
+- * before clearing EOL and before stopping queue
+- */
+ wmb();
+ lp->tx_len[entry] = length;
+ lp->tx_laddr[entry] = laddr;
+@@ -271,6 +272,8 @@ static int sonic_send_packet(struct sk_b
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
++ spin_unlock_irqrestore(&lp->lock, flags);
++
+ return NETDEV_TX_OK;
+ }
+
+@@ -283,9 +286,21 @@ static irqreturn_t sonic_interrupt(int i
+ struct net_device *dev = dev_id;
+ struct sonic_local *lp = netdev_priv(dev);
+ int status;
++ unsigned long flags;
++
++ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
++ * with sonic_send_packet() so that the two functions can share state.
++ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
++ * by macsonic which must use two IRQs with different priority levels.
++ */
++ spin_lock_irqsave(&lp->lock, flags);
++
++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
++ if (!status) {
++ spin_unlock_irqrestore(&lp->lock, flags);
+
+- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
+ return IRQ_NONE;
++ }
+
+ do {
+ if (status & SONIC_INT_PKTRX) {
+@@ -299,11 +314,12 @@ static irqreturn_t sonic_interrupt(int i
+ int td_status;
+ int freed_some = 0;
+
+- /* At this point, cur_tx is the index of a TD that is one of:
+- * unallocated/freed (status set & tx_skb[entry] clear)
+- * allocated and sent (status set & tx_skb[entry] set )
+- * allocated and not yet sent (status clear & tx_skb[entry] set )
+- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
++ /* The state of a Transmit Descriptor may be inferred
++ * from { tx_skb[entry], td_status } as follows.
++ * { clear, clear } => the TD has never been used
++ * { set, clear } => the TD was handed to SONIC
++ * { set, set } => the TD was handed back
++ * { clear, set } => the TD is available for re-use
+ */
+
+ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
+@@ -405,7 +421,12 @@ static irqreturn_t sonic_interrupt(int i
+ /* load CAM done */
+ if (status & SONIC_INT_LCD)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
+- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
++
++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
++ } while (status);
++
++ spin_unlock_irqrestore(&lp->lock, flags);
++
+ return IRQ_HANDLED;
+ }
+
+--- a/drivers/net/ethernet/natsemi/sonic.h
++++ b/drivers/net/ethernet/natsemi/sonic.h
+@@ -322,6 +322,7 @@ struct sonic_local {
+ int msg_enable;
+ struct device *device; /* generic device */
+ struct net_device_stats stats;
++ spinlock_t lock;
+ };
+
+ #define TX_TIMEOUT (3 * HZ)
--- /dev/null
+From eaabfd19b2c787bbe88dc32424b9a43d67293422 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Avoid needless receive descriptor EOL flag updates
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit eaabfd19b2c787bbe88dc32424b9a43d67293422 upstream.
+
+The while loop in sonic_rx() traverses the rx descriptor ring. It stops
+when it reaches a descriptor that the SONIC has not used. Each iteration
+advances the EOL flag so the SONIC can keep using more descriptors.
+Therefore, the while loop has no definite termination condition.
+
+The algorithm described in the National Semiconductor literature is quite
+different. It consumes descriptors up to the one with its EOL flag set
+(which will also have its "in use" flag set). All freed descriptors are
+then returned to the ring at once, by adjusting the EOL flags (and link
+pointers).
+
+Adopt the algorithm from the datasheet, as it is simpler, terminates quickly
+and avoids a lot of pointless descriptor EOL flag changes.
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -435,6 +435,7 @@ static void sonic_rx(struct net_device *
+ struct sonic_local *lp = netdev_priv(dev);
+ int status;
+ int entry = lp->cur_rx;
++ int prev_entry = lp->eol_rx;
+
+ while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
+ struct sk_buff *used_skb;
+@@ -515,13 +516,21 @@ static void sonic_rx(struct net_device *
+ /*
+ * give back the descriptor
+ */
+- sonic_rda_put(dev, entry, SONIC_RD_LINK,
+- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
+- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
+- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
+- lp->eol_rx = entry;
+- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
++
++ prev_entry = entry;
++ entry = (entry + 1) & SONIC_RDS_MASK;
++ }
++
++ lp->cur_rx = entry;
++
++ if (prev_entry != lp->eol_rx) {
++ /* Advance the EOL flag to put descriptors back into service */
++ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
++ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
++ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
++ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
++ lp->eol_rx = prev_entry;
+ }
+ /*
+ * If any worth-while packets have been received, netif_rx()
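
The reworked loop consumes descriptors up to the first one still owned by the
chip and only then moves the end-of-list (EOL) marker, once, instead of
toggling EOL on every iteration. Below is a small userspace ring simulation of
that bookkeeping, with an illustrative ring size and simplified descriptor
fields:

    #include <stdio.h>
    #include <stdbool.h>

    #define NDESC 16
    #define MASK  (NDESC - 1)

    struct rdesc {
        bool in_use;    /* false once the chip has filled the descriptor */
        bool eol;       /* end-of-list marker the chip stops at */
    };

    /* Process filled descriptors, then return them to the ring in one batch. */
    static void rx_ring_service(struct rdesc *ring, int *cur, int *eol)
    {
        int entry = *cur, prev = *eol;

        while (!ring[entry].in_use) {
            /* ... hand the packet up the stack here ... */
            ring[entry].in_use = true;    /* give the descriptor back */
            prev = entry;
            entry = (entry + 1) & MASK;
        }
        *cur = entry;
        if (prev != *eol) {               /* advance EOL once, at the end */
            ring[prev].eol = true;
            ring[*eol].eol = false;
            *eol = prev;
        }
    }

    int main(void)
    {
        struct rdesc ring[NDESC];
        int cur = 0, eol = NDESC - 1, i;

        for (i = 0; i < NDESC; i++)
            ring[i] = (struct rdesc){ .in_use = true, .eol = (i == eol) };
        for (i = 0; i < 5; i++)
            ring[i].in_use = false;       /* pretend the chip filled five */

        rx_ring_service(ring, &cur, &eol);
        printf("cur=%d eol=%d\n", cur, eol);    /* cur=5 eol=4 */
        return 0;
    }
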
--- /dev/null
+From 5fedabf5a70be26b19d7520f09f12a62274317c6 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Clear interrupt flags immediately
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 5fedabf5a70be26b19d7520f09f12a62274317c6 upstream.
+
+The chip can change a packet's descriptor status flags at any time.
+However, an active interrupt flag gets cleared rather late. This
+allows a race condition that could theoretically lose an interrupt.
+Fix this by clearing asserted interrupt flags immediately.
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 28 ++++++----------------------
+ 1 file changed, 6 insertions(+), 22 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -303,10 +303,11 @@ static irqreturn_t sonic_interrupt(int i
+ }
+
+ do {
++ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
++
+ if (status & SONIC_INT_PKTRX) {
+ netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
+ sonic_rx(dev); /* got packet(s) */
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
+ }
+
+ if (status & SONIC_INT_TXDN) {
+@@ -361,7 +362,6 @@ static irqreturn_t sonic_interrupt(int i
+ if (freed_some || lp->tx_skb[entry] == NULL)
+ netif_wake_queue(dev); /* The ring is no longer full */
+ lp->cur_tx = entry;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
+ }
+
+ /*
+@@ -371,42 +371,31 @@ static irqreturn_t sonic_interrupt(int i
+ netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
+ __func__);
+ lp->stats.rx_fifo_errors++;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_RDE) {
+ netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
+ __func__);
+ lp->stats.rx_dropped++;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_RBAE) {
+ netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
+ __func__);
+ lp->stats.rx_dropped++;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
+ }
+
+ /* counter overruns; all counters are 16bit wide */
+- if (status & SONIC_INT_FAE) {
++ if (status & SONIC_INT_FAE)
+ lp->stats.rx_frame_errors += 65536;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
+- }
+- if (status & SONIC_INT_CRC) {
++ if (status & SONIC_INT_CRC)
+ lp->stats.rx_crc_errors += 65536;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
+- }
+- if (status & SONIC_INT_MP) {
++ if (status & SONIC_INT_MP)
+ lp->stats.rx_missed_errors += 65536;
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
+- }
+
+ /* transmit error */
+- if (status & SONIC_INT_TXER) {
++ if (status & SONIC_INT_TXER)
+ if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
+ netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
+ __func__);
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+- }
+
+ /* bus retry */
+ if (status & SONIC_INT_BR) {
+@@ -415,13 +404,8 @@ static irqreturn_t sonic_interrupt(int i
+ /* ... to help debug DMA problems causing endless interrupts. */
+ /* Bounce the eth interface to turn on the interrupt again. */
+ SONIC_WRITE(SONIC_IMR, 0);
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
+ }
+
+- /* load CAM done */
+- if (status & SONIC_INT_LCD)
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
+-
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
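The interrupt loop now acknowledges the asserted bits first (the ISR is
write-one-to-clear) and handles them afterwards, so an event that re-asserts
while it is being handled shows up again on the next pass instead of being
silently cleared. A tiny write-one-to-clear model, with a fake register
standing in for SONIC_ISR:

    #include <stdio.h>

    static unsigned int fake_isr = 0x0005;    /* pretend two events are pending */

    static unsigned int read_isr(void)    { return fake_isr; }
    static void write_isr(unsigned int v) { fake_isr &= ~v; }   /* W1C */

    int main(void)
    {
        unsigned int status;

        while ((status = read_isr()) != 0) {
            write_isr(status);    /* ack first: a re-assertion is not lost */
            /* ... handle each bit of 'status' here ... */
            printf("handled 0x%04x\n", status);
        }
        return 0;
    }
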
--- /dev/null
+From 27e0c31c5f27c1d1a1d9d135c123069f60dcf97b Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Fix command register usage
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 27e0c31c5f27c1d1a1d9d135c123069f60dcf97b upstream.
+
+There are several issues relating to command register usage during
+chip initialization.
+
+Firstly, the SONIC sometimes comes out of software reset with the
+Start Timer bit set. This gets logged as,
+
+ macsonic macsonic eth0: sonic_init: status=24, i=101
+
+Avoid this by issuing the Stop Timer command sooner rather than later.
+
+Secondly, the loop that waits for the Read RRA command to complete has its
+break condition inverted, so it always runs to its iteration limit. Call the
+sonic_quiesce() helper for this wait instead.
+
+Finally, give the Receiver Enable command after clearing interrupts,
+not before, to avoid the possibility of losing an interrupt.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 18 +++---------------
+ 1 file changed, 3 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -663,7 +663,6 @@ static void sonic_multicast_list(struct
+ */
+ static int sonic_init(struct net_device *dev)
+ {
+- unsigned int cmd;
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+
+@@ -680,7 +679,7 @@ static int sonic_init(struct net_device
+ * enable interrupts, then completely initialize the SONIC
+ */
+ SONIC_WRITE(SONIC_CMD, 0);
+- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
+ /*
+@@ -710,14 +709,7 @@ static int sonic_init(struct net_device
+ netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
+- i = 0;
+- while (i++ < 100) {
+- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
+- break;
+- }
+-
+- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
+- SONIC_READ(SONIC_CMD), i);
++ sonic_quiesce(dev, SONIC_CR_RRRA);
+
+ /*
+ * Initialize the receive descriptors so that they
+@@ -805,15 +797,11 @@ static int sonic_init(struct net_device
+ * enable receiver, disable loopback
+ * and enable all interrupts
+ */
+- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
+ SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
+ SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
+-
+- cmd = SONIC_READ(SONIC_CMD);
+- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
+- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
+
+ netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
+ SONIC_READ(SONIC_CMD));
--- /dev/null
+From 427db97df1ee721c20bdc9a66db8a9e1da719855 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Fix interface error stats collection
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 427db97df1ee721c20bdc9a66db8a9e1da719855 upstream.
+
+The tx_aborted_errors statistic should count packets flagged with the EXD,
+EXC, FU, or BCM bits because those bits denote an aborted transmission.
+That corresponds to the bitmask 0x0446, not 0x0642. Use macros for these
+constants to avoid mistakes. It is better to leave out FIFO Underruns (FU)
+as there's a separate counter for that purpose.
+
+Don't lump all these errors in with the general tx_errors counter as
+that's used for tx timeout events.
+
+On the rx side, don't count RDE and RBAE interrupts as dropped packets.
+These interrupts don't indicate a lost packet, just a lack of resources.
+When a lack of resources results in a lost packet, this gets reported
+in the rx_missed_errors counter (along with RFO events).
+
+Don't double-count rx_frame_errors and rx_crc_errors.
+
+Don't use the general rx_errors counter for events that already have
+special counters.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 21 +++++++--------------
+ drivers/net/ethernet/natsemi/sonic.h | 1 +
+ 2 files changed, 8 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -329,18 +329,19 @@ static irqreturn_t sonic_interrupt(int i
+ if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
+ break;
+
+- if (td_status & 0x0001) {
++ if (td_status & SONIC_TCR_PTX) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
+ } else {
+- lp->stats.tx_errors++;
+- if (td_status & 0x0642)
++ if (td_status & (SONIC_TCR_EXD |
++ SONIC_TCR_EXC | SONIC_TCR_BCM))
+ lp->stats.tx_aborted_errors++;
+- if (td_status & 0x0180)
++ if (td_status &
++ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
+ lp->stats.tx_carrier_errors++;
+- if (td_status & 0x0020)
++ if (td_status & SONIC_TCR_OWC)
+ lp->stats.tx_window_errors++;
+- if (td_status & 0x0004)
++ if (td_status & SONIC_TCR_FU)
+ lp->stats.tx_fifo_errors++;
+ }
+
+@@ -370,17 +371,14 @@ static irqreturn_t sonic_interrupt(int i
+ if (status & SONIC_INT_RFO) {
+ netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
+ __func__);
+- lp->stats.rx_fifo_errors++;
+ }
+ if (status & SONIC_INT_RDE) {
+ netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
+ __func__);
+- lp->stats.rx_dropped++;
+ }
+ if (status & SONIC_INT_RBAE) {
+ netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
+ __func__);
+- lp->stats.rx_dropped++;
+ }
+
+ /* counter overruns; all counters are 16bit wide */
+@@ -472,11 +470,6 @@ static void sonic_rx(struct net_device *
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
+ } else {
+ /* This should only happen, if we enable accepting broken packets. */
+- lp->stats.rx_errors++;
+- if (status & SONIC_RCR_FAER)
+- lp->stats.rx_frame_errors++;
+- if (status & SONIC_RCR_CRCR)
+- lp->stats.rx_crc_errors++;
+ }
+ if (status & SONIC_RCR_LPKT) {
+ /*
+--- a/drivers/net/ethernet/natsemi/sonic.h
++++ b/drivers/net/ethernet/natsemi/sonic.h
+@@ -175,6 +175,7 @@
+ #define SONIC_TCR_NCRS 0x0100
+ #define SONIC_TCR_CRLS 0x0080
+ #define SONIC_TCR_EXC 0x0040
++#define SONIC_TCR_OWC 0x0020
+ #define SONIC_TCR_PMB 0x0008
+ #define SONIC_TCR_FU 0x0004
+ #define SONIC_TCR_BCM 0x0002
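
Replacing 0x0642 and friends with named TCR bits makes it obvious which
conditions count as an aborted transmission (EXD, EXC, BCM) as opposed to a
carrier problem (NCRS, CRLS). Below is a small classifier using the same bit
values quoted by these patches; it mirrors the patch's grouping rather than
implementing the full statistics path:

    #include <stdio.h>

    #define SONIC_TCR_EXD  0x0400    /* excessive deferral */
    #define SONIC_TCR_NCRS 0x0100    /* no carrier sense */
    #define SONIC_TCR_CRLS 0x0080    /* carrier lost */
    #define SONIC_TCR_EXC  0x0040    /* excessive collisions */
    #define SONIC_TCR_OWC  0x0020    /* out-of-window collision */
    #define SONIC_TCR_FU   0x0004    /* FIFO underrun */
    #define SONIC_TCR_BCM  0x0002    /* byte count mismatch */

    static const char *classify(unsigned int td_status)
    {
        if (td_status & (SONIC_TCR_EXD | SONIC_TCR_EXC | SONIC_TCR_BCM))
            return "aborted";
        if (td_status & (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
            return "carrier error";
        if (td_status & SONIC_TCR_OWC)
            return "window error";
        if (td_status & SONIC_TCR_FU)
            return "fifo error";
        return "ok";
    }

    int main(void)
    {
        printf("%s\n", classify(SONIC_TCR_EXC));    /* aborted */
        printf("%s\n", classify(SONIC_TCR_FU));     /* fifo error */
        return 0;
    }
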
--- /dev/null
+From 9e311820f67e740f4fb8dcb82b4c4b5b05bdd1a5 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Fix receive buffer handling
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 9e311820f67e740f4fb8dcb82b4c4b5b05bdd1a5 upstream.
+
+The SONIC can sometimes advance its rx buffer pointer (RRP register)
+without advancing its rx descriptor pointer (CRDA register). As a result
+the index of the current rx descriptor may not equal that of the current
+rx buffer. The driver mistakenly assumes that they are always equal.
+This assumption leads to incorrect packet lengths and possible packet
+duplication. Avoid this by calling a new function to locate the buffer
+corresponding to a given descriptor.
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 35 ++++++++++++++++++++++++++++++-----
+ drivers/net/ethernet/natsemi/sonic.h | 5 +++--
+ 2 files changed, 33 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -412,6 +412,21 @@ static irqreturn_t sonic_interrupt(int i
+ return IRQ_HANDLED;
+ }
+
++/* Return the array index corresponding to a given Receive Buffer pointer. */
++static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
++ unsigned int last)
++{
++ unsigned int i = last;
++
++ do {
++ i = (i + 1) & SONIC_RRS_MASK;
++ if (addr == lp->rx_laddr[i])
++ return i;
++ } while (i != last);
++
++ return -ENOENT;
++}
++
+ /*
+ * We have a good packet(s), pass it/them up the network stack.
+ */
+@@ -431,6 +446,16 @@ static void sonic_rx(struct net_device *
+
+ status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+ if (status & SONIC_RCR_PRX) {
++ u32 addr = (sonic_rda_get(dev, entry,
++ SONIC_RD_PKTPTR_H) << 16) |
++ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
++ int i = index_from_addr(lp, addr, entry);
++
++ if (i < 0) {
++ WARN_ONCE(1, "failed to find buffer!\n");
++ break;
++ }
++
+ /* Malloc up new buffer. */
+ new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (new_skb == NULL) {
+@@ -452,7 +477,7 @@ static void sonic_rx(struct net_device *
+
+ /* now we have a new skb to replace it, pass the used one up the stack */
+ dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
+- used_skb = lp->rx_skb[entry];
++ used_skb = lp->rx_skb[i];
+ pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb, dev);
+@@ -461,13 +486,13 @@ static void sonic_rx(struct net_device *
+ lp->stats.rx_bytes += pkt_len;
+
+ /* and insert the new skb */
+- lp->rx_laddr[entry] = new_laddr;
+- lp->rx_skb[entry] = new_skb;
++ lp->rx_laddr[i] = new_laddr;
++ lp->rx_skb[i] = new_skb;
+
+ bufadr_l = (unsigned long)new_laddr & 0xffff;
+ bufadr_h = (unsigned long)new_laddr >> 16;
+- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
+- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
++ sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
++ sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
+ } else {
+ /* This should only happen, if we enable accepting broken packets. */
+ }
+--- a/drivers/net/ethernet/natsemi/sonic.h
++++ b/drivers/net/ethernet/natsemi/sonic.h
+@@ -275,8 +275,9 @@
+ #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+ #define SONIC_NUM_TDS 16 /* number of transmit descriptors */
+
+-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
+-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
++#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
++#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
++#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
+
+ #define SONIC_RBSIZE 1520 /* size of one resource buffer */
+
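index_from_addr() walks the receive-buffer table starting just after the last
known position and returns the slot whose DMA address matches the one the
descriptor reports, which is what decouples "current descriptor" from
"current buffer". Below is a standalone version of that circular search, with
a simplified table of plain addresses:

    #include <stdio.h>

    #define NBUF     16
    #define BUF_MASK (NBUF - 1)

    /* Find which slot holds 'addr', searching circularly from 'last' + 1. */
    static int index_from_addr(const unsigned long *buf_addr, unsigned long addr,
                               unsigned int last)
    {
        unsigned int i = last;

        do {
            i = (i + 1) & BUF_MASK;
            if (buf_addr[i] == addr)
                return i;
        } while (i != last);

        return -1;    /* not found */
    }

    int main(void)
    {
        unsigned long table[NBUF];
        int i;

        for (i = 0; i < NBUF; i++)
            table[i] = 0x1000ul * (i + 1);

        printf("%d\n", index_from_addr(table, 0x3000, 5));    /* 2 */
        printf("%d\n", index_from_addr(table, 0xdead, 5));    /* -1 */
        return 0;
    }
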
--- /dev/null
+From 89ba879e95582d3bba55081e45b5409e883312ca Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Fix receive buffer replenishment
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 89ba879e95582d3bba55081e45b5409e883312ca upstream.
+
+As soon as the driver is finished with a receive buffer it allocates a new
+one and overwrites the corresponding RRA entry with a new buffer pointer.
+
+The problem is that the buffer pointer is split across two word-sized
+registers, so it can't be updated in one atomic store. This operation
+therefore races with the chip while it stores received packets and advances
+its RRP register, which could result in memory corruption by a DMA write.
+
+Avoid this problem by adding buffers only at the location given by the
+RWP register, in accordance with the National Semiconductor datasheet.
+
+Refactor this code into separate functions that calculate an RRA pointer
+and update the RWP.
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 150 ++++++++++++++++++++---------------
+ drivers/net/ethernet/natsemi/sonic.h | 18 +++-
+ 2 files changed, 105 insertions(+), 63 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -427,6 +427,59 @@ static int index_from_addr(struct sonic_
+ return -ENOENT;
+ }
+
++/* Allocate and map a new skb to be used as a receive buffer. */
++static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
++ struct sk_buff **new_skb, dma_addr_t *new_addr)
++{
++ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
++ if (!*new_skb)
++ return false;
++
++ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
++ skb_reserve(*new_skb, 2);
++
++ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
++ SONIC_RBSIZE, DMA_FROM_DEVICE);
++ if (!*new_addr) {
++ dev_kfree_skb(*new_skb);
++ *new_skb = NULL;
++ return false;
++ }
++
++ return true;
++}
++
++/* Place a new receive resource in the Receive Resource Area and update RWP. */
++static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
++ dma_addr_t old_addr, dma_addr_t new_addr)
++{
++ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
++ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
++ u32 buf;
++
++ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
++ * scans the other resources in the RRA, those in the range [RWP, RRP).
++ */
++ do {
++ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
++ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
++
++ if (buf == old_addr)
++ break;
++
++ entry = (entry + 1) & SONIC_RRS_MASK;
++ } while (entry != end);
++
++ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
++
++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
++
++ entry = (entry + 1) & SONIC_RRS_MASK;
++
++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
++}
++
+ /*
+ * We have a good packet(s), pass it/them up the network stack.
+ */
+@@ -435,18 +488,15 @@ static void sonic_rx(struct net_device *
+ struct sonic_local *lp = netdev_priv(dev);
+ int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
++ bool rbe = false;
+
+ while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
+- struct sk_buff *used_skb;
+- struct sk_buff *new_skb;
+- dma_addr_t new_laddr;
+- u16 bufadr_l;
+- u16 bufadr_h;
+- int pkt_len;
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+ /* If the RD has LPKT set, the chip has finished with the RB */
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
++ struct sk_buff *new_skb;
++ dma_addr_t new_laddr;
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+@@ -457,55 +507,35 @@ static void sonic_rx(struct net_device *
+ break;
+ }
+
+- /* Malloc up new buffer. */
+- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+- if (new_skb == NULL) {
++ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
++ struct sk_buff *used_skb = lp->rx_skb[i];
++ int pkt_len;
++
++ /* Pass the used buffer up the stack */
++ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
++ DMA_FROM_DEVICE);
++
++ pkt_len = sonic_rda_get(dev, entry,
++ SONIC_RD_PKTLEN);
++ skb_trim(used_skb, pkt_len);
++ used_skb->protocol = eth_type_trans(used_skb,
++ dev);
++ netif_rx(used_skb);
++ lp->stats.rx_packets++;
++ lp->stats.rx_bytes += pkt_len;
++
++ lp->rx_skb[i] = new_skb;
++ lp->rx_laddr[i] = new_laddr;
++ } else {
++ /* Failed to obtain a new buffer so re-use it */
++ new_laddr = addr;
+ lp->stats.rx_dropped++;
+- break;
+ }
+- /* provide 16 byte IP header alignment unless DMA requires otherwise */
+- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+- skb_reserve(new_skb, 2);
+-
+- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
+- SONIC_RBSIZE, DMA_FROM_DEVICE);
+- if (!new_laddr) {
+- dev_kfree_skb(new_skb);
+- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+- lp->stats.rx_dropped++;
+- break;
+- }
+-
+- /* now we have a new skb to replace it, pass the used one up the stack */
+- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
+- used_skb = lp->rx_skb[i];
+- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
+- skb_trim(used_skb, pkt_len);
+- used_skb->protocol = eth_type_trans(used_skb, dev);
+- netif_rx(used_skb);
+- lp->stats.rx_packets++;
+- lp->stats.rx_bytes += pkt_len;
+-
+- /* and insert the new skb */
+- lp->rx_laddr[i] = new_laddr;
+- lp->rx_skb[i] = new_skb;
+-
+- bufadr_l = (unsigned long)new_laddr & 0xffff;
+- bufadr_h = (unsigned long)new_laddr >> 16;
+- sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
+- sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
+- /*
+- * this was the last packet out of the current receive buffer
+- * give the buffer back to the SONIC
++ /* If RBE is already asserted when RWP advances then
++ * it's safe to clear RBE after processing this packet.
+ */
+- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
+- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
+- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
+- __func__);
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
+- }
++ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
++ sonic_update_rra(dev, lp, addr, new_laddr);
+ }
+ /*
+ * give back the descriptor
+@@ -527,6 +557,9 @@ static void sonic_rx(struct net_device *
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+ lp->eol_rx = prev_entry;
+ }
++
++ if (rbe)
++ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * has done a mark_bh(NET_BH) for us and will work on them
+@@ -641,15 +674,10 @@ static int sonic_init(struct net_device
+ }
+
+ /* initialize all RRA registers */
+- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
+- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
+- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
+- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
+-
+- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
+- SONIC_WRITE(SONIC_REA, lp->rra_end);
+- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
+- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
++ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
++ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
++ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
+ SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
+ SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
+
+--- a/drivers/net/ethernet/natsemi/sonic.h
++++ b/drivers/net/ethernet/natsemi/sonic.h
+@@ -314,8 +314,6 @@ struct sonic_local {
+ u32 rda_laddr; /* logical DMA address of RDA */
+ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
+ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
+- unsigned int rra_end;
+- unsigned int cur_rwp;
+ unsigned int cur_rx;
+ unsigned int cur_tx; /* first unacked transmit packet */
+ unsigned int eol_rx;
+@@ -450,6 +448,22 @@ static inline __u16 sonic_rra_get(struct
+ (entry * SIZEOF_SONIC_RR) + offset);
+ }
+
++static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
++{
++ struct sonic_local *lp = netdev_priv(dev);
++
++ return lp->rra_laddr +
++ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
++}
++
++static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
++{
++ struct sonic_local *lp = netdev_priv(dev);
++
++ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
++ SONIC_BUS_SCALE(lp->dma_bitmode));
++}
++
+ static const char version[] =
+ "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
+
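sonic_update_rra() only rewrites a resource slot that the chip has already
consumed (one of those in the range [RWP, RRP)) and then advances RWP by a
single entry, so a buffer pointer is never rewritten while the chip may still
read it. Below is a small userspace model of that index arithmetic, with an
illustrative slot count and plain variables standing in for the RRP/RWP
registers:

    #include <stdio.h>

    #define NRES 16
    #define MASK (NRES - 1)

    static unsigned long rra[NRES];    /* receive resource area: buffer addrs */
    static unsigned int rrp, rwp;      /* chip read pointer, driver write pointer */

    /* Replace the consumed resource holding old_addr and hand one slot back. */
    static int update_rra(unsigned long old_addr, unsigned long new_addr)
    {
        unsigned int entry = rwp;

        /* Slots in [rrp, rwp) belong to the chip; scan the rest, [rwp, rrp). */
        do {
            if (rra[entry] == old_addr)
                break;
            entry = (entry + 1) & MASK;
        } while (entry != rrp);

        if (rra[entry] != old_addr)
            return -1;                 /* not found: nothing is changed */

        rra[entry] = new_addr;
        rwp = (entry + 1) & MASK;      /* one more slot is available again */
        return 0;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NRES; i++)
            rra[i] = 0x1000ul * (i + 1);
        rrp = 3;    /* chip will read slot 3 next */
        rwp = 2;    /* driver may refill starting at slot 2 */

        printf("%d rwp=%u\n", update_rra(0x3000, 0xaaaa0000ul), rwp);  /* 0 rwp=3 */
        return 0;
    }
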
--- /dev/null
+From 94b166349503957079ef5e7d6f667f157aea014a Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Improve receive descriptor status flag check
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 94b166349503957079ef5e7d6f667f157aea014a upstream.
+
+After sonic_tx_timeout() calls sonic_init(), it can happen that
+sonic_rx() will subsequently encounter a receive descriptor with no
+flags set. Remove the comment that says that this can't happen.
+
+When giving a receive descriptor to the SONIC, clear the descriptor
+status field. That way, any rx descriptor with flags set can only be
+a newly received packet.
+
+Don't process a descriptor without the LPKT bit set. The buffer is
+still in use by the SONIC.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -433,7 +433,6 @@ static int index_from_addr(struct sonic_
+ static void sonic_rx(struct net_device *dev)
+ {
+ struct sonic_local *lp = netdev_priv(dev);
+- int status;
+ int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
+
+@@ -444,9 +443,10 @@ static void sonic_rx(struct net_device *
+ u16 bufadr_l;
+ u16 bufadr_h;
+ int pkt_len;
++ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+- if (status & SONIC_RCR_PRX) {
++ /* If the RD has LPKT set, the chip has finished with the RB */
++ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+@@ -494,10 +494,6 @@ static void sonic_rx(struct net_device *
+ bufadr_h = (unsigned long)new_laddr >> 16;
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
+- } else {
+- /* This should only happen, if we enable accepting broken packets. */
+- }
+- if (status & SONIC_RCR_LPKT) {
+ /*
+ * this was the last packet out of the current receive buffer
+ * give the buffer back to the SONIC
+@@ -510,12 +506,11 @@ static void sonic_rx(struct net_device *
+ __func__);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
+ }
+- } else
+- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
+- dev->name);
++ }
+ /*
+ * give back the descriptor
+ */
++ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
+ sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
+
+ prev_entry = entry;
--- /dev/null
+From 686f85d71d095f1d26b807e23b0f0bfd22042c45 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Prevent tx watchdog timeout
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 686f85d71d095f1d26b807e23b0f0bfd22042c45 upstream.
+
+Section 5.5.3.2 of the datasheet says,
+
+ If FIFO Underrun, Byte Count Mismatch, Excessive Collision, or
+ Excessive Deferral (if enabled) errors occur, transmission ceases.
+
+In this situation, the chip asserts a TXER interrupt rather than TXDN,
+but the TXDN handler is the only place where the transmit queue gets
+restarted. Hence, an aborted transmission can result in a watchdog
+timeout.
+
+This problem can be reproduced on a congested link, as that can result in
+excessive transmitter collisions. Another way to reproduce it is with a
+FIFO Underrun, which may be caused by DMA latency.
+
+In the event of a TXER interrupt, prevent a watchdog timeout by restarting
+transmission.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -414,10 +414,19 @@ static irqreturn_t sonic_interrupt(int i
+ lp->stats.rx_missed_errors += 65536;
+
+ /* transmit error */
+- if (status & SONIC_INT_TXER)
+- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
+- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
+- __func__);
++ if (status & SONIC_INT_TXER) {
++ u16 tcr = SONIC_READ(SONIC_TCR);
++
++ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
++ __func__, tcr);
++
++ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
++ SONIC_TCR_FU | SONIC_TCR_BCM)) {
++ /* Aborted transmission. Try again. */
++ netif_stop_queue(dev);
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
++ }
++ }
+
+ /* bus retry */
+ if (status & SONIC_INT_BR) {
--- /dev/null
+From 3f4b7e6a2be982fd8820a2b54d46dd9c351db899 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Quiesce SONIC before re-initializing descriptor memory
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 3f4b7e6a2be982fd8820a2b54d46dd9c351db899 upstream.
+
+Make sure the SONIC's DMA engine is idle before altering the transmit
+and receive descriptors. Add a helper for this as it will be needed
+again.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.c | 25 +++++++++++++++++++++++++
+ drivers/net/ethernet/natsemi/sonic.h | 3 +++
+ 2 files changed, 28 insertions(+)
+
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -115,6 +115,24 @@ static int sonic_open(struct net_device
+ return 0;
+ }
+
++/* Wait for the SONIC to become idle. */
++static void sonic_quiesce(struct net_device *dev, u16 mask)
++{
++ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
++ int i;
++ u16 bits;
++
++ for (i = 0; i < 1000; ++i) {
++ bits = SONIC_READ(SONIC_CMD) & mask;
++ if (!bits)
++ return;
++ if (irqs_disabled() || in_interrupt())
++ udelay(20);
++ else
++ usleep_range(100, 200);
++ }
++ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
++}
+
+ /*
+ * Close the SONIC device
+@@ -131,6 +149,9 @@ static int sonic_close(struct net_device
+ /*
+ * stop the SONIC, disable interrupts
+ */
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
++ sonic_quiesce(dev, SONIC_CR_ALL);
++
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+@@ -170,6 +191,9 @@ static void sonic_tx_timeout(struct net_
+ * put the Sonic into software-reset mode and
+ * disable all interrupts before releasing DMA buffers
+ */
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
++ sonic_quiesce(dev, SONIC_CR_ALL);
++
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+@@ -657,6 +681,7 @@ static int sonic_init(struct net_device
+ */
+ SONIC_WRITE(SONIC_CMD, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
++ sonic_quiesce(dev, SONIC_CR_ALL);
+
+ /*
+ * initialize the receive resource area
+--- a/drivers/net/ethernet/natsemi/sonic.h
++++ b/drivers/net/ethernet/natsemi/sonic.h
+@@ -110,6 +110,9 @@
+ #define SONIC_CR_TXP 0x0002
+ #define SONIC_CR_HTX 0x0001
+
++#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
++ SONIC_CR_RXEN | SONIC_CR_TXP)
++
+ /*
+ * SONIC data configuration bits
+ */
--- /dev/null
+From e3885f576196ddfc670b3d53e745de96ffcb49ab Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 23 Jan 2020 09:07:26 +1100
+Subject: net/sonic: Use MMIO accessors
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit e3885f576196ddfc670b3d53e745de96ffcb49ab upstream.
+
+The driver accesses descriptor memory which is simultaneously accessed by
+the chip, so the compiler must not be allowed to re-order CPU accesses.
+sonic_buf_get() used 'volatile' to prevent that. sonic_buf_put() should
+have done so too but was overlooked.
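+
+The distinction matters because a plain store through a non-volatile
+pointer may be elided or reordered by the compiler. A generic
+illustration of the accessor pattern (plain C, not the kernel's
+__raw_writew()):
+
+  #include <stdint.h>
+
+  /* Plain store: the compiler may merge, drop or reorder these. */
+  static inline void buf_put_plain(uint16_t *base, int off, uint16_t val)
+  {
+          base[off] = val;
+  }
+
+  /* Volatile store: never elided, and kept in order with respect to
+   * other volatile accesses, which is what descriptor memory needs.
+   */
+  static inline void buf_put_mmio(uint16_t *base, int off, uint16_t val)
+  {
+          *(volatile uint16_t *)(base + off) = val;
+  }
+
+  int main(void)
+  {
+          uint16_t desc[4] = { 0 };
+
+          buf_put_plain(desc, 0, 0x1234);
+          buf_put_mmio(desc, 1, 0x5678);
+          return 0;
+  }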
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/natsemi/sonic.h | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/natsemi/sonic.h
++++ b/drivers/net/ethernet/natsemi/sonic.h
+@@ -345,30 +345,30 @@ static void sonic_msg_init(struct net_de
+ as far as we can tell. */
+ /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
+ is a much better name. */
+-static inline void sonic_buf_put(void* base, int bitmode,
++static inline void sonic_buf_put(u16 *base, int bitmode,
+ int offset, __u16 val)
+ {
+ if (bitmode)
+ #ifdef __BIG_ENDIAN
+- ((__u16 *) base + (offset*2))[1] = val;
++ __raw_writew(val, base + (offset * 2) + 1);
+ #else
+- ((__u16 *) base + (offset*2))[0] = val;
++ __raw_writew(val, base + (offset * 2) + 0);
+ #endif
+ else
+- ((__u16 *) base)[offset] = val;
++ __raw_writew(val, base + (offset * 1) + 0);
+ }
+
+-static inline __u16 sonic_buf_get(void* base, int bitmode,
++static inline __u16 sonic_buf_get(u16 *base, int bitmode,
+ int offset)
+ {
+ if (bitmode)
+ #ifdef __BIG_ENDIAN
+- return ((volatile __u16 *) base + (offset*2))[1];
++ return __raw_readw(base + (offset * 2) + 1);
+ #else
+- return ((volatile __u16 *) base + (offset*2))[0];
++ return __raw_readw(base + (offset * 2) + 0);
+ #endif
+ else
+- return ((volatile __u16 *) base)[offset];
++ return __raw_readw(base + (offset * 1) + 0);
+ }
+
+ /* Inlines that you should actually use for reading/writing DMA buffers */
--- /dev/null
+From bba340c79bfe3644829db5c852fdfa9e33837d6d Mon Sep 17 00:00:00 2001
+From: Bo Wu <wubo40@huawei.com>
+Date: Wed, 20 Nov 2019 13:26:17 +0000
+Subject: scsi: iscsi: Avoid potential deadlock in iscsi_if_rx func
+
+From: Bo Wu <wubo40@huawei.com>
+
+commit bba340c79bfe3644829db5c852fdfa9e33837d6d upstream.
+
+In iscsi_if_rx(), after a request is received via iscsi_if_recv_msg(),
+iscsi_if_send_reply() is called in a do-while loop to try to reply to
+the request. If iscsi_if_send_reply() keeps returning -EAGAIN, the loop
+never terminates, locking up the CPU.
+
+For example, if a client only sends messages and never calls recvmsg(),
+the result is a watchdog soft lockup. The details are as follows:
+
+ sock_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);
+ retval = bind(sock_fd, (struct sockaddr *)&src_addr, sizeof(src_addr));
+ while (1) {
+ state_msg = sendmsg(sock_fd, &msg, 0);
+ //Note: recvmsg(sock_fd, &msg, 0) is not processed here.
+ }
+ close(sock_fd);
+
+watchdog: BUG: soft lockup - CPU#7 stuck for 22s! [netlink_test:253305] Sample time: 4000897528 ns(HZ: 250) Sample stat:
+curr: user: 675503481560, nice: 321724050, sys: 448689506750, idle: 4654054240530, iowait: 40885550700, irq: 14161174020, softirq: 8104324140, st: 0
+deta: user: 0, nice: 0, sys: 3998210100, idle: 0, iowait: 0, irq: 1547170, softirq: 242870, st: 0 Sample softirq:
+ TIMER: 992
+ SCHED: 8
+Sample irqstat:
+ irq 2: delta 1003, curr: 3103802, arch_timer
+CPU: 7 PID: 253305 Comm: netlink_test Kdump: loaded Tainted: G OE
+Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
+pstate: 40400005 (nZcv daif +PAN -UAO)
+pc : __alloc_skb+0x104/0x1b0
+lr : __alloc_skb+0x9c/0x1b0
+sp : ffff000033603a30
+x29: ffff000033603a30 x28: 00000000000002dd
+x27: ffff800b34ced810 x26: ffff800ba7569f00
+x25: 00000000ffffffff x24: 0000000000000000
+x23: ffff800f7c43f600 x22: 0000000000480020
+x21: ffff0000091d9000 x20: ffff800b34eff200
+x19: ffff800ba7569f00 x18: 0000000000000000
+x17: 0000000000000000 x16: 0000000000000000
+x15: 0000000000000000 x14: 0001000101000100
+x13: 0000000101010000 x12: 0101000001010100
+x11: 0001010101010001 x10: 00000000000002dd
+x9 : ffff000033603d58 x8 : ffff800b34eff400
+x7 : ffff800ba7569200 x6 : ffff800b34eff400
+x5 : 0000000000000000 x4 : 00000000ffffffff
+x3 : 0000000000000000 x2 : 0000000000000001
+x1 : ffff800b34eff2c0 x0 : 0000000000000300 Call trace:
+__alloc_skb+0x104/0x1b0
+iscsi_if_rx+0x144/0x12bc [scsi_transport_iscsi]
+netlink_unicast+0x1e0/0x258
+netlink_sendmsg+0x310/0x378
+sock_sendmsg+0x4c/0x70
+sock_write_iter+0x90/0xf0
+__vfs_write+0x11c/0x190
+vfs_write+0xac/0x1c0
+ksys_write+0x6c/0xd8
+__arm64_sys_write+0x24/0x30
+el0_svc_common+0x78/0x130
+el0_svc_handler+0x38/0x78
+el0_svc+0x8/0xc
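+
+The fix below simply bounds that reply retry loop. The general shape of
+the pattern, as a standalone sketch (hypothetical names, not the
+transport code):
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  #define MAX_RETRIES 10
+
+  /* Stand-in for a send that keeps hitting a full receive buffer. */
+  static int try_send(void)
+  {
+          return -EAGAIN;
+  }
+
+  int main(void)
+  {
+          int retries = MAX_RETRIES;
+          int err;
+
+          do {
+                  err = try_send();
+                  if (err == -EAGAIN && --retries < 0) {
+                          fprintf(stderr, "giving up\n");
+                          break;
+                  }
+          } while (err < 0);
+
+          return 0;
+  }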
+
+Link: https://lore.kernel.org/r/EDBAAA0BBBA2AC4E9C8B6B81DEEE1D6915E3D4D2@dggeml505-mbx.china.huawei.com
+Signed-off-by: Bo Wu <wubo40@huawei.com>
+Reviewed-by: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Reviewed-by: Lee Duncan <lduncan@suse.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/scsi_transport_iscsi.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -37,6 +37,8 @@
+
+ #define ISCSI_TRANSPORT_VERSION "2.0-870"
+
++#define ISCSI_SEND_MAX_ALLOWED 10
++
+ static int dbg_session;
+ module_param_named(debug_session, dbg_session, int,
+ S_IRUGO | S_IWUSR);
+@@ -3680,6 +3682,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ struct nlmsghdr *nlh;
+ struct iscsi_uevent *ev;
+ uint32_t group;
++ int retries = ISCSI_SEND_MAX_ALLOWED;
+
+ nlh = nlmsg_hdr(skb);
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
+@@ -3710,6 +3713,10 @@ iscsi_if_rx(struct sk_buff *skb)
+ break;
+ err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
+ ev, sizeof(*ev));
++ if (err == -EAGAIN && --retries < 0) {
++ printk(KERN_WARNING "Send reply failed, error %d\n", err);
++ break;
++ }
+ } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
+ skb_pull(skb, rlen);
+ }
--- /dev/null
+From masato.suzuki@wdc.com Tue Jan 28 09:03:58 2020
+From: Masato Suzuki <masato.suzuki@wdc.com>
+Date: Mon, 27 Jan 2020 14:07:46 +0900
+Subject: sd: Fix REQ_OP_ZONE_REPORT completion handling
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, linux-scsi@vger.kernel.org, "Martin K . Petersen" <martin.petersen@oracle.com>
+Cc: Damien Le Moal <damien.lemoal@wdc.com>
+Message-ID: <20200127050746.136440-1-masato.suzuki@wdc.com>
+
+From: Masato Suzuki <masato.suzuki@wdc.com>
+
+
+The ZBC/ZAC REPORT ZONES command may return fewer bytes than requested if
+the number of zones matching the report request is small. However, unlike
+read or write commands, the remainder of an incomplete report zones command
+cannot be automatically requested by the block layer: the start sector of
+the next report cannot be known, and the report reply may not be 512B
+aligned for SAS drives (a report zone reply size is always a multiple of
+64B). The regular request completion path, which executes bio_advance() and
+restarts the remainder of the command, currently causes invalid zone
+descriptor data to be reported to the caller if the report zone reply is
+smaller than 512B (a case that can easily happen for a report of the last
+zones of a SAS drive, for example).
+
+Since blkdev_report_zones() handles report zone command processing in a
+loop until completion (that is, until no more zones are reported), we can
+safely prevent the block layer from performing an incorrect bio_advance()
+call and restarting the remainder of incomplete report zone BIOs. To do
+so, always
+indicate a full completion of REQ_OP_ZONE_REPORT by setting good_bytes to
+the request buffer size and by setting the command resid to 0. This does
+not affect the post processing of the report zone reply done by
+sd_zbc_complete() since the reply header indicates the number of zones
+reported.
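+
+As a worked example of the size mismatch (assuming the usual 64 B report
+header and 64 B zone descriptors defined by ZBC):
+
+  #include <stdio.h>
+
+  #define HDR_BYTES  64
+  #define DESC_BYTES 64
+
+  int main(void)
+  {
+          /* A report covering only the last two zones of a drive. */
+          int reply = HDR_BYTES + 2 * DESC_BYTES;  /* 192 bytes */
+
+          printf("reply=%d bytes, 512-aligned=%s\n",
+                 reply, reply % 512 == 0 ? "yes" : "no");
+          return 0;
+  }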
+
+Fixes: 89d947561077 ("sd: Implement support for ZBC devices")
+Cc: <stable@vger.kernel.org> # 4.19
+Cc: <stable@vger.kernel.org> # 4.14
+Signed-off-by: Masato Suzuki <masato.suzuki@wdc.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sd.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1969,9 +1969,13 @@ static int sd_done(struct scsi_cmnd *SCp
+ }
+ break;
+ case REQ_OP_ZONE_REPORT:
++ /* To avoid that the block layer performs an incorrect
++ * bio_advance() call and restart of the remainder of
++ * incomplete report zone BIOs, always indicate a full
++ * completion of REQ_OP_ZONE_REPORT.
++ */
+ if (!result) {
+- good_bytes = scsi_bufflen(SCpnt)
+- - scsi_get_resid(SCpnt);
++ good_bytes = scsi_bufflen(SCpnt);
+ scsi_set_resid(SCpnt, 0);
+ } else {
+ good_bytes = 0;
scsi-rdma-isert-fix-a-recently-introduced-regression-related-to-logout.patch
tracing-xen-ordered-comparison-of-function-pointers.patch
do_last-fetch-directory-i_mode-and-i_uid-before-it-s-too-late.patch
+net-sonic-add-mutual-exclusion-for-accessing-shared-state.patch
+net-sonic-clear-interrupt-flags-immediately.patch
+net-sonic-use-mmio-accessors.patch
+net-sonic-fix-interface-error-stats-collection.patch
+net-sonic-fix-receive-buffer-handling.patch
+net-sonic-avoid-needless-receive-descriptor-eol-flag-updates.patch
+net-sonic-improve-receive-descriptor-status-flag-check.patch
+net-sonic-fix-receive-buffer-replenishment.patch
+net-sonic-quiesce-sonic-before-re-initializing-descriptor-memory.patch
+net-sonic-fix-command-register-usage.patch
net-sonic-fix-cam-initialization.patch
+net-sonic-prevent-tx-watchdog-timeout.patch
+tracing-use-hist-trigger-s-var_ref-array-to-destroy-var_refs.patch
+tracing-remove-open-coding-of-hist-trigger-var_ref-management.patch
+tracing-fix-histogram-code-when-expression-has-same-var-as-value.patch
+sd-fix-req_op_zone_report-completion-handling.patch
+crypto-geode-aes-switch-to-skcipher-for-cbc-aes-fallback.patch
+coresight-etb10-do-not-call-smp_processor_id-from-preemptible.patch
+coresight-tmc-etf-do-not-call-smp_processor_id-from-preemptible.patch
+libertas-fix-two-buffer-overflows-at-parsing-bss-descriptor.patch
+media-v4l2-ioctl.c-zero-reserved-fields-for-s-try_fmt.patch
+scsi-iscsi-avoid-potential-deadlock-in-iscsi_if_rx-func.patch
--- /dev/null
+From 8bcebc77e85f3d7536f96845a0fe94b1dddb6af0 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 20 Jan 2020 13:07:31 -0500
+Subject: tracing: Fix histogram code when expression has same var as value
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 8bcebc77e85f3d7536f96845a0fe94b1dddb6af0 upstream.
+
+While working on a tool to convert SQL syntax into the histogram language of
+the kernel, I discovered the following bug:
+
+ # echo 'first u64 start_time u64 end_time pid_t pid u64 delta' >> synthetic_events
+ # echo 'hist:keys=pid:start=common_timestamp' > events/sched/sched_waking/trigger
+ # echo 'hist:keys=next_pid:delta=common_timestamp-$start,start2=$start:onmatch(sched.sched_waking).trace(first,$start2,common_timestamp,next_pid,$delta)' > events/sched/sched_switch/trigger
+
+The above commands would not display any histograms on the sched_switch side.
+
+But if I were to swap the location of
+
+ "delta=common_timestamp-$start" with "start2=$start"
+
+Such that the last line had:
+
+ # echo 'hist:keys=next_pid:start2=$start,delta=common_timestamp-$start:onmatch(sched.sched_waking).trace(first,$start2,common_timestamp,next_pid,$delta)' > events/sched/sched_switch/trigger
+
+The histogram works as expected.
+
+What I found out is that the expressions clear out the value once it is
+resolved. As the variables are resolved in the order listed, when
+processing:
+
+ delta=common_timestamp-$start
+
+The $start is cleared. When it gets to "start2=$start", it errors out with
+"unresolved symbol" (which is silent as this happens at the location of the
+trace), and the histogram is dropped.
+
+When processing the histogram for variable references, instead of adding a
+new reference for a variable used twice, use the same reference. That way,
+not only is it more efficient, but the order will no longer matter in
+processing of the variables.
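+
+The reuse amounts to a lookup-before-create with a reference count. A
+simplified, self-contained sketch of that pattern (not the trace code
+itself; all names are made up):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct ref {
+          int var_idx;
+          int refcount;
+  };
+
+  static struct ref *refs[8];
+  static int n_refs;
+
+  /* Reuse an existing reference to the same variable, else create one. */
+  static struct ref *get_or_create_ref(int var_idx)
+  {
+          for (int i = 0; i < n_refs; i++) {
+                  if (refs[i]->var_idx == var_idx) {
+                          refs[i]->refcount++;
+                          return refs[i];
+                  }
+          }
+
+          if (n_refs >= 8)
+                  return NULL;
+
+          struct ref *r = calloc(1, sizeof(*r));
+          if (!r)
+                  return NULL;
+          r->var_idx = var_idx;
+          r->refcount = 1;
+          refs[n_refs++] = r;
+          return r;
+  }
+
+  int main(void)
+  {
+          struct ref *a = get_or_create_ref(3);  /* created */
+          struct ref *b = get_or_create_ref(3);  /* reused  */
+
+          if (!a || !b)
+                  return 1;
+          printf("same object: %d, refcount: %d\n", a == b, a->refcount);
+          return 0;
+  }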
+
+From Tom Zanussi:
+
+ "Just to clarify some more about what the problem was is that without
+ your patch, we would have two separate references to the same variable,
+ and during resolve_var_refs(), they'd both want to be resolved
+ separately, so in this case, since the first reference to start wasn't
+ part of an expression, it wouldn't get the read-once flag set, so would
+ be read normally, and then the second reference would do the read-once
+ read and also be read but using read-once. So everything worked and
+ you didn't see a problem:
+
+ from: start2=$start,delta=common_timestamp-$start
+
+ In the second case, when you switched them around, the first reference
+ would be resolved by doing the read-once, and following that the second
+ reference would try to resolve and see that the variable had already
+ been read, so failed as unset, which caused it to short-circuit out and
+ not do the trigger action to generate the synthetic event:
+
+ to: delta=common_timestamp-$start,start2=$start
+
+ With your patch, we only have the single resolution which happens
+ correctly the one time it's resolved, so this can't happen."
+
+Link: https://lore.kernel.org/r/20200116154216.58ca08eb@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 067fe038e70f6 ("tracing: Add variable reference handling to hist triggers")
+Reviewed-by: Tom Zanussi <zanussi@kernel.org>
+Tested-by: Tom Zanussi <zanussi@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -49,6 +49,7 @@ struct hist_field {
+ struct ftrace_event_field *field;
+ unsigned long flags;
+ hist_field_fn_t fn;
++ unsigned int ref;
+ unsigned int size;
+ unsigned int offset;
+ unsigned int is_signed;
+@@ -2225,8 +2226,16 @@ static int contains_operator(char *str)
+ return field_op;
+ }
+
++static void get_hist_field(struct hist_field *hist_field)
++{
++ hist_field->ref++;
++}
++
+ static void __destroy_hist_field(struct hist_field *hist_field)
+ {
++ if (--hist_field->ref > 1)
++ return;
++
+ kfree(hist_field->var.name);
+ kfree(hist_field->name);
+ kfree(hist_field->type);
+@@ -2268,6 +2277,8 @@ static struct hist_field *create_hist_fi
+ if (!hist_field)
+ return NULL;
+
++ hist_field->ref = 1;
++
+ hist_field->hist_data = hist_data;
+
+ if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
+@@ -2463,6 +2474,17 @@ static struct hist_field *create_var_ref
+ {
+ unsigned long flags = HIST_FIELD_FL_VAR_REF;
+ struct hist_field *ref_field;
++ int i;
++
++ /* Check if the variable already exists */
++ for (i = 0; i < hist_data->n_var_refs; i++) {
++ ref_field = hist_data->var_refs[i];
++ if (ref_field->var.idx == var_field->var.idx &&
++ ref_field->var.hist_data == var_field->hist_data) {
++ get_hist_field(ref_field);
++ return ref_field;
++ }
++ }
+
+ ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
+ if (ref_field) {
--- /dev/null
+From de40f033d4e84e843d6a12266e3869015ea9097c Mon Sep 17 00:00:00 2001
+From: Tom Zanussi <tom.zanussi@linux.intel.com>
+Date: Tue, 18 Dec 2018 14:33:23 -0600
+Subject: tracing: Remove open-coding of hist trigger var_ref management
+
+From: Tom Zanussi <tom.zanussi@linux.intel.com>
+
+commit de40f033d4e84e843d6a12266e3869015ea9097c upstream.
+
+Have create_var_ref() manage the hist trigger's var_ref list, rather
+than having similar code doing it in multiple places. This cleans up
+the code and makes sure var_refs are always accounted properly.
+
+Also, document the var_ref-related functions to make their purpose
+clearer.
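+
+The cleanup follows a common constructor pattern: the create function
+takes the owning trigger and does the registration itself, so no call
+site can forget the bookkeeping. A hedged sketch of that shape (all
+names hypothetical):
+
+  #include <stdlib.h>
+
+  struct owner {
+          void *items[8];
+          int n_items;
+  };
+
+  /* Allocate an item and attach it to its owner in one place. */
+  static void *create_and_register(struct owner *o)
+  {
+          if (o->n_items >= 8)
+                  return NULL;
+
+          void *item = calloc(1, 16);
+          if (!item)
+                  return NULL;
+
+          o->items[o->n_items++] = item;  /* bookkeeping lives here only */
+          return item;
+  }
+
+  int main(void)
+  {
+          struct owner o = { 0 };
+
+          return create_and_register(&o) ? 0 : 1;
+  }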
+
+Link: http://lkml.kernel.org/r/05ddae93ff514e66fc03897d6665231892939913.1545161087.git.tom.zanussi@linux.intel.com
+
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 93 +++++++++++++++++++++++++++++++--------
+ 1 file changed, 75 insertions(+), 18 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1274,6 +1274,17 @@ static u64 hist_field_cpu(struct hist_fi
+ return cpu;
+ }
+
++/**
++ * check_field_for_var_ref - Check if a VAR_REF field references a variable
++ * @hist_field: The VAR_REF field to check
++ * @var_data: The hist trigger that owns the variable
++ * @var_idx: The trigger variable identifier
++ *
++ * Check the given VAR_REF field to see whether or not it references
++ * the given variable associated with the given trigger.
++ *
++ * Return: The VAR_REF field if it does reference the variable, NULL if not
++ */
+ static struct hist_field *
+ check_field_for_var_ref(struct hist_field *hist_field,
+ struct hist_trigger_data *var_data,
+@@ -1324,6 +1335,18 @@ check_field_for_var_refs(struct hist_tri
+ return found;
+ }
+
++/**
++ * find_var_ref - Check if a trigger has a reference to a trigger variable
++ * @hist_data: The hist trigger that might have a reference to the variable
++ * @var_data: The hist trigger that owns the variable
++ * @var_idx: The trigger variable identifier
++ *
++ * Check the list of var_refs[] on the first hist trigger to see
++ * whether any of them are references to the variable on the second
++ * trigger.
++ *
++ * Return: The VAR_REF field referencing the variable if so, NULL if not
++ */
+ static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
+ struct hist_trigger_data *var_data,
+ unsigned int var_idx)
+@@ -1350,6 +1373,20 @@ static struct hist_field *find_var_ref(s
+ return found;
+ }
+
++/**
++ * find_any_var_ref - Check if there is a reference to a given trigger variable
++ * @hist_data: The hist trigger
++ * @var_idx: The trigger variable identifier
++ *
++ * Check to see whether the given variable is currently referenced by
++ * any other trigger.
++ *
++ * The trigger the variable is defined on is explicitly excluded - the
++ * assumption being that a self-reference doesn't prevent a trigger
++ * from being removed.
++ *
++ * Return: The VAR_REF field referencing the variable if so, NULL if not
++ */
+ static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
+ unsigned int var_idx)
+ {
+@@ -1368,6 +1405,19 @@ static struct hist_field *find_any_var_r
+ return found;
+ }
+
++/**
++ * check_var_refs - Check if there is a reference to any of trigger's variables
++ * @hist_data: The hist trigger
++ *
++ * A trigger can define one or more variables. If any one of them is
++ * currently referenced by any other trigger, this function will
++ * determine that.
++
++ * Typically used to determine whether or not a trigger can be removed
++ * - if there are any references to a trigger's variables, it cannot.
++ *
++ * Return: True if there is a reference to any of trigger's variables
++ */
+ static bool check_var_refs(struct hist_trigger_data *hist_data)
+ {
+ struct hist_field *field;
+@@ -2392,7 +2442,23 @@ static int init_var_ref(struct hist_fiel
+ goto out;
+ }
+
+-static struct hist_field *create_var_ref(struct hist_field *var_field,
++/**
++ * create_var_ref - Create a variable reference and attach it to trigger
++ * @hist_data: The trigger that will be referencing the variable
++ * @var_field: The VAR field to create a reference to
++ * @system: The optional system string
++ * @event_name: The optional event_name string
++ *
++ * Given a variable hist_field, create a VAR_REF hist_field that
++ * represents a reference to it.
++ *
++ * This function also adds the reference to the trigger that
++ * now references the variable.
++ *
++ * Return: The VAR_REF field if successful, NULL if not
++ */
++static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
++ struct hist_field *var_field,
+ char *system, char *event_name)
+ {
+ unsigned long flags = HIST_FIELD_FL_VAR_REF;
+@@ -2404,6 +2470,9 @@ static struct hist_field *create_var_ref
+ destroy_hist_field(ref_field, 0);
+ return NULL;
+ }
++
++ hist_data->var_refs[hist_data->n_var_refs] = ref_field;
++ ref_field->var_ref_idx = hist_data->n_var_refs++;
+ }
+
+ return ref_field;
+@@ -2477,7 +2546,8 @@ static struct hist_field *parse_var_ref(
+
+ var_field = find_event_var(hist_data, system, event_name, var_name);
+ if (var_field)
+- ref_field = create_var_ref(var_field, system, event_name);
++ ref_field = create_var_ref(hist_data, var_field,
++ system, event_name);
+
+ if (!ref_field)
+ hist_err_event("Couldn't find variable: $",
+@@ -2597,8 +2667,6 @@ static struct hist_field *parse_atom(str
+ if (!s) {
+ hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var);
+ if (hist_field) {
+- hist_data->var_refs[hist_data->n_var_refs] = hist_field;
+- hist_field->var_ref_idx = hist_data->n_var_refs++;
+ if (var_name) {
+ hist_field = create_alias(hist_data, hist_field, var_name);
+ if (!hist_field) {
+@@ -3376,7 +3444,6 @@ static int onmax_create(struct hist_trig
+ unsigned int var_ref_idx = hist_data->n_var_refs;
+ struct field_var *field_var;
+ char *onmax_var_str, *param;
+- unsigned long flags;
+ unsigned int i;
+ int ret = 0;
+
+@@ -3393,18 +3460,10 @@ static int onmax_create(struct hist_trig
+ return -EINVAL;
+ }
+
+- flags = HIST_FIELD_FL_VAR_REF;
+- ref_field = create_hist_field(hist_data, NULL, flags, NULL);
++ ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
+ if (!ref_field)
+ return -ENOMEM;
+
+- if (init_var_ref(ref_field, var_field, NULL, NULL)) {
+- destroy_hist_field(ref_field, 0);
+- ret = -ENOMEM;
+- goto out;
+- }
+- hist_data->var_refs[hist_data->n_var_refs] = ref_field;
+- ref_field->var_ref_idx = hist_data->n_var_refs++;
+ data->onmax.var = ref_field;
+
+ data->fn = onmax_save;
+@@ -3595,9 +3654,6 @@ static void save_synth_var_ref(struct hi
+ struct hist_field *var_ref)
+ {
+ hist_data->synth_var_refs[hist_data->n_synth_var_refs++] = var_ref;
+-
+- hist_data->var_refs[hist_data->n_var_refs] = var_ref;
+- var_ref->var_ref_idx = hist_data->n_var_refs++;
+ }
+
+ static int check_synth_field(struct synth_event *event,
+@@ -3752,7 +3808,8 @@ static int onmatch_create(struct hist_tr
+ }
+
+ if (check_synth_field(event, hist_field, field_pos) == 0) {
+- var_ref = create_var_ref(hist_field, system, event_name);
++ var_ref = create_var_ref(hist_data, hist_field,
++ system, event_name);
+ if (!var_ref) {
+ kfree(p);
+ ret = -ENOMEM;
--- /dev/null
+From 656fe2ba85e81d00e4447bf77b8da2be3c47acb2 Mon Sep 17 00:00:00 2001
+From: Tom Zanussi <tom.zanussi@linux.intel.com>
+Date: Tue, 18 Dec 2018 14:33:24 -0600
+Subject: tracing: Use hist trigger's var_ref array to destroy var_refs
+
+From: Tom Zanussi <tom.zanussi@linux.intel.com>
+
+commit 656fe2ba85e81d00e4447bf77b8da2be3c47acb2 upstream.
+
+Since every var ref for a trigger has an entry in the var_ref[] array,
+use that to destroy the var_refs, instead of piecemeal via the field
+expressions.
+
+This allows us to avoid having to keep separate, differently handled
+lists for the action-related references, which future patches will
+remove.
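+
+In other words, the var_refs[] array becomes the single owner: the
+expression-tree teardown skips references, and the array frees each one
+exactly once. A simplified standalone illustration (hypothetical names):
+
+  #include <stdlib.h>
+
+  #define FL_VAR_REF 0x1
+
+  struct field {
+          unsigned int flags;
+  };
+
+  /* Expression teardown leaves var refs alone ... */
+  static void destroy_field(struct field *f)
+  {
+          if (!f || (f->flags & FL_VAR_REF))
+                  return;  /* freed via the var_refs[] array instead */
+          free(f);
+  }
+
+  /* ... and the trigger frees them exactly once from its array. */
+  static void destroy_var_refs(struct field **refs, int n)
+  {
+          for (int i = 0; i < n; i++) {
+                  free(refs[i]);
+                  refs[i] = NULL;
+          }
+  }
+
+  int main(void)
+  {
+          struct field *ref = calloc(1, sizeof(*ref));
+          struct field *refs[1] = { ref };
+
+          if (!ref)
+                  return 1;
+          ref->flags = FL_VAR_REF;
+
+          destroy_field(ref);         /* no-op: the array still owns it */
+          destroy_var_refs(refs, 1);  /* single point of destruction    */
+          return 0;
+  }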
+
+Link: http://lkml.kernel.org/r/fad1a164f0e257c158e70d6eadbf6c586e04b2a2.1545161087.git.tom.zanussi@linux.intel.com
+
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -2175,6 +2175,15 @@ static int contains_operator(char *str)
+ return field_op;
+ }
+
++static void __destroy_hist_field(struct hist_field *hist_field)
++{
++ kfree(hist_field->var.name);
++ kfree(hist_field->name);
++ kfree(hist_field->type);
++
++ kfree(hist_field);
++}
++
+ static void destroy_hist_field(struct hist_field *hist_field,
+ unsigned int level)
+ {
+@@ -2186,14 +2195,13 @@ static void destroy_hist_field(struct hi
+ if (!hist_field)
+ return;
+
++ if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
++ return; /* var refs will be destroyed separately */
++
+ for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
+ destroy_hist_field(hist_field->operands[i], level + 1);
+
+- kfree(hist_field->var.name);
+- kfree(hist_field->name);
+- kfree(hist_field->type);
+-
+- kfree(hist_field);
++ __destroy_hist_field(hist_field);
+ }
+
+ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+@@ -2320,6 +2328,12 @@ static void destroy_hist_fields(struct h
+ hist_data->fields[i] = NULL;
+ }
+ }
++
++ for (i = 0; i < hist_data->n_var_refs; i++) {
++ WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
++ __destroy_hist_field(hist_data->var_refs[i]);
++ hist_data->var_refs[i] = NULL;
++ }
+ }
+
+ static int init_var_ref(struct hist_field *ref_field,