--- /dev/null
+From 0764e365dacd0b8f75c1736f9236be280649bd18 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?= <u@pkh.me>
+Date: Sun, 5 Sep 2021 02:20:27 +0200
+Subject: arm64: dts: allwinner: h5: NanoPI Neo 2: Fix ethernet node
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Clément Bœsch <u@pkh.me>
+
+commit 0764e365dacd0b8f75c1736f9236be280649bd18 upstream.
+
+RX and TX delay are provided by ethernet PHY. Reflect that in ethernet
+node.
+
+Fixes: 44a94c7ef989 ("arm64: dts: allwinner: H5: Restore EMAC changes")
+Signed-off-by: Clément Bœsch <u@pkh.me>
+Reviewed-by: Jernej Skrabec <jernej.skrabec@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://lore.kernel.org/r/20210905002027.171984-1-u@pkh.me
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+@@ -75,7 +75,7 @@
+ pinctrl-0 = <&emac_rgmii_pins>;
+ phy-supply = <&reg_gmac_3v3>;
+ phy-handle = <&ext_rgmii_phy>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ status = "okay";
+ };
+
--- /dev/null
+From fda7a38714f40b635f5502ec4855602c6b33dad2 Mon Sep 17 00:00:00 2001
+From: Xu Kuohai <xukuohai@huawei.com>
+Date: Tue, 19 Oct 2021 03:29:34 +0000
+Subject: bpf: Fix error usage of map_fd and fdget() in generic_map_update_batch()
+
+From: Xu Kuohai <xukuohai@huawei.com>
+
+commit fda7a38714f40b635f5502ec4855602c6b33dad2 upstream.
+
+1. The ufd in generic_map_update_batch() should be read from batch.map_fd;
+2. A call to fdget() should be followed by a symmetric call to fdput().
+
+Fixes: aa2e93b8e58e ("bpf: Add generic support for update and delete batch ops")
+Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20211019032934.1210517-1-xukuohai@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/syscall.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1309,12 +1309,11 @@ int generic_map_update_batch(struct bpf_
+ void __user *values = u64_to_user_ptr(attr->batch.values);
+ void __user *keys = u64_to_user_ptr(attr->batch.keys);
+ u32 value_size, cp, max_count;
+- int ufd = attr->map_fd;
++ int ufd = attr->batch.map_fd;
+ void *key, *value;
+ struct fd f;
+ int err = 0;
+
+- f = fdget(ufd);
+ if (attr->batch.elem_flags & ~BPF_F_LOCK)
+ return -EINVAL;
+
+@@ -1339,6 +1338,7 @@ int generic_map_update_batch(struct bpf_
+ return -ENOMEM;
+ }
+
++ f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
+ for (cp = 0; cp < max_count; cp++) {
+ err = -EFAULT;
+ if (copy_from_user(key, keys + cp * map->key_size,
+@@ -1358,6 +1358,7 @@ int generic_map_update_batch(struct bpf_
+
+ kfree(value);
+ kfree(key);
++ fdput(f);
+ return err;
+ }
+
--- /dev/null
+From 54713c85f536048e685258f880bf298a74c3620d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Tue, 26 Oct 2021 13:00:19 +0200
+Subject: bpf: Fix potential race in tail call compatibility check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+commit 54713c85f536048e685258f880bf298a74c3620d upstream.
+
+Lorenzo noticed that the code testing for program type compatibility of
+tail call maps is potentially racy in that two threads could encounter a
+map with an unset type simultaneously and both return true even though they
+are inserting incompatible programs.
+
+The race window is quite small, but artificially enlarging it by adding a
+usleep_range() inside the check in bpf_prog_array_compatible() makes it
+trivial to trigger from userspace with a program that does, essentially:
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, 4, 4, 2, 0);
+ pid = fork();
+ if (pid) {
+ key = 0;
+ value = xdp_fd;
+ } else {
+ key = 1;
+ value = tc_fd;
+ }
+ err = bpf_map_update_elem(map_fd, &key, &value, 0);
+
+While the race window is small, it has potentially serious ramifications in
+that triggering it would allow a BPF program to tail call to a program of a
+different type. So let's get rid of it by protecting the update with a
+spinlock. The commit in the Fixes tag is the last commit that touches the
+code in question.
+
+v2:
+- Use a spinlock instead of an atomic variable and cmpxchg() (Alexei)
+v3:
+- Put lock and the members it protects into an embedded 'owner' struct (Daniel)
+
+Fixes: 3324b584b6f6 ("ebpf: misc core cleanup")
+Reported-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20211026110019.363464-1-toke@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h | 7 +++++--
+ kernel/bpf/arraymap.c | 1 +
+ kernel/bpf/core.c | 20 +++++++++++++-------
+ kernel/bpf/syscall.c | 6 ++++--
+ 4 files changed, 23 insertions(+), 11 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -862,8 +862,11 @@ struct bpf_array_aux {
+ * stored in the map to make sure that all callers and callees have
+ * the same prog type and JITed flag.
+ */
+- enum bpf_prog_type type;
+- bool jited;
++ struct {
++ spinlock_t lock;
++ enum bpf_prog_type type;
++ bool jited;
++ } owner;
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -1025,6 +1025,7 @@ static struct bpf_map *prog_array_map_al
+ INIT_WORK(&aux->work, prog_array_map_clear_deferred);
+ INIT_LIST_HEAD(&aux->poke_progs);
+ mutex_init(&aux->poke_mutex);
++ spin_lock_init(&aux->owner.lock);
+
+ map = array_map_alloc(attr);
+ if (IS_ERR(map)) {
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1775,20 +1775,26 @@ static unsigned int __bpf_prog_ret0_warn
+ bool bpf_prog_array_compatible(struct bpf_array *array,
+ const struct bpf_prog *fp)
+ {
++ bool ret;
++
+ if (fp->kprobe_override)
+ return false;
+
+- if (!array->aux->type) {
++ spin_lock(&array->aux->owner.lock);
++
++ if (!array->aux->owner.type) {
+ /* There's no owner yet where we could check for
+ * compatibility.
+ */
+- array->aux->type = fp->type;
+- array->aux->jited = fp->jited;
+- return true;
++ array->aux->owner.type = fp->type;
++ array->aux->owner.jited = fp->jited;
++ ret = true;
++ } else {
++ ret = array->aux->owner.type == fp->type &&
++ array->aux->owner.jited == fp->jited;
+ }
+-
+- return array->aux->type == fp->type &&
+- array->aux->jited == fp->jited;
++ spin_unlock(&array->aux->owner.lock);
++ return ret;
+ }
+
+ static int bpf_check_tail_call(const struct bpf_prog *fp)
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -535,8 +535,10 @@ static void bpf_map_show_fdinfo(struct s
+
+ if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+ array = container_of(map, struct bpf_array, map);
+- type = array->aux->type;
+- jited = array->aux->jited;
++ spin_lock(&array->aux->owner.lock);
++ type = array->aux->owner.type;
++ jited = array->aux->owner.jited;
++ spin_unlock(&array->aux->owner.lock);
+ }
+
+ seq_printf(m,
--- /dev/null
+From 689a0a9f505f7bffdefe6f17fddb41c8ab6344f6 Mon Sep 17 00:00:00 2001
+From: Janusz Dziedzic <janusz.dziedzic@gmail.com>
+Date: Sun, 24 Oct 2021 22:15:46 +0200
+Subject: cfg80211: correct bridge/4addr mode check
+
+From: Janusz Dziedzic <janusz.dziedzic@gmail.com>
+
+commit 689a0a9f505f7bffdefe6f17fddb41c8ab6344f6 upstream.
+
+Without the patch we fail:
+
+$ sudo brctl addbr br0
+$ sudo brctl addif br0 wlp1s0
+$ sudo iw wlp1s0 set 4addr on
+command failed: Device or resource busy (-16)
+
+Last command failed but iface was already in 4addr mode.
+
+Fixes: ad4bb6f8883a ("cfg80211: disallow bridging managed/adhoc interfaces")
+Signed-off-by: Janusz Dziedzic <janusz.dziedzic@gmail.com>
+Link: https://lore.kernel.org/r/20211024201546.614379-1-janusz.dziedzic@gmail.com
+[add fixes tag, fix indentation, edit commit log]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/wireless/util.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -1028,14 +1028,14 @@ int cfg80211_change_iface(struct cfg8021
+ !(rdev->wiphy.interface_modes & (1 << ntype)))
+ return -EOPNOTSUPP;
+
+- /* if it's part of a bridge, reject changing type to station/ibss */
+- if (netif_is_bridge_port(dev) &&
+- (ntype == NL80211_IFTYPE_ADHOC ||
+- ntype == NL80211_IFTYPE_STATION ||
+- ntype == NL80211_IFTYPE_P2P_CLIENT))
+- return -EBUSY;
+-
+ if (ntype != otype) {
++ /* if it's part of a bridge, reject changing type to station/ibss */
++ if (netif_is_bridge_port(dev) &&
++ (ntype == NL80211_IFTYPE_ADHOC ||
++ ntype == NL80211_IFTYPE_STATION ||
++ ntype == NL80211_IFTYPE_P2P_CLIENT))
++ return -EBUSY;
++
+ dev->ieee80211_ptr->use_4addr = false;
+ dev->ieee80211_ptr->mesh_id_up_len = 0;
+ wdev_lock(dev->ieee80211_ptr);
--- /dev/null
+From 04f8ef5643bcd8bcde25dfdebef998aea480b2ba Mon Sep 17 00:00:00 2001
+From: Quanyang Wang <quanyang.wang@windriver.com>
+Date: Mon, 18 Oct 2021 15:56:23 +0800
+Subject: cgroup: Fix memory leak caused by missing cgroup_bpf_offline
+
+From: Quanyang Wang <quanyang.wang@windriver.com>
+
+commit 04f8ef5643bcd8bcde25dfdebef998aea480b2ba upstream.
+
+When enabling CONFIG_CGROUP_BPF, kmemleak can be observed by running
+the command as below:
+
+ $mount -t cgroup -o none,name=foo cgroup cgroup/
+ $umount cgroup/
+
+unreferenced object 0xc3585c40 (size 64):
+ comm "mount", pid 425, jiffies 4294959825 (age 31.990s)
+ hex dump (first 32 bytes):
+ 01 00 00 80 84 8c 28 c0 00 00 00 00 00 00 00 00 ......(.........
+ 00 00 00 00 00 00 00 00 6c 43 a0 c3 00 00 00 00 ........lC......
+ backtrace:
+ [<e95a2f9e>] cgroup_bpf_inherit+0x44/0x24c
+ [<1f03679c>] cgroup_setup_root+0x174/0x37c
+ [<ed4b0ac5>] cgroup1_get_tree+0x2c0/0x4a0
+ [<f85b12fd>] vfs_get_tree+0x24/0x108
+ [<f55aec5c>] path_mount+0x384/0x988
+ [<e2d5e9cd>] do_mount+0x64/0x9c
+ [<208c9cfe>] sys_mount+0xfc/0x1f4
+ [<06dd06e0>] ret_fast_syscall+0x0/0x48
+ [<a8308cb3>] 0xbeb4daa8
+
+This is because that since the commit 2b0d3d3e4fcf ("percpu_ref: reduce
+memory footprint of percpu_ref in fast path") root_cgrp->bpf.refcnt.data
+is allocated by the function percpu_ref_init in cgroup_bpf_inherit which
+is called by cgroup_setup_root when mounting, but not freed along with
+root_cgrp when umounting. Adding cgroup_bpf_offline which calls
+percpu_ref_kill to cgroup_kill_sb can free root_cgrp->bpf.refcnt.data in
+umount path.
+
+This patch also fixes the commit 4bfc0bb2c60e ("bpf: decouple the lifetime
+of cgroup_bpf from cgroup itself"). A cgroup_bpf_offline is needed to do a
+cleanup that frees the resources which are allocated by cgroup_bpf_inherit
+in cgroup_setup_root.
+
+And inside cgroup_bpf_offline, cgroup_get() is at the beginning and
+cgroup_put is at the end of cgroup_bpf_release which is called by
+cgroup_bpf_offline. So cgroup_bpf_offline can keep the balance of
+cgroup's refcount.
+
+Fixes: 2b0d3d3e4fcf ("percpu_ref: reduce memory footprint of percpu_ref in fast path")
+Fixes: 4bfc0bb2c60e ("bpf: decouple the lifetime of cgroup_bpf from cgroup itself")
+Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Roman Gushchin <guro@fb.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20211018075623.26884-1-quanyang.wang@windriver.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cgroup/cgroup.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2147,8 +2147,10 @@ static void cgroup_kill_sb(struct super_
+ * And don't kill the default root.
+ */
+ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
+- !percpu_ref_is_dying(&root->cgrp.self.refcnt))
++ !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
++ cgroup_bpf_offline(&root->cgrp);
+ percpu_ref_kill(&root->cgrp.self.refcnt);
++ }
+ cgroup_put(&root->cgrp);
+ kernfs_kill_sb(sb);
+ }
--- /dev/null
+From 85fe6415c146d5d42ce300c12f1ecf4d4af47d40 Mon Sep 17 00:00:00 2001
+From: Jonas Gorski <jonas.gorski@gmail.com>
+Date: Thu, 14 Oct 2021 14:33:42 +0200
+Subject: gpio: xgs-iproc: fix parsing of ngpios property
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+commit 85fe6415c146d5d42ce300c12f1ecf4d4af47d40 upstream.
+
+of_property_read_u32 returns 0 on success, not true, so we need to
+invert the check to actually take over the provided ngpio value.
+
+Fixes: 6a41b6c5fc20 ("gpio: Add xgs-iproc driver")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Reviewed-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-xgs-iproc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-xgs-iproc.c
++++ b/drivers/gpio/gpio-xgs-iproc.c
+@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platf
+ }
+
+ chip->gc.label = dev_name(dev);
+- if (of_property_read_u32(dn, "ngpios", &num_gpios))
++ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
+ chip->gc.ngpio = num_gpios;
+
+ irq = platform_get_irq(pdev, 0);
--- /dev/null
+From 13bac861952a78664907a0f927d3e874e9a59034 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Date: Wed, 13 Oct 2021 10:18:52 -0400
+Subject: IB/hfi1: Fix abba locking issue with sc_disable()
+
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+
+commit 13bac861952a78664907a0f927d3e874e9a59034 upstream.
+
+sc_disable() after having disabled the send context wakes up any waiters
+by calling hfi1_qp_wakeup() while holding the waitlock for the sc.
+
+This is contrary to the model for all other calls to hfi1_qp_wakeup()
+where the waitlock is dropped and a local is used to drive calls to
+hfi1_qp_wakeup().
+
+Fix by moving the sc->piowait into a local list and driving the wakeup
+calls from the list.
+
+Fixes: 099a884ba4c0 ("IB/hfi1: Handle wakeup of orphaned QPs for pio")
+Link: https://lore.kernel.org/r/20211013141852.128104.2682.stgit@awfm-01.cornelisnetworks.com
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/hfi1/pio.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -920,6 +920,7 @@ void sc_disable(struct send_context *sc)
+ {
+ u64 reg;
+ struct pio_buf *pbuf;
++ LIST_HEAD(wake_list);
+
+ if (!sc)
+ return;
+@@ -954,19 +955,21 @@ void sc_disable(struct send_context *sc)
+ spin_unlock(&sc->release_lock);
+
+ write_seqlock(&sc->waitlock);
+- while (!list_empty(&sc->piowait)) {
++ if (!list_empty(&sc->piowait))
++ list_move(&sc->piowait, &wake_list);
++ write_sequnlock(&sc->waitlock);
++ while (!list_empty(&wake_list)) {
+ struct iowait *wait;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
+
+- wait = list_first_entry(&sc->piowait, struct iowait, list);
++ wait = list_first_entry(&wake_list, struct iowait, list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+ }
+- write_sequnlock(&sc->waitlock);
+
+ spin_unlock_irq(&sc->alloc_lock);
+ }
--- /dev/null
+From d39bf40e55e666b5905fdbd46a0dced030ce87be Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Date: Tue, 12 Oct 2021 13:55:19 -0400
+Subject: IB/qib: Protect from buffer overflow in struct qib_user_sdma_pkt fields
+
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+
+commit d39bf40e55e666b5905fdbd46a0dced030ce87be upstream.
+
+Overflowing either addrlimit or bytes_togo can allow userspace to trigger
+a buffer overflow of kernel memory. Check for overflows in all the places
+doing math on user controlled buffers.
+
+Fixes: f931551bafe1 ("IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters")
+Link: https://lore.kernel.org/r/20211012175519.7298.77738.stgit@awfm-01.cornelisnetworks.com
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qib/qib_user_sdma.c | 33 ++++++++++++++++++++----------
+ 1 file changed, 23 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -602,7 +602,7 @@ done:
+ /*
+ * How many pages in this iovec element?
+ */
+-static int qib_user_sdma_num_pages(const struct iovec *iov)
++static size_t qib_user_sdma_num_pages(const struct iovec *iov)
+ {
+ const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long len = iov->iov_len;
+@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(
+ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+- unsigned long addr, int tlen, int npages)
++ unsigned long addr, int tlen, size_t npages)
+ {
+ struct page *pages[8];
+ int i, j;
+@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const s
+ unsigned long idx;
+
+ for (idx = 0; idx < niov; idx++) {
+- const int npages = qib_user_sdma_num_pages(iov + idx);
++ const size_t npages = qib_user_sdma_num_pages(iov + idx);
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+ ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
+@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(cons
+ unsigned pktnw;
+ unsigned pktnwc;
+ int nfrags = 0;
+- int npages = 0;
+- int bytes_togo = 0;
++ size_t npages = 0;
++ size_t bytes_togo = 0;
+ int tiddma = 0;
+ int cfur;
+
+@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(cons
+
+ npages += qib_user_sdma_num_pages(&iov[idx]);
+
+- bytes_togo += slen;
++ if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
++ bytes_togo > type_max(typeof(pkt->bytes_togo))) {
++ ret = -EINVAL;
++ goto free_pbc;
++ }
+ pktnwc += slen >> 2;
+ idx++;
+ nfrags++;
+@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(cons
+ }
+
+ if (frag_size) {
+- int tidsmsize, n;
+- size_t pktsize;
++ size_t tidsmsize, n, pktsize, sz, addrlimit;
+
+ n = npages*((2*PAGE_SIZE/frag_size)+1);
+ pktsize = struct_size(pkt, addr, n);
+@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(cons
+ else
+ tidsmsize = 0;
+
+- pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
++ if (check_add_overflow(pktsize, tidsmsize, &sz)) {
++ ret = -EINVAL;
++ goto free_pbc;
++ }
++ pkt = kmalloc(sz, GFP_KERNEL);
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto free_pbc;
+ }
+ pkt->largepkt = 1;
+ pkt->frag_size = frag_size;
+- pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
++ if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
++ &addrlimit) ||
++ addrlimit > type_max(typeof(pkt->addrlimit))) {
++ ret = -EINVAL;
++ goto free_pbc;
++ }
++ pkt->addrlimit = addrlimit;
+
+ if (tiddma) {
+ char *tidsm = (char *)pkt + pktsize;
--- /dev/null
+From 759635760a804b0d8ad0cc677b650f1544cae22f Mon Sep 17 00:00:00 2001
+From: Ido Schimmel <idosch@nvidia.com>
+Date: Sun, 24 Oct 2021 09:40:14 +0300
+Subject: mlxsw: pci: Recycle received packet upon allocation failure
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+commit 759635760a804b0d8ad0cc677b650f1544cae22f upstream.
+
+When the driver fails to allocate a new Rx buffer, it passes an empty Rx
+descriptor (contains zero address and size) to the device and marks it
+as invalid by setting the skb pointer in the descriptor's metadata to
+NULL.
+
+After processing enough Rx descriptors, the driver will try to process
+the invalid descriptor, but will return immediately seeing that the skb
+pointer is NULL. Since the driver no longer passes new Rx descriptors to
+the device, the Rx queue will eventually become full and the device will
+start to drop packets.
+
+Fix this by recycling the received packet if allocation of the new
+packet failed. This means that allocation is no longer performed at the
+end of the Rx routine, but at the start, before tearing down the DMA
+mapping of the received packet.
+
+Remove the comment about the descriptor being zeroed as it is no longer
+correct. This is OK because we either use the descriptor as-is (when
+recycling) or overwrite its address and size fields with that of the
+newly allocated Rx buffer.
+
+The issue was discovered when a process ("perf") consumed too much
+memory and put the system under memory pressure. It can be reproduced by
+injecting slab allocation failures [1]. After the fix, the Rx queue no
+longer comes to a halt.
+
+[1]
+ # echo 10 > /sys/kernel/debug/failslab/times
+ # echo 1000 > /sys/kernel/debug/failslab/interval
+ # echo 100 > /sys/kernel/debug/failslab/probability
+
+ FAULT_INJECTION: forcing a failure.
+ name failslab, interval 1000, probability 100, space 0, times 8
+ [...]
+ Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x34/0x44
+ should_fail.cold+0x32/0x37
+ should_failslab+0x5/0x10
+ kmem_cache_alloc_node+0x23/0x190
+ __alloc_skb+0x1f9/0x280
+ __netdev_alloc_skb+0x3a/0x150
+ mlxsw_pci_rdq_skb_alloc+0x24/0x90
+ mlxsw_pci_cq_tasklet+0x3dc/0x1200
+ tasklet_action_common.constprop.0+0x9f/0x100
+ __do_softirq+0xb5/0x252
+ irq_exit_rcu+0x7a/0xa0
+ common_interrupt+0x83/0xa0
+ </IRQ>
+ asm_common_interrupt+0x1e/0x40
+ RIP: 0010:cpuidle_enter_state+0xc8/0x340
+ [...]
+ mlxsw_spectrum2 0000:06:00.0: Failed to alloc skb for RDQ
+
+Fixes: eda6500a987a ("mlxsw: Add PCI bus implementation")
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Link: https://lore.kernel.org/r/20211024064014.1060919-1-idosch@idosch.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/pci.c | 25 ++++++++++++-------------
+ 1 file changed, 12 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+@@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struc
+ struct sk_buff *skb;
+ int err;
+
+- elem_info->u.rdq.skb = NULL;
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+- /* Assume that wqe was previously zeroed. */
+-
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+@@ -548,21 +545,26 @@ static void mlxsw_pci_cqe_rdq_handle(str
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ struct mlxsw_rx_info rx_info = {};
+- char *wqe;
++ char wqe[MLXSW_PCI_WQE_SIZE];
+ struct sk_buff *skb;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+- skb = elem_info->u.sdq.skb;
+- if (!skb)
+- return;
+- wqe = elem_info->elem;
+- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
++ skb = elem_info->u.rdq.skb;
++ memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
++ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
++ if (err) {
++ dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
++ goto out;
++ }
++
++ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
++
+ if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
+ rx_info.is_lag = true;
+ rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+@@ -594,10 +596,7 @@ static void mlxsw_pci_cqe_rdq_handle(str
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+- memset(wqe, 0, q->elem_size);
+- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+- if (err)
+- dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
++out:
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
--- /dev/null
+From 6f68cd634856f8ca93bafd623ba5357e0f648c68 Mon Sep 17 00:00:00 2001
+From: Pavel Skripkin <paskripkin@gmail.com>
+Date: Sun, 24 Oct 2021 16:13:56 +0300
+Subject: net: batman-adv: fix error handling
+
+From: Pavel Skripkin <paskripkin@gmail.com>
+
+commit 6f68cd634856f8ca93bafd623ba5357e0f648c68 upstream.
+
+Syzbot reported ODEBUG warning in batadv_nc_mesh_free(). The problem was
+in wrong error handling in batadv_mesh_init().
+
+Before this patch batadv_mesh_init() was calling batadv_mesh_free() in case
+of any batadv_*_init() calls failure. This approach may work well, when
+there is some kind of indicator, which can tell which parts of batadv are
+initialized; but there isn't any.
+
+All written above lead to cleaning up uninitialized fields. Even if we hide
+ODEBUG warning by initializing bat_priv->nc.work, syzbot was able to hit
+GPF in batadv_nc_purge_paths(), because hash pointer is still NULL. [1]
+
+To fix these bugs we can unwind batadv_*_init() calls one by one.
+It is good approach for 2 reasons: 1) It fixes bugs on error handling
+path 2) It improves the performance, since we won't call unneeded
+batadv_*_free() functions.
+
+So, this patch makes all batadv_*_init() clean up all allocated memory
+before returning with an error to not call corresponding batadv_*_free()
+and open-codes batadv_mesh_free() with proper order to avoid touching
+uninitialized fields.
+
+Link: https://lore.kernel.org/netdev/000000000000c87fbd05cef6bcb0@google.com/ [1]
+Reported-and-tested-by: syzbot+28b0702ada0bf7381f58@syzkaller.appspotmail.com
+Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol")
+Signed-off-by: Pavel Skripkin <paskripkin@gmail.com>
+Acked-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/bridge_loop_avoidance.c | 8 +++-
+ net/batman-adv/main.c | 56 +++++++++++++++++++++++----------
+ net/batman-adv/network-coding.c | 4 +-
+ net/batman-adv/translation-table.c | 4 +-
+ 4 files changed, 52 insertions(+), 20 deletions(-)
+
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -1561,10 +1561,14 @@ int batadv_bla_init(struct batadv_priv *
+ return 0;
+
+ bat_priv->bla.claim_hash = batadv_hash_new(128);
+- bat_priv->bla.backbone_hash = batadv_hash_new(32);
++ if (!bat_priv->bla.claim_hash)
++ return -ENOMEM;
+
+- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
++ bat_priv->bla.backbone_hash = batadv_hash_new(32);
++ if (!bat_priv->bla.backbone_hash) {
++ batadv_hash_destroy(bat_priv->bla.claim_hash);
+ return -ENOMEM;
++ }
+
+ batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
+ &batadv_claim_hash_lock_class_key);
+--- a/net/batman-adv/main.c
++++ b/net/batman-adv/main.c
+@@ -196,29 +196,41 @@ int batadv_mesh_init(struct net_device *
+
+ bat_priv->gw.generation = 0;
+
+- ret = batadv_v_mesh_init(bat_priv);
+- if (ret < 0)
+- goto err;
+-
+ ret = batadv_originator_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_orig;
++ }
+
+ ret = batadv_tt_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_tt;
++ }
++
++ ret = batadv_v_mesh_init(bat_priv);
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_v;
++ }
+
+ ret = batadv_bla_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_bla;
++ }
+
+ ret = batadv_dat_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_dat;
++ }
+
+ ret = batadv_nc_mesh_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_nc;
++ }
+
+ batadv_gw_init(bat_priv);
+ batadv_mcast_init(bat_priv);
+@@ -228,8 +240,20 @@ int batadv_mesh_init(struct net_device *
+
+ return 0;
+
+-err:
+- batadv_mesh_free(soft_iface);
++err_nc:
++ batadv_dat_free(bat_priv);
++err_dat:
++ batadv_bla_free(bat_priv);
++err_bla:
++ batadv_v_mesh_free(bat_priv);
++err_v:
++ batadv_tt_free(bat_priv);
++err_tt:
++ batadv_originator_free(bat_priv);
++err_orig:
++ batadv_purge_outstanding_packets(bat_priv, NULL);
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
++
+ return ret;
+ }
+
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -155,8 +155,10 @@ int batadv_nc_mesh_init(struct batadv_pr
+ &batadv_nc_coding_hash_lock_class_key);
+
+ bat_priv->nc.decoding_hash = batadv_hash_new(128);
+- if (!bat_priv->nc.decoding_hash)
++ if (!bat_priv->nc.decoding_hash) {
++ batadv_hash_destroy(bat_priv->nc.coding_hash);
+ goto err;
++ }
+
+ batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
+ &batadv_nc_decoding_hash_lock_class_key);
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -4405,8 +4405,10 @@ int batadv_tt_init(struct batadv_priv *b
+ return ret;
+
+ ret = batadv_tt_global_init(bat_priv);
+- if (ret < 0)
++ if (ret < 0) {
++ batadv_tt_local_table_free(bat_priv);
+ return ret;
++ }
+
+ batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+ batadv_tt_tvlv_unicast_handler_v1,
--- /dev/null
+From 95a359c9553342d36d408d35331ff0bfce75272f Mon Sep 17 00:00:00 2001
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+Date: Fri, 22 Oct 2021 11:53:43 -0400
+Subject: net: ethernet: microchip: lan743x: Fix dma allocation failure by using dma_set_mask_and_coherent
+
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+
+commit 95a359c9553342d36d408d35331ff0bfce75272f upstream.
+
+The dma failure was reported in the raspberry pi github (issue #4117).
+https://github.com/raspberrypi/linux/issues/4117
+The use of dma_set_mask_and_coherent fixes the issue.
+Tested on 32/64-bit raspberry pi CM4 and 64-bit ubuntu x86 PC with EVB-LAN7430.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Yuiko Oshino <yuiko.oshino@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1770,6 +1770,16 @@ static int lan743x_tx_ring_init(struct l
+ ret = -EINVAL;
+ goto cleanup;
+ }
++ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
++ DMA_BIT_MASK(64))) {
++ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
++ DMA_BIT_MASK(32))) {
++ dev_warn(&tx->adapter->pdev->dev,
++ "lan743x_: No suitable DMA available\n");
++ ret = -ENOMEM;
++ goto cleanup;
++ }
++ }
+ ring_allocation_size = ALIGN(tx->ring_size *
+ sizeof(struct lan743x_tx_descriptor),
+ PAGE_SIZE);
+@@ -2318,6 +2328,16 @@ static int lan743x_rx_ring_init(struct l
+ ret = -EINVAL;
+ goto cleanup;
+ }
++ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
++ DMA_BIT_MASK(64))) {
++ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
++ DMA_BIT_MASK(32))) {
++ dev_warn(&rx->adapter->pdev->dev,
++ "lan743x_: No suitable DMA available\n");
++ ret = -ENOMEM;
++ goto cleanup;
++ }
++ }
+ ring_allocation_size = ALIGN(rx->ring_size *
+ sizeof(struct lan743x_rx_descriptor),
+ PAGE_SIZE);
--- /dev/null
+From d6423d2ec39cce2bfca418c81ef51792891576bc Mon Sep 17 00:00:00 2001
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+Date: Fri, 22 Oct 2021 11:13:53 -0400
+Subject: net: ethernet: microchip: lan743x: Fix driver crash when lan743x_pm_resume fails
+
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+
+commit d6423d2ec39cce2bfca418c81ef51792891576bc upstream.
+
+The driver needs to clean up and return when the initialization fails on resume.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Yuiko Oshino <yuiko.oshino@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -3066,6 +3066,8 @@ static int lan743x_pm_resume(struct devi
+ if (ret) {
+ netif_err(adapter, probe, adapter->netdev,
+ "lan743x_hardware_init returned %d\n", ret);
++ lan743x_pci_cleanup(adapter);
++ return ret;
+ }
+
+ /* open netdev when netdev is at running state while resume.
--- /dev/null
+From ace19b992436a257d9a793672e57abc28fe83e2e Mon Sep 17 00:00:00 2001
+From: Trevor Woerner <twoerner@gmail.com>
+Date: Sun, 24 Oct 2021 13:50:02 -0400
+Subject: net: nxp: lpc_eth.c: avoid hang when bringing interface down
+
+From: Trevor Woerner <twoerner@gmail.com>
+
+commit ace19b992436a257d9a793672e57abc28fe83e2e upstream.
+
+A hard hang is observed whenever the ethernet interface is brought
+down. If the PHY is stopped before the LPC core block is reset,
+the SoC will hang. Comparing lpc_eth_close() and lpc_eth_open() I
+re-arranged the ordering of the function calls in lpc_eth_close() to
+reset the hardware before stopping the PHY.
+Fixes: b7370112f519 ("lpc32xx: Added ethernet driver")
+Signed-off-by: Trevor Woerner <twoerner@gmail.com>
+Acked-by: Vladimir Zapolskiy <vz@mleia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/nxp/lpc_eth.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_devi
+ napi_disable(&pldat->napi);
+ netif_stop_queue(ndev);
+
+- if (ndev->phydev)
+- phy_stop(ndev->phydev);
+-
+ spin_lock_irqsave(&pldat->lock, flags);
+ __lpc_eth_reset(pldat);
+ netif_carrier_off(ndev);
+@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_devi
+ writel(0, LPC_ENET_MAC2(pldat->net_base));
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
++ if (ndev->phydev)
++ phy_stop(ndev->phydev);
+ clk_disable_unprepare(pldat->clk);
+
+ return 0;
--- /dev/null
+From 0c57eeecc559ca6bc18b8c4e2808bc78dbe769b0 Mon Sep 17 00:00:00 2001
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Mon, 25 Oct 2021 05:05:28 -0400
+Subject: net: Prevent infinite while loop in skb_tx_hash()
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+commit 0c57eeecc559ca6bc18b8c4e2808bc78dbe769b0 upstream.
+
+Drivers call netdev_set_num_tc() and then netdev_set_tc_queue()
+to set the queue count and offset for each TC. So the queue count
+and offset for the TCs may be zero for a short period after dev->num_tc
+has been set. If a TX packet is being transmitted at this time in the
+code path netdev_pick_tx() -> skb_tx_hash(), skb_tx_hash() may see
+nonzero dev->num_tc but zero qcount for the TC. The while loop that
+keeps looping while hash >= qcount will not end.
+
+Fix it by checking the TC's qcount to be nonzero before using it.
+
+Fixes: eadec877ce9c ("net: Add support for subordinate traffic classes to netdev_pick_tx")
+Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3171,6 +3171,12 @@ static u16 skb_tx_hash(const struct net_
+
+ qoffset = sb_dev->tc_to_txq[tc].offset;
+ qcount = sb_dev->tc_to_txq[tc].count;
++ if (unlikely(!qcount)) {
++ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
++ sb_dev->name, qoffset, tc);
++ qoffset = 0;
++ qcount = dev->real_num_tx_queues;
++ }
+ }
+
+ if (skb_rx_queue_recorded(skb)) {
--- /dev/null
+From f7a1e76d0f608961cc2fc681f867a834f2746bce Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 25 Oct 2021 02:31:48 -0400
+Subject: net-sysfs: initialize uid and gid before calling net_ns_get_ownership
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit f7a1e76d0f608961cc2fc681f867a834f2746bce upstream.
+
+Currently in net_ns_get_ownership() it may not be able to set uid or gid
+if make_kuid or make_kgid returns an invalid value, and an uninit-value
+issue can be triggered by this.
+
+This patch is to fix it by initializing the uid and gid before calling
+net_ns_get_ownership(), as it does in kobject_get_ownership()
+
+Fixes: e6dee9f3893c ("net-sysfs: add netdev_change_owner()")
+Reported-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/net-sysfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1957,9 +1957,9 @@ int netdev_register_kobject(struct net_d
+ int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
+ const struct net *net_new)
+ {
++ kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
++ kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
+ struct device *dev = &ndev->dev;
+- kuid_t old_uid, new_uid;
+- kgid_t old_gid, new_gid;
+ int error;
+
+ net_ns_get_ownership(net_old, &old_uid, &old_gid);
--- /dev/null
+From 1d9d6fd21ad4a28b16ed9ee5432ae738b9dc58aa Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Wed, 27 Oct 2021 17:59:21 -0400
+Subject: net/tls: Fix flipped sign in async_wait.err assignment
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit 1d9d6fd21ad4a28b16ed9ee5432ae738b9dc58aa upstream.
+
+sk->sk_err contains a positive number, yet async_wait.err wants the
+opposite. Fix the missed sign flip, which Jakub caught by inspection.
+
+Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -459,7 +459,7 @@ static void tls_encrypt_done(struct cryp
+
+ /* If err is already set on socket, return the same code */
+ if (sk->sk_err) {
+- ctx->async_wait.err = sk->sk_err;
++ ctx->async_wait.err = -sk->sk_err;
+ } else {
+ ctx->async_wait.err = err;
+ tls_err_abort(sk, err);
--- /dev/null
+From 4a089e95b4d6bb625044d47aed0c442a8f7bd093 Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Wed, 20 Oct 2021 12:11:16 -0700
+Subject: nios2: Make NIOS2_DTB_SOURCE_BOOL depend on !COMPILE_TEST
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+commit 4a089e95b4d6bb625044d47aed0c442a8f7bd093 upstream.
+
+nios2:allmodconfig builds fail with
+
+make[1]: *** No rule to make target 'arch/nios2/boot/dts/""',
+ needed by 'arch/nios2/boot/dts/built-in.a'. Stop.
+make: [Makefile:1868: arch/nios2/boot/dts] Error 2 (ignored)
+
+This is seen with compile tests since those enable NIOS2_DTB_SOURCE_BOOL,
+which in turn enables NIOS2_DTB_SOURCE. This causes the build error
+because the default value for NIOS2_DTB_SOURCE is an empty string.
+Disable NIOS2_DTB_SOURCE_BOOL for compile tests to avoid the error.
+
+Fixes: 2fc8483fdcde ("nios2: Build infrastructure")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Dinh Nguyen <dinguyen@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/nios2/platform/Kconfig.platform | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/nios2/platform/Kconfig.platform
++++ b/arch/nios2/platform/Kconfig.platform
+@@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR
+
+ config NIOS2_DTB_SOURCE_BOOL
+ bool "Compile and link device tree into kernel image"
++ depends on !COMPILE_TEST
+ help
+ This allows you to specify a dts (device tree source) file
+ which will be compiled and linked into the kernel image.
--- /dev/null
+From d89b9f3bbb58e9e378881209756b0723694f22ff Mon Sep 17 00:00:00 2001
+From: Varun Prakash <varun@chelsio.com>
+Date: Mon, 25 Oct 2021 22:47:30 +0530
+Subject: nvme-tcp: fix data digest pointer calculation
+
+From: Varun Prakash <varun@chelsio.com>
+
+commit d89b9f3bbb58e9e378881209756b0723694f22ff upstream.
+
+ddgst is of type __le32, &req->ddgst + req->offset
+increases &req->ddgst by 4 * req->offset, fix this by
+type casting &req->ddgst to u8 *.
+
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1040,7 +1040,7 @@ static int nvme_tcp_try_send_ddgst(struc
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+- .iov_base = &req->ddgst + req->offset,
++ .iov_base = (u8 *)&req->ddgst + req->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
+ };
+
--- /dev/null
+From ce7723e9cdae4eb3030da082876580f4b2dc0861 Mon Sep 17 00:00:00 2001
+From: Varun Prakash <varun@chelsio.com>
+Date: Tue, 26 Oct 2021 19:01:55 +0530
+Subject: nvme-tcp: fix possible req->offset corruption
+
+From: Varun Prakash <varun@chelsio.com>
+
+commit ce7723e9cdae4eb3030da082876580f4b2dc0861 upstream.
+
+With commit db5ad6b7f8cd ("nvme-tcp: try to send request in queue_rq
+context") r2t and response PDU can get processed while send function
+is executing.
+
+Current data digest send code uses req->offset after kernel_sendmsg(),
+this creates a race condition where req->offset gets reset before it
+is used in send function.
+
+This can happen in two cases -
+1. Target sends r2t PDU which resets req->offset.
+2. Target send response PDU which completes the req and then req is
+ used for a new command, nvme_tcp_setup_cmd_pdu() resets req->offset.
+
+Fix this by storing req->offset in a local variable and using
+this local variable after kernel_sendmsg().
+
+Fixes: db5ad6b7f8cd ("nvme-tcp: try to send request in queue_rq context")
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/tcp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1037,6 +1037,7 @@ static int nvme_tcp_try_send_data_pdu(st
+ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
+ {
+ struct nvme_tcp_queue *queue = req->queue;
++ size_t offset = req->offset;
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+@@ -1053,7 +1054,7 @@ static int nvme_tcp_try_send_ddgst(struc
+ if (unlikely(ret <= 0))
+ return ret;
+
+- if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
++ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ nvme_tcp_done_send_req(queue);
+ return 1;
+ }
--- /dev/null
+From e790de54e94a7a15fb725b34724d41d41cbaa60c Mon Sep 17 00:00:00 2001
+From: Varun Prakash <varun@chelsio.com>
+Date: Mon, 25 Oct 2021 22:46:54 +0530
+Subject: nvmet-tcp: fix data digest pointer calculation
+
+From: Varun Prakash <varun@chelsio.com>
+
+commit e790de54e94a7a15fb725b34724d41d41cbaa60c upstream.
+
+exp_ddgst is of type __le32, &cmd->exp_ddgst + cmd->offset increases
+&cmd->exp_ddgst by 4 * cmd->offset, fix this by type casting
+&cmd->exp_ddgst to u8 *.
+
+Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/target/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -690,7 +690,7 @@ static int nvmet_try_send_ddgst(struct n
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+- .iov_base = &cmd->exp_ddgst + cmd->offset,
++ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ };
+ int ret;
--- /dev/null
+From e77bcdd1f639809950c45234b08647ac6d3ffe7b Mon Sep 17 00:00:00 2001
+From: Rakesh Babu <rsaladi2@marvell.com>
+Date: Wed, 27 Oct 2021 23:02:33 +0530
+Subject: octeontx2-af: Display all enabled PF VF rsrc_alloc entries.
+
+From: Rakesh Babu <rsaladi2@marvell.com>
+
+commit e77bcdd1f639809950c45234b08647ac6d3ffe7b upstream.
+
+Currently, we are using a fixed buffer size of length 2048 to display
+rsrc_alloc output. As a result a maximum of 2048 characters of
+rsrc_alloc output is displayed, which may lead sometimes to display only
+partial output. This patch fixes this dependency on max limit of buffer
+size and displays all PF VF entries.
+
+Each column of the debugfs entry "rsrc_alloc" uses a fixed width of 12
+characters to print the list of LFs of each block for a PF/VF. If the
+length of list of LFs of a block exceeds this fixed width then the list
+gets truncated and displays only a part of the list. This patch fixes
+this by using the maximum possible length of list of LFs among all
+blocks of all PFs and VFs entries as the width size.
+
+Fixes: f7884097141b ("octeontx2-af: Formatting debugfs entry rsrc_alloc.")
+Fixes: 23205e6d06d4 ("octeontx2-af: Dump current resource provisioning status")
+Signed-off-by: Rakesh Babu <rsaladi2@marvell.com>
+Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <Sunil.Goutham@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 138 ++++++++++++----
+ 1 file changed, 106 insertions(+), 32 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -139,18 +139,85 @@ static const struct file_operations rvu_
+
+ static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
++static void get_lf_str_list(struct rvu_block block, int pcifunc,
++ char *lfs)
++{
++ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
++
++ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
++ if (lf >= block.lf.max)
++ break;
++
++ if (block.fn_map[lf] != pcifunc)
++ continue;
++
++ if (lf == prev_lf + 1) {
++ prev_lf = lf;
++ seq = 1;
++ continue;
++ }
++
++ if (seq)
++ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
++ else
++ len += (len ? sprintf(lfs + len, ",%d", lf) :
++ sprintf(lfs + len, "%d", lf));
++
++ prev_lf = lf;
++ seq = 0;
++ }
++
++ if (seq)
++ len += sprintf(lfs + len, "-%d", prev_lf);
++
++ lfs[len] = '\0';
++}
++
++static int get_max_column_width(struct rvu *rvu)
++{
++ int index, pf, vf, lf_str_size = 12, buf_size = 256;
++ struct rvu_block block;
++ u16 pcifunc;
++ char *buf;
++
++ buf = kzalloc(buf_size, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
++ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
++ pcifunc = pf << 10 | vf;
++ if (!pcifunc)
++ continue;
++
++ for (index = 0; index < BLK_COUNT; index++) {
++ block = rvu->hw->block[index];
++ if (!strlen(block.name))
++ continue;
++
++ get_lf_str_list(block, pcifunc, buf);
++ if (lf_str_size <= strlen(buf))
++ lf_str_size = strlen(buf) + 1;
++ }
++ }
++ }
++
++ kfree(buf);
++ return lf_str_size;
++}
++
+ /* Dumps current provisioning status of all RVU block LFs */
+ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+- int index, off = 0, flag = 0, go_back = 0, len = 0;
++ int index, off = 0, flag = 0, len = 0, i = 0;
+ struct rvu *rvu = filp->private_data;
+- int lf, pf, vf, pcifunc;
++ int bytes_not_copied = 0;
+ struct rvu_block block;
+- int bytes_not_copied;
+- int lf_str_size = 12;
++ int pf, vf, pcifunc;
+ int buf_size = 2048;
++ int lf_str_size;
+ char *lfs;
+ char *buf;
+
+@@ -162,6 +229,9 @@ static ssize_t rvu_dbg_rsrc_attach_statu
+ if (!buf)
+ return -ENOSPC;
+
++ /* Get the maximum width of a column */
++ lf_str_size = get_max_column_width(rvu);
++
+ lfs = kzalloc(lf_str_size, GFP_KERNEL);
+ if (!lfs) {
+ kfree(buf);
+@@ -175,65 +245,69 @@ static ssize_t rvu_dbg_rsrc_attach_statu
+ "%-*s", lf_str_size,
+ rvu->hw->block[index].name);
+ }
++
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
++ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
++ if (bytes_not_copied)
++ goto out;
++
++ i++;
++ *ppos += off;
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
++ off = 0;
++ flag = 0;
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
+- go_back = scnprintf(&buf[off],
+- buf_size - 1 - off,
+- "%-*s", lf_str_size, lfs);
++ off = scnprintf(&buf[off],
++ buf_size - 1 - off,
++ "%-*s", lf_str_size, lfs);
+ } else {
+ sprintf(lfs, "PF%d", pf);
+- go_back = scnprintf(&buf[off],
+- buf_size - 1 - off,
+- "%-*s", lf_str_size, lfs);
++ off = scnprintf(&buf[off],
++ buf_size - 1 - off,
++ "%-*s", lf_str_size, lfs);
+ }
+
+- off += go_back;
+- for (index = 0; index < BLKTYPE_MAX; index++) {
++ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ len = 0;
+ lfs[len] = '\0';
+- for (lf = 0; lf < block.lf.max; lf++) {
+- if (block.fn_map[lf] != pcifunc)
+- continue;
++ get_lf_str_list(block, pcifunc, lfs);
++ if (strlen(lfs))
+ flag = 1;
+- len += sprintf(&lfs[len], "%d,", lf);
+- }
+
+- if (flag)
+- len--;
+- lfs[len] = '\0';
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+- if (!strlen(lfs))
+- go_back += lf_str_size;
+ }
+- if (!flag)
+- off -= go_back;
+- else
+- flag = 0;
+- off--;
+- off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
++ if (flag) {
++ off += scnprintf(&buf[off],
++ buf_size - 1 - off, "\n");
++ bytes_not_copied = copy_to_user(buffer +
++ (i * off),
++ buf, off);
++ if (bytes_not_copied)
++ goto out;
++
++ i++;
++ *ppos += off;
++ }
+ }
+ }
+
+- bytes_not_copied = copy_to_user(buffer, buf, off);
++out:
+ kfree(lfs);
+ kfree(buf);
+-
+ if (bytes_not_copied)
+ return -EFAULT;
+
+- *ppos = off;
+- return off;
++ return *ppos;
+ }
+
+ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
--- /dev/null
+From c10a485c3de5ccbf1fff65a382cebcb2730c6b06 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 24 Oct 2021 21:48:02 +0200
+Subject: phy: phy_ethtool_ksettings_get: Lock the phy for consistency
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+commit c10a485c3de5ccbf1fff65a382cebcb2730c6b06 upstream.
+
+The PHY structure should be locked while copying information out if
+it, otherwise there is no guarantee of self consistency. Without the
+lock the PHY state machine could be updating the structure.
+
+Fixes: 2d55173e71b0 ("phy: add generic function to support ksetting support")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -316,6 +316,7 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set)
+ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd)
+ {
++ mutex_lock(&phydev->lock);
+ linkmode_copy(cmd->link_modes.supported, phydev->supported);
+ linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+ linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
+@@ -334,6 +335,7 @@ void phy_ethtool_ksettings_get(struct ph
+ cmd->base.autoneg = phydev->autoneg;
+ cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
+ cmd->base.eth_tp_mdix = phydev->mdix;
++ mutex_unlock(&phydev->lock);
+ }
+ EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
--- /dev/null
+From af1a02aa23c37045e6adfcf074cf7dbac167a403 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 24 Oct 2021 21:48:05 +0200
+Subject: phy: phy_ethtool_ksettings_set: Lock the PHY while changing settings
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+commit af1a02aa23c37045e6adfcf074cf7dbac167a403 upstream.
+
+There is a race condition where the PHY state machine can change
+members of the phydev structure at the same time userspace requests a
+change via ethtool. To prevent this, have phy_ethtool_ksettings_set
+take the PHY lock.
+
+Fixes: 2d55173e71b0 ("phy: add generic function to support ksetting support")
+Reported-by: Walter Stoll <Walter.Stoll@duagon.com>
+Suggested-by: Walter Stoll <Walter.Stoll@duagon.com>
+Tested-by: Walter Stoll <Walter.Stoll@duagon.com>
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -814,6 +814,7 @@ int phy_ethtool_ksettings_set(struct phy
+ duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
++ mutex_lock(&phydev->lock);
+ phydev->autoneg = autoneg;
+
+ if (autoneg == AUTONEG_DISABLE) {
+@@ -830,8 +831,9 @@ int phy_ethtool_ksettings_set(struct phy
+ phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+ /* Restart the PHY */
+- phy_start_aneg(phydev);
++ _phy_start_aneg(phydev);
+
++ mutex_unlock(&phydev->lock);
+ return 0;
+ }
+ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
--- /dev/null
+From 64cd92d5e8180c2ded3fdea76862de6f596ae2c9 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 24 Oct 2021 21:48:03 +0200
+Subject: phy: phy_ethtool_ksettings_set: Move after phy_start_aneg
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+commit 64cd92d5e8180c2ded3fdea76862de6f596ae2c9 upstream.
+
+This allows it to make use of a helper which assume the PHY is already
+locked.
+
+Fixes: 2d55173e71b0 ("phy: add generic function to support ksetting support")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c | 106 +++++++++++++++++++++++++-------------------------
+ 1 file changed, 53 insertions(+), 53 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -260,59 +260,6 @@ static void phy_sanitize_settings(struct
+ }
+ }
+
+-int phy_ethtool_ksettings_set(struct phy_device *phydev,
+- const struct ethtool_link_ksettings *cmd)
+-{
+- __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+- u8 autoneg = cmd->base.autoneg;
+- u8 duplex = cmd->base.duplex;
+- u32 speed = cmd->base.speed;
+-
+- if (cmd->base.phy_address != phydev->mdio.addr)
+- return -EINVAL;
+-
+- linkmode_copy(advertising, cmd->link_modes.advertising);
+-
+- /* We make sure that we don't pass unsupported values in to the PHY */
+- linkmode_and(advertising, advertising, phydev->supported);
+-
+- /* Verify the settings we care about. */
+- if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+- return -EINVAL;
+-
+- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+- return -EINVAL;
+-
+- if (autoneg == AUTONEG_DISABLE &&
+- ((speed != SPEED_1000 &&
+- speed != SPEED_100 &&
+- speed != SPEED_10) ||
+- (duplex != DUPLEX_HALF &&
+- duplex != DUPLEX_FULL)))
+- return -EINVAL;
+-
+- phydev->autoneg = autoneg;
+-
+- if (autoneg == AUTONEG_DISABLE) {
+- phydev->speed = speed;
+- phydev->duplex = duplex;
+- }
+-
+- linkmode_copy(phydev->advertising, advertising);
+-
+- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+- phydev->advertising, autoneg == AUTONEG_ENABLE);
+-
+- phydev->master_slave_set = cmd->base.master_slave_cfg;
+- phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+-
+- /* Restart the PHY */
+- phy_start_aneg(phydev);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+-
+ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd)
+ {
+@@ -818,6 +765,59 @@ static int phy_poll_aneg_done(struct phy
+ return ret < 0 ? ret : 0;
+ }
+
++int phy_ethtool_ksettings_set(struct phy_device *phydev,
++ const struct ethtool_link_ksettings *cmd)
++{
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
++ u8 autoneg = cmd->base.autoneg;
++ u8 duplex = cmd->base.duplex;
++ u32 speed = cmd->base.speed;
++
++ if (cmd->base.phy_address != phydev->mdio.addr)
++ return -EINVAL;
++
++ linkmode_copy(advertising, cmd->link_modes.advertising);
++
++ /* We make sure that we don't pass unsupported values in to the PHY */
++ linkmode_and(advertising, advertising, phydev->supported);
++
++ /* Verify the settings we care about. */
++ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
++ return -EINVAL;
++
++ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
++ return -EINVAL;
++
++ if (autoneg == AUTONEG_DISABLE &&
++ ((speed != SPEED_1000 &&
++ speed != SPEED_100 &&
++ speed != SPEED_10) ||
++ (duplex != DUPLEX_HALF &&
++ duplex != DUPLEX_FULL)))
++ return -EINVAL;
++
++ phydev->autoneg = autoneg;
++
++ if (autoneg == AUTONEG_DISABLE) {
++ phydev->speed = speed;
++ phydev->duplex = duplex;
++ }
++
++ linkmode_copy(phydev->advertising, advertising);
++
++ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
++ phydev->advertising, autoneg == AUTONEG_ENABLE);
++
++ phydev->master_slave_set = cmd->base.master_slave_cfg;
++ phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
++
++ /* Restart the PHY */
++ phy_start_aneg(phydev);
++
++ return 0;
++}
++EXPORT_SYMBOL(phy_ethtool_ksettings_set);
++
+ /**
+ * phy_speed_down - set speed to lowest speed supported by both link partners
+ * @phydev: the phy_device struct
--- /dev/null
+From 707293a56f95f8e7e0cfae008010c7933fb68973 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 24 Oct 2021 21:48:04 +0200
+Subject: phy: phy_start_aneg: Add an unlocked version
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+commit 707293a56f95f8e7e0cfae008010c7933fb68973 upstream.
+
+Split phy_start_aneg into a wrapper which takes the PHY lock, and a
+helper doing the real work. This will be needed when
+phy_ethtool_ksettings_set takes the lock.
+
+Fixes: 2d55173e71b0 ("phy: add generic function to support ksetting support")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c | 30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -716,7 +716,7 @@ static int phy_check_link_status(struct
+ }
+
+ /**
+- * phy_start_aneg - start auto-negotiation for this PHY device
++ * _phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+@@ -724,25 +724,43 @@ static int phy_check_link_status(struct
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+-int phy_start_aneg(struct phy_device *phydev)
++static int _phy_start_aneg(struct phy_device *phydev)
+ {
+ int err;
+
++ lockdep_assert_held(&phydev->lock);
++
+ if (!phydev->drv)
+ return -EIO;
+
+- mutex_lock(&phydev->lock);
+-
+ if (AUTONEG_DISABLE == phydev->autoneg)
+ phy_sanitize_settings(phydev);
+
+ err = phy_config_aneg(phydev);
+ if (err < 0)
+- goto out_unlock;
++ return err;
+
+ if (phy_is_started(phydev))
+ err = phy_check_link_status(phydev);
+-out_unlock:
++
++ return err;
++}
++
++/**
++ * phy_start_aneg - start auto-negotiation for this PHY device
++ * @phydev: the phy_device struct
++ *
++ * Description: Sanitizes the settings (if we're not autonegotiating
++ * them), and then calls the driver's config_aneg function.
++ * If the PHYCONTROL Layer is operating, we change the state to
++ * reflect the beginning of Auto-negotiation or forcing.
++ */
++int phy_start_aneg(struct phy_device *phydev)
++{
++ int err;
++
++ mutex_lock(&phydev->lock);
++ err = _phy_start_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return err;
--- /dev/null
+From 1ab52ac1e9bc9391f592c9fa8340a6e3e9c36286 Mon Sep 17 00:00:00 2001
+From: Patrisious Haddad <phaddad@nvidia.com>
+Date: Wed, 6 Oct 2021 12:31:53 +0300
+Subject: RDMA/mlx5: Set user priority for DCT
+
+From: Patrisious Haddad <phaddad@nvidia.com>
+
+commit 1ab52ac1e9bc9391f592c9fa8340a6e3e9c36286 upstream.
+
+Currently, the driver doesn't set the PCP-based priority for DCT, hence
+DCT response packets are transmitted without user priority.
+
+Fix it by setting user provided priority in the eth_prio field in the DCT
+context, which in turn sets the value in the transmitted packet.
+
+Fixes: 776a3906b692 ("IB/mlx5: Add support for DC target QP")
+Link: https://lore.kernel.org/r/5fd2d94a13f5742d8803c218927322257d53205c.1633512672.git.leonro@nvidia.com
+Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/qp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4216,6 +4216,8 @@ static int mlx5_ib_modify_dct(struct ib_
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
++ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
++ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
+
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
--- /dev/null
+From 64733956ebba7cc629856f4a6ee35a52bc9c023f Mon Sep 17 00:00:00 2001
+From: Mark Zhang <markzhang@nvidia.com>
+Date: Sun, 24 Oct 2021 09:08:20 +0300
+Subject: RDMA/sa_query: Use strscpy_pad instead of memcpy to copy a string
+
+From: Mark Zhang <markzhang@nvidia.com>
+
+commit 64733956ebba7cc629856f4a6ee35a52bc9c023f upstream.
+
+When copying the device name, the length of the data memcpy copied exceeds
+the length of the source buffer, which cause the KASAN issue below. Use
+strscpy_pad() instead.
+
+ BUG: KASAN: slab-out-of-bounds in ib_nl_set_path_rec_attrs+0x136/0x320 [ib_core]
+ Read of size 64 at addr ffff88811a10f5e0 by task rping/140263
+ CPU: 3 PID: 140263 Comm: rping Not tainted 5.15.0-rc1+ #1
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+ dump_stack_lvl+0x57/0x7d
+ print_address_description.constprop.0+0x1d/0xa0
+ kasan_report+0xcb/0x110
+ kasan_check_range+0x13d/0x180
+ memcpy+0x20/0x60
+ ib_nl_set_path_rec_attrs+0x136/0x320 [ib_core]
+ ib_nl_make_request+0x1c6/0x380 [ib_core]
+ send_mad+0x20a/0x220 [ib_core]
+ ib_sa_path_rec_get+0x3e3/0x800 [ib_core]
+ cma_query_ib_route+0x29b/0x390 [rdma_cm]
+ rdma_resolve_route+0x308/0x3e0 [rdma_cm]
+ ucma_resolve_route+0xe1/0x150 [rdma_ucm]
+ ucma_write+0x17b/0x1f0 [rdma_ucm]
+ vfs_write+0x142/0x4d0
+ ksys_write+0x133/0x160
+ do_syscall_64+0x43/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+ RIP: 0033:0x7f26499aa90f
+ Code: 89 54 24 18 48 89 74 24 10 89 7c 24 08 e8 29 fd ff ff 48 8b 54 24 18 48 8b 74 24 10 41 89 c0 8b 7c 24 08 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 31 44 89 c7 48 89 44 24 08 e8 5c fd ff ff 48
+ RSP: 002b:00007f26495f2dc0 EFLAGS: 00000293 ORIG_RAX: 0000000000000001
+ RAX: ffffffffffffffda RBX: 00000000000007d0 RCX: 00007f26499aa90f
+ RDX: 0000000000000010 RSI: 00007f26495f2e00 RDI: 0000000000000003
+ RBP: 00005632a8315440 R08: 0000000000000000 R09: 0000000000000001
+ R10: 0000000000000000 R11: 0000000000000293 R12: 00007f26495f2e00
+ R13: 00005632a83154e0 R14: 00005632a8315440 R15: 00005632a830a810
+
+ Allocated by task 131419:
+ kasan_save_stack+0x1b/0x40
+ __kasan_kmalloc+0x7c/0x90
+ proc_self_get_link+0x8b/0x100
+ pick_link+0x4f1/0x5c0
+ step_into+0x2eb/0x3d0
+ walk_component+0xc8/0x2c0
+ link_path_walk+0x3b8/0x580
+ path_openat+0x101/0x230
+ do_filp_open+0x12e/0x240
+ do_sys_openat2+0x115/0x280
+ __x64_sys_openat+0xce/0x140
+ do_syscall_64+0x43/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+Fixes: 2ca546b92a02 ("IB/sa: Route SA pathrecord query through netlink")
+Link: https://lore.kernel.org/r/72ede0f6dab61f7f23df9ac7a70666e07ef314b0.1635055496.git.leonro@nvidia.com
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/sa_query.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -760,8 +760,9 @@ static void ib_nl_set_path_rec_attrs(str
+
+ /* Construct the family header first */
+ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+- memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
+- LS_DEVICE_NAME_MAX);
++ strscpy_pad(header->device_name,
++ dev_name(&query->port->agent->device->dev),
++ LS_DEVICE_NAME_MAX);
+ header->port_num = query->port->port_num;
+
+ if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
--- /dev/null
+From 55e6d8037805b3400096d621091dfbf713f97e83 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Tue, 12 Oct 2021 10:37:35 +0800
+Subject: regmap: Fix possible double-free in regcache_rbtree_exit()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit 55e6d8037805b3400096d621091dfbf713f97e83 upstream.
+
+In regcache_rbtree_insert_to_block(), when 'present' realloc failed,
+the 'blk' which is supposed to assign to 'rbnode->block' will be freed,
+so 'rbnode->block' points a freed memory, in the error handling path of
+regcache_rbtree_init(), 'rbnode->block' will be freed again in
+regcache_rbtree_exit(), KASAN will report double-free as follows:
+
+BUG: KASAN: double-free or invalid-free in kfree+0xce/0x390
+Call Trace:
+ slab_free_freelist_hook+0x10d/0x240
+ kfree+0xce/0x390
+ regcache_rbtree_exit+0x15d/0x1a0
+ regcache_rbtree_init+0x224/0x2c0
+ regcache_init+0x88d/0x1310
+ __regmap_init+0x3151/0x4a80
+ __devm_regmap_init+0x7d/0x100
+ madera_spi_probe+0x10f/0x333 [madera_spi]
+ spi_probe+0x183/0x210
+ really_probe+0x285/0xc30
+
+To fix this, moving up the assignment of rbnode->block to immediately after
+the reallocation has succeeded so that the data structure stays valid even
+if the second reallocation fails.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: 3f4ff561bc88b ("regmap: rbtree: Make cache_present bitmap per node")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20211012023735.1632786-1-yangyingliang@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/regmap/regcache-rbtree.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_blo
+ if (!blk)
+ return -ENOMEM;
+
++ rbnode->block = blk;
++
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+- if (!present) {
+- kfree(blk);
++ if (!present)
+ return -ENOMEM;
+- }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_blo
+ }
+
+ /* update the rbnode block, its size and the base register */
+- rbnode->block = blk;
+ rbnode->blklen = blklen;
+ rbnode->base_reg = base_reg;
+ rbnode->cache_present = present;
--- /dev/null
+From f33eb7f29c16ba78db3221ee02346fd832274cdd Mon Sep 17 00:00:00 2001
+From: Jim Quinlan <jim2101024@gmail.com>
+Date: Tue, 14 Sep 2021 15:11:21 -0700
+Subject: reset: brcmstb-rescal: fix incorrect polarity of status bit
+
+From: Jim Quinlan <jim2101024@gmail.com>
+
+commit f33eb7f29c16ba78db3221ee02346fd832274cdd upstream.
+
+The readl_poll_timeout() should complete when the status bit
+is a 1, not 0.
+
+Fixes: 4cf176e52397 ("reset: Add Broadcom STB RESCAL reset controller")
+Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20210914221122.62315-1-f.fainelli@gmail.com
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/reset/reset-brcmstb-rescal.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/reset/reset-brcmstb-rescal.c
++++ b/drivers/reset/reset-brcmstb-rescal.c
+@@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct
+ }
+
+ ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
+- !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
++ (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+ if (ret) {
+ dev_err(data->dev, "time out on SATA/PCIe rescal\n");
+ return ret;
--- /dev/null
+From 27de809a3d83a6199664479ebb19712533d6fd9b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= <bjorn@kernel.org>
+Date: Thu, 28 Oct 2021 14:51:15 +0200
+Subject: riscv, bpf: Fix potential NULL dereference
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Björn Töpel <bjorn@kernel.org>
+
+commit 27de809a3d83a6199664479ebb19712533d6fd9b upstream.
+
+The bpf_jit_binary_free() function requires a non-NULL argument. When
+the RISC-V BPF JIT fails to converge in NR_JIT_ITERATIONS steps,
+jit_data->header will be NULL, which triggers a NULL
+dereference. Avoid this by checking the argument, prior calling the
+function.
+
+Fixes: ca6cb5447cec ("riscv, bpf: Factor common RISC-V JIT code")
+Signed-off-by: Björn Töpel <bjorn@kernel.org>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20211028125115.514587-1-bjorn@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/net/bpf_jit_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/riscv/net/bpf_jit_core.c
++++ b/arch/riscv/net/bpf_jit_core.c
+@@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(str
+
+ if (i == NR_JIT_ITERATIONS) {
+ pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
+- bpf_jit_binary_free(jit_data->header);
++ if (jit_data->header)
++ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
mm-thp-bail-out-early-in-collapse_file-for-writeback-page.patch
drm-ttm-fix-memleak-in-ttm_transfered_destroy.patch
drm-amdgpu-fix-out-of-bounds-write.patch
+cgroup-fix-memory-leak-caused-by-missing-cgroup_bpf_offline.patch
+riscv-bpf-fix-potential-null-dereference.patch
+tcp_bpf-fix-one-concurrency-problem-in-the-tcp_bpf_send_verdict-function.patch
+bpf-fix-potential-race-in-tail-call-compatibility-check.patch
+bpf-fix-error-usage-of-map_fd-and-fdget-in-generic_map_update_batch.patch
+ib-qib-protect-from-buffer-overflow-in-struct-qib_user_sdma_pkt-fields.patch
+ib-hfi1-fix-abba-locking-issue-with-sc_disable.patch
+nvmet-tcp-fix-data-digest-pointer-calculation.patch
+nvme-tcp-fix-data-digest-pointer-calculation.patch
+nvme-tcp-fix-possible-req-offset-corruption.patch
+octeontx2-af-display-all-enabled-pf-vf-rsrc_alloc-entries.patch
+rdma-mlx5-set-user-priority-for-dct.patch
+arm64-dts-allwinner-h5-nanopi-neo-2-fix-ethernet-node.patch
+reset-brcmstb-rescal-fix-incorrect-polarity-of-status-bit.patch
+regmap-fix-possible-double-free-in-regcache_rbtree_exit.patch
+net-batman-adv-fix-error-handling.patch
+net-sysfs-initialize-uid-and-gid-before-calling-net_ns_get_ownership.patch
+cfg80211-correct-bridge-4addr-mode-check.patch
+net-prevent-infinite-while-loop-in-skb_tx_hash.patch
+rdma-sa_query-use-strscpy_pad-instead-of-memcpy-to-copy-a-string.patch
+gpio-xgs-iproc-fix-parsing-of-ngpios-property.patch
+nios2-make-nios2_dtb_source_bool-depend-on-compile_test.patch
+mlxsw-pci-recycle-received-packet-upon-allocation-failure.patch
+net-ethernet-microchip-lan743x-fix-driver-crash-when-lan743x_pm_resume-fails.patch
+net-ethernet-microchip-lan743x-fix-dma-allocation-failure-by-using-dma_set_mask_and_coherent.patch
+net-nxp-lpc_eth.c-avoid-hang-when-bringing-interface-down.patch
+net-tls-fix-flipped-sign-in-async_wait.err-assignment.patch
+phy-phy_ethtool_ksettings_get-lock-the-phy-for-consistency.patch
+phy-phy_ethtool_ksettings_set-move-after-phy_start_aneg.patch
+phy-phy_start_aneg-add-an-unlocked-version.patch
+phy-phy_ethtool_ksettings_set-lock-the-phy-while-changing-settings.patch
--- /dev/null
+From cd9733f5d75c94a32544d6ce5be47e14194cf137 Mon Sep 17 00:00:00 2001
+From: Liu Jian <liujian56@huawei.com>
+Date: Tue, 12 Oct 2021 13:20:19 +0800
+Subject: tcp_bpf: Fix one concurrency problem in the tcp_bpf_send_verdict function
+
+From: Liu Jian <liujian56@huawei.com>
+
+commit cd9733f5d75c94a32544d6ce5be47e14194cf137 upstream.
+
+With two Msgs, msgA and msgB and a user doing nonblocking sendmsg calls (or
+multiple cores) on a single socket 'sk' we could get the following flow.
+
+ msgA, sk msgB, sk
+ ----------- ---------------
+ tcp_bpf_sendmsg()
+ lock(sk)
+ psock = sk->psock
+ tcp_bpf_sendmsg()
+ lock(sk) ... blocking
+tcp_bpf_send_verdict
+if (psock->eval == NONE)
+ psock->eval = sk_psock_msg_verdict
+ ..
+ < handle SK_REDIRECT case >
+ release_sock(sk) < lock dropped so grab here >
+ ret = tcp_bpf_sendmsg_redir
+ psock = sk->psock
+ tcp_bpf_send_verdict
+ lock_sock(sk) ... blocking on B
+ if (psock->eval == NONE) <- boom.
+ psock->eval will have msgA state
+
+The problem here is we dropped the lock on msgA and grabbed it with msgB.
+Now we have old state in psock and importantly psock->eval has not been
+cleared. So msgB will run whatever action was done on A and the verdict
+program may never see it.
+
+Fixes: 604326b41a6fb ("bpf, sockmap: convert to generic sk_msg interface")
+Signed-off-by: Liu Jian <liujian56@huawei.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20211012052019.184398-1-liujian56@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_bpf.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -317,6 +317,7 @@ static int tcp_bpf_send_verdict(struct s
+ bool cork = false, enospc = sk_msg_full(msg);
+ struct sock *sk_redir;
+ u32 tosend, delta = 0;
++ u32 eval = __SK_NONE;
+ int ret;
+
+ more_data:
+@@ -360,13 +361,24 @@ more_data:
+ case __SK_REDIRECT:
+ sk_redir = psock->sk_redir;
+ sk_msg_apply_bytes(psock, tosend);
++ if (!psock->apply_bytes) {
++ /* Clean up before releasing the sock lock. */
++ eval = psock->eval;
++ psock->eval = __SK_NONE;
++ psock->sk_redir = NULL;
++ }
+ if (psock->cork) {
+ cork = true;
+ psock->cork = NULL;
+ }
+ sk_msg_return(sk, msg, tosend);
+ release_sock(sk);
++
+ ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
++
++ if (eval == __SK_REDIRECT)
++ sock_put(sk_redir);
++
+ lock_sock(sk);
+ if (unlikely(ret < 0)) {
+ int free = sk_msg_free_nocharge(sk, msg);